Dataset columns (all strings, with value-length ranges):
  hip_filename  : 5 to 84 characters
  hip_content   : 79 to 9.69M characters
  cuda_filename : 4 to 83 characters
  cuda_content  : 19 to 9.69M characters
59c9d5283f2ff9c1598c5d295dd7d77e340a5598.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements, int k) { int i = blockDim.x * blockIdx.x + threadIdx.x; //blockDim.x = num of thread per block //blockIdx.x = each block //threadIdx.x. = each thread if (i < numElements) { int start = (i*k); if(i == 0) { start = 0; } int end = k + start; if(end > numElements) { end = numElements; } for(int j = start; j < end; j++) { if(j < numElements) { C[j] = A[j] + B[j]; } } } } /** * Host main routine */ int main(int argc, char **argv) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 100000000; int k = atoi(argv[1]); size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", 
hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel //int threadsPerBlock = 256; //int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; int nT = ceil(numElements/k); int blocksPerGrid = 1000; int threadsPerBlock = nT/blocksPerGrid; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements, k); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
59c9d5283f2ff9c1598c5d295dd7d77e340a5598.cu
/** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements, int k) { int i = blockDim.x * blockIdx.x + threadIdx.x; //blockDim.x = num of thread per block //blockIdx.x = each block //threadIdx.x. = each thread if (i < numElements) { int start = (i*k); if(i == 0) { start = 0; } int end = k + start; if(end > numElements) { end = numElements; } for(int j = start; j < end; j++) { if(j < numElements) { C[j] = A[j] + B[j]; } } } } /** * Host main routine */ int main(int argc, char **argv) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 100000000; int k = atoi(argv[1]); size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } 
// Launch the Vector Add CUDA Kernel //int threadsPerBlock = 256; //int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; int nT = ceil(numElements/k); int blocksPerGrid = 1000; int threadsPerBlock = nT/blocksPerGrid; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements, k); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
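The pair above differs only by the mechanical renames that hipify applies to the CUDA runtime API and to the kernel-launch syntax. Below is a minimal, self-contained sketch of those renames; it is illustrative only (not a row of the dataset), assumes a ROCm toolchain with hipcc, and uses a made-up kernel and sizes. The CUDA form of each rewritten call is kept in the trailing comments.

// Illustrative HIP translation of a tiny CUDA program (hypothetical example).
#include <hip/hip_runtime.h>        // CUDA: #include <cuda_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float *data, float factor, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;  // kernel body is unchanged by hipify
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 1024;
    float *d_data = NULL;
    hipError_t err = hipMalloc((void **)&d_data, n * sizeof(float));  // CUDA: cudaMalloc
    if (err != hipSuccess) {                                          // CUDA: cudaSuccess
        fprintf(stderr, "%s\n", hipGetErrorString(err));              // CUDA: cudaGetErrorString
        return 1;
    }
    // CUDA: scaleKernel<<<4, 256>>>(d_data, 2.0f, n);
    hipLaunchKernelGGL(scaleKernel, dim3(4), dim3(256), 0, 0, d_data, 2.0f, n);
    err = hipGetLastError();                                          // CUDA: cudaGetLastError
    if (err != hipSuccess) {
        fprintf(stderr, "%s\n", hipGetErrorString(err));
        return 1;
    }
    hipFree(d_data);                                                  // CUDA: cudaFree
    return 0;
}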
6f7fc2be74e20d58102a107fa309059b8e698421.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tf_texture_1d.h" #include <cuMat/src/Context.h> #include <ATen/hip/HIPContext.h> #ifdef RENDERER_HAS_RENDERER BEGIN_RENDERER_NAMESPACE __device__ float4 fetch(float density, const TfTexture1D::GpuData& gpuData) { float4 result{ 0.0f, 0.0f, 0.0f, 0.0f }; //This can be done using binary search but now simply do linear search. for (int i = 0; i < gpuData.sizeColor_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisColor_[i - 1]; auto rightDensity = i == gpuData.sizeColor_ ? 1.0f : gpuData.densityAxisColor_[i]; if (density < rightDensity) { auto leftColor = i == 0 ? gpuData.colorAxis_[0] : gpuData.colorAxis_[i - 1]; auto rightColor = i == gpuData.sizeColor_ ? gpuData.colorAxis_[gpuData.sizeColor_ - 1] : gpuData.colorAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); auto rgb = labToRgb(lerp(leftColor, rightColor, t)); result.x = rgb.x; result.y = rgb.y; result.z = rgb.z; break; } } for (int i = 0; i < gpuData.sizeOpacity_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisOpacity_[i - 1]; auto rightDensity = i == gpuData.sizeOpacity_ ? 1.0f : gpuData.densityAxisOpacity_[i]; if (density < rightDensity) { auto leftOpacity = i == 0 ? gpuData.opacityAxis_[0] : gpuData.opacityAxis_[i - 1]; auto rightOpacity = i == gpuData.sizeOpacity_ ? gpuData.opacityAxis_[gpuData.sizeOpacity_ - 1] : gpuData.opacityAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); result.w = lerp(leftOpacity, rightOpacity, t); break; } } return result; } __global__ void ComputeCudaTextureKernel( dim3 virtualSize, TfTexture1D::GpuData gpuData) { CUMAT_KERNEL_1D_LOOP(x, virtualSize) auto density = x / static_cast<float>(virtualSize.x); auto rgbo = fetch(density, gpuData); surf1Dwrite(rgbo.x, gpuData.surfaceObject_, x * 16); surf1Dwrite(rgbo.y, gpuData.surfaceObject_, x * 16 + 4); surf1Dwrite(rgbo.z, gpuData.surfaceObject_, x * 16 + 8); surf1Dwrite(rgbo.w, gpuData.surfaceObject_, x * 16 + 12); CUMAT_KERNEL_1D_LOOP_END } MY_API void computeCudaTexture(const TfTexture1D::GpuData& gpuData) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(gpuData.cudaArraySize_, ComputeCudaTextureKernel); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); ComputeCudaTextureKernel << <cfg.block_count, cfg.thread_per_block, 0, stream >> > (cfg.virtual_size, gpuData); CUMAT_CHECK_ERROR(); } END_RENDERER_NAMESPACE #endif
6f7fc2be74e20d58102a107fa309059b8e698421.cu
#include "tf_texture_1d.h" #include <cuMat/src/Context.h> #include <ATen/cuda/CUDAContext.h> #ifdef RENDERER_HAS_RENDERER BEGIN_RENDERER_NAMESPACE __device__ float4 fetch(float density, const TfTexture1D::GpuData& gpuData) { float4 result{ 0.0f, 0.0f, 0.0f, 0.0f }; //This can be done using binary search but now simply do linear search. for (int i = 0; i < gpuData.sizeColor_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisColor_[i - 1]; auto rightDensity = i == gpuData.sizeColor_ ? 1.0f : gpuData.densityAxisColor_[i]; if (density < rightDensity) { auto leftColor = i == 0 ? gpuData.colorAxis_[0] : gpuData.colorAxis_[i - 1]; auto rightColor = i == gpuData.sizeColor_ ? gpuData.colorAxis_[gpuData.sizeColor_ - 1] : gpuData.colorAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); auto rgb = labToRgb(lerp(leftColor, rightColor, t)); result.x = rgb.x; result.y = rgb.y; result.z = rgb.z; break; } } for (int i = 0; i < gpuData.sizeOpacity_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisOpacity_[i - 1]; auto rightDensity = i == gpuData.sizeOpacity_ ? 1.0f : gpuData.densityAxisOpacity_[i]; if (density < rightDensity) { auto leftOpacity = i == 0 ? gpuData.opacityAxis_[0] : gpuData.opacityAxis_[i - 1]; auto rightOpacity = i == gpuData.sizeOpacity_ ? gpuData.opacityAxis_[gpuData.sizeOpacity_ - 1] : gpuData.opacityAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); result.w = lerp(leftOpacity, rightOpacity, t); break; } } return result; } __global__ void ComputeCudaTextureKernel( dim3 virtualSize, TfTexture1D::GpuData gpuData) { CUMAT_KERNEL_1D_LOOP(x, virtualSize) auto density = x / static_cast<float>(virtualSize.x); auto rgbo = fetch(density, gpuData); surf1Dwrite(rgbo.x, gpuData.surfaceObject_, x * 16); surf1Dwrite(rgbo.y, gpuData.surfaceObject_, x * 16 + 4); surf1Dwrite(rgbo.z, gpuData.surfaceObject_, x * 16 + 8); surf1Dwrite(rgbo.w, gpuData.surfaceObject_, x * 16 + 12); CUMAT_KERNEL_1D_LOOP_END } MY_API void computeCudaTexture(const TfTexture1D::GpuData& gpuData) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(gpuData.cudaArraySize_, ComputeCudaTextureKernel); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); ComputeCudaTextureKernel << <cfg.block_count, cfg.thread_per_block, 0, stream >> > (cfg.virtual_size, gpuData); CUMAT_CHECK_ERROR(); } END_RENDERER_NAMESPACE #endif
7f06089f8a6323b74d582813dc7412c0e1ddb211.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "structure.h" #define gpuErrchk(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } static const int kThreads = 256; using IndexT = int; __managed__ NodeBase **dev_nodes; __managed__ SpringBase **dev_springs; __device__ void new_NodeBase(NodeBase *node, float pos_x, float pos_y) { node->pos_x = pos_x; node->pos_y = pos_y; node->num_springs = 0; node->type = kTypeNodeBase; for (int i = 0; i < kMaxDegree; ++i) { node->springs[i] = NULL; } } __device__ void new_AnchorNode(NodeBase *node, float pos_x, float pos_y) { new_NodeBase(node, pos_x, pos_y); node->type = kTypeAnchorNode; } __device__ void new_AnchorPullNode(NodeBase *node, float pos_x, float pos_y, float vel_x, float vel_y) { new_AnchorNode(node, pos_x, pos_y); node->vel_x = vel_x; node->vel_y = vel_y; node->type = kTypeAnchorPullNode; } __device__ void new_Node(NodeBase *node, float pos_x, float pos_y, float mass) { new_NodeBase(node, pos_x, pos_y); node->mass = mass; node->type = kTypeNode; } // __device__ float NodeBase_distance_to(Node *node, Node *other) { // float dx = node->pos_x - other->pos_x; // float dy = node->pos_y - other->pos_y; // float dist_sq = dx * dx + dy * dy; // return sqrt(dist_sq); // } __device__ void NodeBase_add_spring(NodeBase *node, SpringBase *spring) { assert(node != NULL); // CONCORD int idx = atomicAdd(&node->num_springs, 1); assert(idx + 1 <= kMaxDegree); node->springs[idx] = spring; // CONCORD // assert(spring->p1 == node || spring->p2 == node); } __device__ void new_Spring(SpringBase *spring, NodeBase *p1, NodeBase *p2, float spring_factor, float max_force) { spring->is_active = true; spring->p1 = p1; spring->p2 = p2; spring->factor = spring_factor; spring->force = 0.0f; spring->max_force = max_force; // CONCORD spring->initial_length = p1->distance_to(p2); spring->delete_flag = false; // CONCORD // if (!(spring->initial_length > 0.0f)) // CONCORD // printf("%f \n", spring->initial_length); // CONCORD assert(spring->initial_length > 0.0f); NodeBase_add_spring(p1, spring); NodeBase_add_spring(p2, spring); } __device__ void NodeBase_remove_spring(NodeBase *node, SpringBase *spring) { for (int i = 0; i < kMaxDegree; ++i) { // CONCORD if (node->springs[i] == spring) { node->springs[i] = NULL; // CONCORD if (atomicSub(&node->num_springs, 1) == 1) { // Deleted last spring. node->type = 0; } return; } } // Spring not found. 
assert(false); } __device__ void AnchorPullNode_pull(NodeBase *node) { node->pos_x += node->vel_x * kDt; node->pos_y += node->vel_y * kDt; } __device__ void Spring_self_destruct(SpringBase *spring) { // CONCORD NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); NodeBase_remove_spring(p1, spring); // CONCORD NodeBase_remove_spring(p2, spring); spring->is_active = false; } __device__ void Spring_compute_force(SpringBase *spring) { // CONCORD float dist; NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); CONCORD(dist, p1, distance_to(p2)); // CONCORD float int_len ; CONCORD(int_len,spring,get_init_len()); float displacement = max(0.0f, dist - int_len); // CONCORD CONCORD(spring, update_force(displacement)); // CONCORD bool cond; CONCORD(cond, spring,is_max_force()); if (cond) { // CONCORD CONCORD( p1, remove_spring(spring)); // CONCORD CONCORD( p2, remove_spring(spring)); // CONCORD CONCORD(spring, deactivate()); // Spring_self_destruct(spring); } } __device__ void Node_move(NodeBase *node) { float force_x = 0.0f; float force_y = 0.0f; for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *s; CONCORD(s, node, spring(i)); ; if (s != NULL) { NodeBase *from; NodeBase *to; NodeBase *p1; NodeBase *p2; CONCORD(p1, s, get_p1()); if (p1 == node) { from = node; CONCORD(to, s, get_p2()); } else { CONCORD(p2, s, get_p2()); assert(p2 == node); from = node; CONCORD(to, s, get_p1()); } // Calculate unit vector. // CONCORD float dist; CONCORD(dist, to, distance_to(from)); ; // CONCORD float unit_x; CONCORD(unit_x, to, unit_x(from, dist)); ; // CONCORD float unit_y; CONCORD(unit_y, to, unit_y(from, dist)); ; // Apply force. // CONCORD float temp_cond; CONCORD(temp_cond, s, get_force()); force_x += unit_x * temp_cond; // CONCORD CONCORD(temp_cond, s, get_force()); force_y += unit_y * temp_cond; } } // Calculate new velocity and position. // CONCORD CONCORD(node, update_vel_x(force_x)); ; // CONCORD CONCORD(node, update_vel_y(force_y)); ; // CONCORD CONCORD(node, update_pos_x(force_x)); ; // CONCORD CONCORD(node, update_pos_y(force_y)); ; } __device__ void NodeBase_initialize_bfs(NodeBase *node) { if (node->type == kTypeAnchorNode) { // CONCORD CONCORD(node, set_distance(0)); ; } else { // CONCORD CONCORD(node, set_distance(kMaxDistance)); ; // should be int_max } } __device__ bool dev_bfs_continue; __device__ void NodeBase_bfs_visit(NodeBase *node, int distance) { // CONCORD float dis; CONCORD(dis, node, get_distance()); if (distance == dis) { // Continue until all vertices were visited. dev_bfs_continue = true; for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *spring; CONCORD(spring, node, spring(i)); ; // CONCORD if (spring != NULL) { // Find neighboring vertices. NodeBase *n; // CONCORD NodeBase *temo_; CONCORD(temo_, spring, get_p1()); if (node == temo_) { // CONCORD CONCORD(n, spring, get_p2()); ; } else { // CONCORD CONCORD(n, spring, get_p1()); ; } float dis2; CONCORD(dis2, n, get_distance()); // CONCORD if (dis2 == kMaxDistance) { // Set distance on neighboring vertex if unvisited. 
// CONCORD CONCORD(n, set_distance(distance + 1)); ; } } } } } __device__ void Spring_bfs_delete(SpringBase *spring) { // CONCORD if (spring->delete_flag) { NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); // CONCORD CONCORD(p1, remove_spring(spring)); ; // CONCORD CONCORD(p2, remove_spring(spring)); ; // CONCORD CONCORD(spring, deactivate()); ; } } __device__ void NodeBase_bfs_set_delete_flags(NodeBase *node) { if (node->distance == kMaxDistance) { // should be int_max for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *spring; CONCORD(spring, node, spring(i)); ; // CONCORD if (spring != NULL) { spring->delete_flag = true; // Spring_bfs_delete(spring); } } } } // Only for rendering and checksum computation. __device__ int dev_num_springs; __device__ SpringInfo dev_spring_info[kMaxSprings]; int host_num_springs; SpringInfo host_spring_info[kMaxSprings]; __device__ void Spring_add_to_rendering_array(SpringBase *spring) { // CONCORD int idx = atomicAdd(&dev_num_springs, 1); dev_spring_info[idx].p1_x = spring->p1->pos_x; dev_spring_info[idx].p1_y = spring->p1->pos_y; dev_spring_info[idx].p2_x = spring->p2->pos_x; dev_spring_info[idx].p2_y = spring->p2->pos_y; dev_spring_info[idx].force = spring->force; dev_spring_info[idx].max_force = spring->max_force; } __global__ void kernel_AnchorPullNode_pull() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type == kTypeAnchorPullNode) { // CONCORD CONCORD(dev_nodes[i], pull()); ; } } } __global__ void kernel_Node_move() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type == kTypeNode) { Node_move(dev_nodes[i]); } } } __global__ void kernel_NodeBase_initialize_bfs() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_initialize_bfs(dev_nodes[i]); } } } __global__ void kernel_NodeBase_bfs_visit(int dist) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_bfs_visit(dev_nodes[i], dist); } } } __global__ void kernel_NodeBase_bfs_set_delete_flags() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_bfs_set_delete_flags(dev_nodes[i]); } } } __global__ void kernel_Spring_compute_force() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_compute_force(dev_springs[i]); } } } __global__ void kernel_Spring_bfs_delete() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_bfs_delete(dev_springs[i]); } } } __global__ void kernel_Spring_add_to_rendering_array() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_add_to_rendering_array(dev_springs[i]); } } } __global__ void kernel_initialize_nodes() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { dev_nodes[i] = new Node(); assert(dev_nodes[i] != NULL); dev_nodes[i]->type = 0; } } __global__ void kernel_initialize_springs() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += 
blockDim.x * gridDim.x) { // CONCORD dev_springs[i] = new Spring(); // CONCORD assert(dev_springs[i] != NULL); dev_springs[i]->is_active = false; } } void transfer_data() { int zero = 0; hipMemcpyToSymbol(dev_num_springs, &zero, sizeof(int), 0, hipMemcpyHostToDevice); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_Spring_add_to_rendering_array), dim3(128), dim3(128), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(&host_num_springs, dev_num_springs, sizeof(int), 0, hipMemcpyDeviceToHost); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(host_spring_info, dev_spring_info, sizeof(SpringInfo) * host_num_springs, 0, hipMemcpyDeviceToHost); gpuErrchk(hipDeviceSynchronize()); } float checksum() { transfer_data(); float result = 0.0f; // CONCORD for (int i = 0; i < host_num_springs; ++i) { result += host_spring_info[i].p1_x * host_spring_info[i].p2_y * host_spring_info[i].force; } return result; } void compute() { hipLaunchKernelGGL(( kernel_Spring_compute_force), dim3((kMaxSprings + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_Node_move), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } void bfs_and_delete() { // Perform BFS to check reachability. hipLaunchKernelGGL(( kernel_NodeBase_initialize_bfs), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); for (int i = 0; i < kMaxDistance; ++i) { bool continue_flag = false; hipMemcpyToSymbol(dev_bfs_continue, &continue_flag, sizeof(bool), 0, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_NodeBase_bfs_visit), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, i); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(&continue_flag, dev_bfs_continue, sizeof(bool), 0, hipMemcpyDeviceToHost); if (!continue_flag) break; } // Delete springs (and nodes). 
hipLaunchKernelGGL(( kernel_NodeBase_bfs_set_delete_flags), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_Spring_bfs_delete), dim3((kMaxSprings + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } void step() { hipLaunchKernelGGL(( kernel_AnchorPullNode_pull), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); for (int i = 0; i < kNumComputeIterations; ++i) { compute(); } bfs_and_delete(); } void initialize_memory() { hipLaunchKernelGGL(( kernel_initialize_nodes), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_initialize_springs), dim3((kMaxSprings + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __device__ IndexT dev_tmp_nodes[kMaxNodes]; __device__ IndexT dev_node_counter; __global__ void kernel_create_nodes(DsNode *nodes, int num_nodes) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_nodes; i += blockDim.x * gridDim.x) { int idx = atomicAdd(&dev_node_counter, 1); assert(dev_nodes != NULL); dev_tmp_nodes[i] = idx; assert(dev_nodes[idx] != NULL); if (nodes[i].type == kTypeNode) { new_Node(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y, nodes[i].mass); } else if (nodes[i].type == kTypeAnchorPullNode) { new_AnchorPullNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y, nodes[i].vel_x, nodes[i].vel_y); } else if (nodes[i].type == kTypeAnchorNode) { new_AnchorNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y); } else { assert(false); } } } __global__ void kernel_create_springs(DsSpring *springs, int num_springs) { // CONCORD for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_springs; i += blockDim.x * gridDim.x) { // CONCORD assert(dev_springs[i] != nullptr); new_Spring(dev_springs[i], dev_nodes[dev_tmp_nodes[springs[i].p1]], dev_nodes[dev_tmp_nodes[springs[i].p2]], springs[i].spring_factor, springs[i].max_force); // printf("%p \n", dev_springs[i]); } } void load_dataset(Dataset &dataset) { DsNode *host_nodes; hipMalloc(&host_nodes, sizeof(DsNode) * dataset.nodes.size()); hipMemcpy(host_nodes, dataset.nodes.data(), sizeof(DsNode) * dataset.nodes.size(), hipMemcpyHostToDevice); DsSpring *host_springs; hipMalloc(&host_springs, sizeof(DsSpring) * dataset.springs.size()); hipMemcpy(host_springs, dataset.springs.data(), sizeof(DsSpring) * dataset.springs.size(), hipMemcpyHostToDevice); gpuErrchk(hipDeviceSynchronize()); IndexT zero = 0; hipMemcpyToSymbol(dev_node_counter, &zero, sizeof(IndexT), 0, hipMemcpyHostToDevice); gpuErrchk(hipDeviceSynchronize()); assert(dataset.nodes.size() == kMaxNodes); // kernel_create_nodes1<<<(kMaxNodes + kThreads - 1) / kThreads, // kThreads>>>( // host_nodes, dataset.nodes.size()); hipLaunchKernelGGL(( kernel_create_nodes), dim3((kMaxNodes + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, host_nodes, dataset.nodes.size()); gpuErrchk(hipDeviceSynchronize()); // kernel_create_spring1<<<(kMaxSprings + kThreads - 1) / kThreads, // kThreads>>>( // host_nodes, dataset.springs.size()); hipLaunchKernelGGL(( kernel_create_springs), dim3((kMaxSprings + kThreads - 1) / kThreads), dim3(kThreads), 0, 0, host_springs, dataset.springs.size()); gpuErrchk(hipDeviceSynchronize()); hipFree(host_nodes); hipFree(host_springs); } int main(int /*argc*/, char ** /*argv*/) { // Allocate memory. 
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); hipMalloc(&dev_nodes, sizeof(NodeBase *) * kMaxNodes); // hipMemcpyToSymbol(dev_nodes, &host_nodes, sizeof(Node *), 0, // hipMemcpyHostToDevice); assert(dev_nodes != NULL); // printf("%p\n", dev_nodes); // Spring *host_springs; hipMalloc(&dev_springs, sizeof(SpringBase *) * kMaxSprings); // hipMemcpyToSymbol(dev_springs, &host_springs, sizeof(Spring *), 0, // hipMemcpyHostToDevice); initialize_memory(); Dataset dataset; random_dataset(dataset); load_dataset(dataset); auto time_start = std::chrono::system_clock::now(); for (int i = 0; i < kNumSteps; ++i) { #ifndef NDEBUG printf("%i\n", i); #endif // NDEBUG step(); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); printf("%lu\n", micros); #ifndef NDEBUG printf("Checksum: %f\n", checksum()); #endif // NDEBUG }
7f06089f8a6323b74d582813dc7412c0e1ddb211.cu
#include "structure.h" #define gpuErrchk(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } static const int kThreads = 256; using IndexT = int; __managed__ NodeBase **dev_nodes; __managed__ SpringBase **dev_springs; __device__ void new_NodeBase(NodeBase *node, float pos_x, float pos_y) { node->pos_x = pos_x; node->pos_y = pos_y; node->num_springs = 0; node->type = kTypeNodeBase; for (int i = 0; i < kMaxDegree; ++i) { node->springs[i] = NULL; } } __device__ void new_AnchorNode(NodeBase *node, float pos_x, float pos_y) { new_NodeBase(node, pos_x, pos_y); node->type = kTypeAnchorNode; } __device__ void new_AnchorPullNode(NodeBase *node, float pos_x, float pos_y, float vel_x, float vel_y) { new_AnchorNode(node, pos_x, pos_y); node->vel_x = vel_x; node->vel_y = vel_y; node->type = kTypeAnchorPullNode; } __device__ void new_Node(NodeBase *node, float pos_x, float pos_y, float mass) { new_NodeBase(node, pos_x, pos_y); node->mass = mass; node->type = kTypeNode; } // __device__ float NodeBase_distance_to(Node *node, Node *other) { // float dx = node->pos_x - other->pos_x; // float dy = node->pos_y - other->pos_y; // float dist_sq = dx * dx + dy * dy; // return sqrt(dist_sq); // } __device__ void NodeBase_add_spring(NodeBase *node, SpringBase *spring) { assert(node != NULL); // CONCORD int idx = atomicAdd(&node->num_springs, 1); assert(idx + 1 <= kMaxDegree); node->springs[idx] = spring; // CONCORD // assert(spring->p1 == node || spring->p2 == node); } __device__ void new_Spring(SpringBase *spring, NodeBase *p1, NodeBase *p2, float spring_factor, float max_force) { spring->is_active = true; spring->p1 = p1; spring->p2 = p2; spring->factor = spring_factor; spring->force = 0.0f; spring->max_force = max_force; // CONCORD spring->initial_length = p1->distance_to(p2); spring->delete_flag = false; // CONCORD // if (!(spring->initial_length > 0.0f)) // CONCORD // printf("%f \n", spring->initial_length); // CONCORD assert(spring->initial_length > 0.0f); NodeBase_add_spring(p1, spring); NodeBase_add_spring(p2, spring); } __device__ void NodeBase_remove_spring(NodeBase *node, SpringBase *spring) { for (int i = 0; i < kMaxDegree; ++i) { // CONCORD if (node->springs[i] == spring) { node->springs[i] = NULL; // CONCORD if (atomicSub(&node->num_springs, 1) == 1) { // Deleted last spring. node->type = 0; } return; } } // Spring not found. 
assert(false); } __device__ void AnchorPullNode_pull(NodeBase *node) { node->pos_x += node->vel_x * kDt; node->pos_y += node->vel_y * kDt; } __device__ void Spring_self_destruct(SpringBase *spring) { // CONCORD NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); NodeBase_remove_spring(p1, spring); // CONCORD NodeBase_remove_spring(p2, spring); spring->is_active = false; } __device__ void Spring_compute_force(SpringBase *spring) { // CONCORD float dist; NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); CONCORD(dist, p1, distance_to(p2)); // CONCORD float int_len ; CONCORD(int_len,spring,get_init_len()); float displacement = max(0.0f, dist - int_len); // CONCORD CONCORD(spring, update_force(displacement)); // CONCORD bool cond; CONCORD(cond, spring,is_max_force()); if (cond) { // CONCORD CONCORD( p1, remove_spring(spring)); // CONCORD CONCORD( p2, remove_spring(spring)); // CONCORD CONCORD(spring, deactivate()); // Spring_self_destruct(spring); } } __device__ void Node_move(NodeBase *node) { float force_x = 0.0f; float force_y = 0.0f; for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *s; CONCORD(s, node, spring(i)); ; if (s != NULL) { NodeBase *from; NodeBase *to; NodeBase *p1; NodeBase *p2; CONCORD(p1, s, get_p1()); if (p1 == node) { from = node; CONCORD(to, s, get_p2()); } else { CONCORD(p2, s, get_p2()); assert(p2 == node); from = node; CONCORD(to, s, get_p1()); } // Calculate unit vector. // CONCORD float dist; CONCORD(dist, to, distance_to(from)); ; // CONCORD float unit_x; CONCORD(unit_x, to, unit_x(from, dist)); ; // CONCORD float unit_y; CONCORD(unit_y, to, unit_y(from, dist)); ; // Apply force. // CONCORD float temp_cond; CONCORD(temp_cond, s, get_force()); force_x += unit_x * temp_cond; // CONCORD CONCORD(temp_cond, s, get_force()); force_y += unit_y * temp_cond; } } // Calculate new velocity and position. // CONCORD CONCORD(node, update_vel_x(force_x)); ; // CONCORD CONCORD(node, update_vel_y(force_y)); ; // CONCORD CONCORD(node, update_pos_x(force_x)); ; // CONCORD CONCORD(node, update_pos_y(force_y)); ; } __device__ void NodeBase_initialize_bfs(NodeBase *node) { if (node->type == kTypeAnchorNode) { // CONCORD CONCORD(node, set_distance(0)); ; } else { // CONCORD CONCORD(node, set_distance(kMaxDistance)); ; // should be int_max } } __device__ bool dev_bfs_continue; __device__ void NodeBase_bfs_visit(NodeBase *node, int distance) { // CONCORD float dis; CONCORD(dis, node, get_distance()); if (distance == dis) { // Continue until all vertices were visited. dev_bfs_continue = true; for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *spring; CONCORD(spring, node, spring(i)); ; // CONCORD if (spring != NULL) { // Find neighboring vertices. NodeBase *n; // CONCORD NodeBase *temo_; CONCORD(temo_, spring, get_p1()); if (node == temo_) { // CONCORD CONCORD(n, spring, get_p2()); ; } else { // CONCORD CONCORD(n, spring, get_p1()); ; } float dis2; CONCORD(dis2, n, get_distance()); // CONCORD if (dis2 == kMaxDistance) { // Set distance on neighboring vertex if unvisited. 
// CONCORD CONCORD(n, set_distance(distance + 1)); ; } } } } } __device__ void Spring_bfs_delete(SpringBase *spring) { // CONCORD if (spring->delete_flag) { NodeBase *p1; NodeBase *p2; CONCORD(p1, spring, get_p1()); CONCORD(p2, spring, get_p2()); // CONCORD CONCORD(p1, remove_spring(spring)); ; // CONCORD CONCORD(p2, remove_spring(spring)); ; // CONCORD CONCORD(spring, deactivate()); ; } } __device__ void NodeBase_bfs_set_delete_flags(NodeBase *node) { if (node->distance == kMaxDistance) { // should be int_max for (int i = 0; i < kMaxDegree; ++i) { // CONCORD SpringBase *spring; CONCORD(spring, node, spring(i)); ; // CONCORD if (spring != NULL) { spring->delete_flag = true; // Spring_bfs_delete(spring); } } } } // Only for rendering and checksum computation. __device__ int dev_num_springs; __device__ SpringInfo dev_spring_info[kMaxSprings]; int host_num_springs; SpringInfo host_spring_info[kMaxSprings]; __device__ void Spring_add_to_rendering_array(SpringBase *spring) { // CONCORD int idx = atomicAdd(&dev_num_springs, 1); dev_spring_info[idx].p1_x = spring->p1->pos_x; dev_spring_info[idx].p1_y = spring->p1->pos_y; dev_spring_info[idx].p2_x = spring->p2->pos_x; dev_spring_info[idx].p2_y = spring->p2->pos_y; dev_spring_info[idx].force = spring->force; dev_spring_info[idx].max_force = spring->max_force; } __global__ void kernel_AnchorPullNode_pull() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type == kTypeAnchorPullNode) { // CONCORD CONCORD(dev_nodes[i], pull()); ; } } } __global__ void kernel_Node_move() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type == kTypeNode) { Node_move(dev_nodes[i]); } } } __global__ void kernel_NodeBase_initialize_bfs() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_initialize_bfs(dev_nodes[i]); } } } __global__ void kernel_NodeBase_bfs_visit(int dist) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_bfs_visit(dev_nodes[i], dist); } } } __global__ void kernel_NodeBase_bfs_set_delete_flags() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { if (dev_nodes[i]->type != 0) { NodeBase_bfs_set_delete_flags(dev_nodes[i]); } } } __global__ void kernel_Spring_compute_force() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_compute_force(dev_springs[i]); } } } __global__ void kernel_Spring_bfs_delete() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_bfs_delete(dev_springs[i]); } } } __global__ void kernel_Spring_add_to_rendering_array() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += blockDim.x * gridDim.x) { // CONCORD if (dev_springs[i]->get_is_active()) { Spring_add_to_rendering_array(dev_springs[i]); } } } __global__ void kernel_initialize_nodes() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxNodes; i += blockDim.x * gridDim.x) { dev_nodes[i] = new Node(); assert(dev_nodes[i] != NULL); dev_nodes[i]->type = 0; } } __global__ void kernel_initialize_springs() { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kMaxSprings; i += 
blockDim.x * gridDim.x) { // CONCORD dev_springs[i] = new Spring(); // CONCORD assert(dev_springs[i] != NULL); dev_springs[i]->is_active = false; } } void transfer_data() { int zero = 0; cudaMemcpyToSymbol(dev_num_springs, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); gpuErrchk(cudaDeviceSynchronize()); kernel_Spring_add_to_rendering_array<<<128, 128>>>(); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(&host_num_springs, dev_num_springs, sizeof(int), 0, cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(host_spring_info, dev_spring_info, sizeof(SpringInfo) * host_num_springs, 0, cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); } float checksum() { transfer_data(); float result = 0.0f; // CONCORD for (int i = 0; i < host_num_springs; ++i) { result += host_spring_info[i].p1_x * host_spring_info[i].p2_y * host_spring_info[i].force; } return result; } void compute() { kernel_Spring_compute_force<<<(kMaxSprings + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_Node_move<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); } void bfs_and_delete() { // Perform BFS to check reachability. kernel_NodeBase_initialize_bfs<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); for (int i = 0; i < kMaxDistance; ++i) { bool continue_flag = false; cudaMemcpyToSymbol(dev_bfs_continue, &continue_flag, sizeof(bool), 0, cudaMemcpyHostToDevice); kernel_NodeBase_bfs_visit<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(i); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(&continue_flag, dev_bfs_continue, sizeof(bool), 0, cudaMemcpyDeviceToHost); if (!continue_flag) break; } // Delete springs (and nodes). 
kernel_NodeBase_bfs_set_delete_flags<<< (kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_Spring_bfs_delete<<<(kMaxSprings + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); } void step() { kernel_AnchorPullNode_pull<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); for (int i = 0; i < kNumComputeIterations; ++i) { compute(); } bfs_and_delete(); } void initialize_memory() { kernel_initialize_nodes<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_initialize_springs<<<(kMaxSprings + kThreads - 1) / kThreads, kThreads>>>(); gpuErrchk(cudaDeviceSynchronize()); } __device__ IndexT dev_tmp_nodes[kMaxNodes]; __device__ IndexT dev_node_counter; __global__ void kernel_create_nodes(DsNode *nodes, int num_nodes) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_nodes; i += blockDim.x * gridDim.x) { int idx = atomicAdd(&dev_node_counter, 1); assert(dev_nodes != NULL); dev_tmp_nodes[i] = idx; assert(dev_nodes[idx] != NULL); if (nodes[i].type == kTypeNode) { new_Node(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y, nodes[i].mass); } else if (nodes[i].type == kTypeAnchorPullNode) { new_AnchorPullNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y, nodes[i].vel_x, nodes[i].vel_y); } else if (nodes[i].type == kTypeAnchorNode) { new_AnchorNode(dev_nodes[idx], nodes[i].pos_x, nodes[i].pos_y); } else { assert(false); } } } __global__ void kernel_create_springs(DsSpring *springs, int num_springs) { // CONCORD for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_springs; i += blockDim.x * gridDim.x) { // CONCORD assert(dev_springs[i] != nullptr); new_Spring(dev_springs[i], dev_nodes[dev_tmp_nodes[springs[i].p1]], dev_nodes[dev_tmp_nodes[springs[i].p2]], springs[i].spring_factor, springs[i].max_force); // printf("%p \n", dev_springs[i]); } } void load_dataset(Dataset &dataset) { DsNode *host_nodes; cudaMalloc(&host_nodes, sizeof(DsNode) * dataset.nodes.size()); cudaMemcpy(host_nodes, dataset.nodes.data(), sizeof(DsNode) * dataset.nodes.size(), cudaMemcpyHostToDevice); DsSpring *host_springs; cudaMalloc(&host_springs, sizeof(DsSpring) * dataset.springs.size()); cudaMemcpy(host_springs, dataset.springs.data(), sizeof(DsSpring) * dataset.springs.size(), cudaMemcpyHostToDevice); gpuErrchk(cudaDeviceSynchronize()); IndexT zero = 0; cudaMemcpyToSymbol(dev_node_counter, &zero, sizeof(IndexT), 0, cudaMemcpyHostToDevice); gpuErrchk(cudaDeviceSynchronize()); assert(dataset.nodes.size() == kMaxNodes); // kernel_create_nodes1<<<(kMaxNodes + kThreads - 1) / kThreads, // kThreads>>>( // host_nodes, dataset.nodes.size()); kernel_create_nodes<<<(kMaxNodes + kThreads - 1) / kThreads, kThreads>>>( host_nodes, dataset.nodes.size()); gpuErrchk(cudaDeviceSynchronize()); // kernel_create_spring1<<<(kMaxSprings + kThreads - 1) / kThreads, // kThreads>>>( // host_nodes, dataset.springs.size()); kernel_create_springs<<<(kMaxSprings + kThreads - 1) / kThreads, kThreads>>>(host_springs, dataset.springs.size()); gpuErrchk(cudaDeviceSynchronize()); cudaFree(host_nodes); cudaFree(host_springs); } int main(int /*argc*/, char ** /*argv*/) { // Allocate memory. 
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); cudaMalloc(&dev_nodes, sizeof(NodeBase *) * kMaxNodes); // cudaMemcpyToSymbol(dev_nodes, &host_nodes, sizeof(Node *), 0, // cudaMemcpyHostToDevice); assert(dev_nodes != NULL); // printf("%p\n", dev_nodes); // Spring *host_springs; cudaMalloc(&dev_springs, sizeof(SpringBase *) * kMaxSprings); // cudaMemcpyToSymbol(dev_springs, &host_springs, sizeof(Spring *), 0, // cudaMemcpyHostToDevice); initialize_memory(); Dataset dataset; random_dataset(dataset); load_dataset(dataset); auto time_start = std::chrono::system_clock::now(); for (int i = 0; i < kNumSteps; ++i) { #ifndef NDEBUG printf("%i\n", i); #endif // NDEBUG step(); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); printf("%lu\n", micros); #ifndef NDEBUG printf("Checksum: %f\n", checksum()); #endif // NDEBUG }
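Beyond the launch-syntax change, the pair above also relies on device-side allocation, so hipify renames the malloc-heap limit and synchronization calls as well. The sketch below is illustrative (not taken from the dataset), assumes hipcc, and uses made-up sizes; CUDA forms are kept in comments.

// Illustrative renames for the device-heap limit and synchronization (hypothetical example).
#include <hip/hip_runtime.h>
#include <cstdio>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line) {   // CUDA: cudaError_t
    if (code != hipSuccess)                                            // CUDA: cudaSuccess
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
}

__global__ void alloc_on_device(int **out) {
    *out = new int(42);   // device-side new draws from the malloc heap set below
}

int main() {
    // CUDA: cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64ULL * 1024 * 1024);
    gpuErrchk(hipDeviceSetLimit(hipLimitMallocHeapSize, 64ULL * 1024 * 1024));
    int **d_out = NULL;
    gpuErrchk(hipMalloc((void **)&d_out, sizeof(int *)));        // CUDA: cudaMalloc
    // CUDA: alloc_on_device<<<1, 1>>>(d_out);
    hipLaunchKernelGGL(alloc_on_device, dim3(1), dim3(1), 0, 0, d_out);
    gpuErrchk(hipDeviceSynchronize());                           // CUDA: cudaDeviceSynchronize
    gpuErrchk(hipFree(d_out));                                   // CUDA: cudaFree
    return 0;
}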
fdc641a03ab1b678a5023c16de8cfcc57de4c4a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMath.hip" #else void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(scalar_t) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); std::vector<int64_t> size(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } size[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, {}); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. scalar_t *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE; auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream.stream(), data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard { auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize); CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get()); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(hipMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<scalar_t, unsigned int>), hipMemcpyHostToDevice, stream.stream())); THCudaHostRecord(state, stackInputs); } // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(hipGetLastError()); } THCudaFree(state, d_inputs); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimensionLegacyNoScalars)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if TORCH_HIP_VERSION >= 7000 hipStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<scalar_t>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, THTensor_sizeLegacyNoScalars(self, dim)) ); div *= THTensor_sizeLegacyNoScalars(self, dim); } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(hipGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); if (size > 0) { int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); if (size > 0) { int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } } THCudaCheck(hipGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_), self_->storage_offset(), sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(std::isfinite(static_cast<double>(xmin)) && std::isfinite(static_cast<double>(xmax)) , 1, "unsupported range: ", xmin, " -> ", xmax); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); double size_d = ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); THArgCheck(size_d >= 0 && size_d <= static_cast<double>(PTRDIFF_MAX) , 1, "invalid size, possible overflow?"); ptrdiff_t size = static_cast<ptrdiff_t>(size_d); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(hipGetLastError()); } #endif
fdc641a03ab1b678a5023c16de8cfcc57de4c4a0.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMath.cu" #else void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(value))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } void THCTensor_(zero)(THCState *state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); if (THCTensor_(isContiguous)(state, self_)) { THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_), 0, sizeof(scalar_t) * THCTensor_(nElement)(state, self_), THCState_getCurrentStream(state))); } else { if (!THC_pointwiseApply1<scalar_t>( state, self_, TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(zero)(state, r_); } void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input)); THCTensor_(resizeAs)(state, r_, input); THCTensor_(fill)(state, r_, ScalarConvert<int, scalar_t>::to(1)); } ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t) { return THCTensor_(nElement)(state, t); } void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension) { THCTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THCTensor_(catArray)(state, result, inputs, 2, dimension); } void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension); inline void THCTensor_(check_shape_except_dim)(THCState *state, THCTensor *first, THCTensor *second, int dimension) { int first_dims = first->dim(); int second_dims = second->dim(); THArgCheck(first_dims == second_dims, 0, "Tensors must have same number of dimensions: got %d and %d", first_dims, second_dims); for (int dim = 0; dim < first_dims; dim++) { if (dim == dimension) { continue; } int64_t first_dim_size = THCTensor_(size)(state, first, dim); int64_t second_dim_size = THCTensor_(size)(state, second, dim); THArgCheck(first_dim_size == second_dim_size, 0, "Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d", dimension, (long long)first_dim_size, (long long)second_dim_size, dim); } } void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension) { // previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible // to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors // to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific // size (i.e. other empty sizes are not skipped). 
// FIXME: warn if this is the case int i, j, cohortMax; int64_t offset; bool hasSkippedInput = false; THCTensor *notSkippedTensor = NULL; // non-owning reference auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; }; int nDims = 0; for (i = 0; i < numInputs; i++) { if (should_skip(inputs[i])) { hasSkippedInput = true; continue; } nDims = inputs[i]->dim(); notSkippedTensor = inputs[i]; } // If all inputs are empty tensors, return an empty tensor if (notSkippedTensor == NULL) { return; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension); std::vector<int64_t> size(nDims); // Compute size of the result in the cat dimension int64_t cat_dim_size = 0; for (int i = 0; i < numInputs; i++) { THCTensor *tensor = inputs[i]; if (should_skip(tensor)) { continue; } THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension); cat_dim_size += THCTensor_(size)(state, tensor, dimension); } // Compute the size of the result for (int dim = 0; dim < nDims; dim++) { int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim); if (dim == dimension) { result_dim_size = cat_dim_size; } size[dim] = result_dim_size; } THCTensor_(resize)(state, result, size, {}); // We parallelize the copy if all 6 conditions pass: // // 1. There is more than one input tensor // 2. No empty inputs // 3. The result tensor is 32-bit indexable // 4. The number of dimensions is <= 4 // 5. All input tensors are contiguous (output tensor may be non-contig) // 6. All input tensors can use 32-bit indexing // 7. All input tensors are on the same device if (numInputs > 1 && !hasSkippedInput && result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS && THCTensor_canUse32BitIndexMath(state, result) && THCTensor_allContiguous(state, inputs, numInputs) && THCTensor_all32BitIndexable(state, inputs, numInputs) && THCTensor_allSameDevice(state, inputs, numInputs)) { // First, let's set up our kernel parameters. We start with a raw pointer to the storage // for the output Tensor. scalar_t *data = THCTensor_(data)(state, result); // Kernel Parameter size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE; auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize)); OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param; // Next, let's initialize the size, stride arrays for the output Tensor. 
for (i = 0; i < nDims; ++i) { param.outputSize[i] = THCTensor_(size)(state, result, i); param.outputStride[i] = THCTensor_(stride)(state, result, i); } at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); // Template Declarations for dim = 1, 2, 3, 4 #define HANDLE_CASE(DIMS) \ CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream.stream()>>>(data, d_inputs, param, dimension, param.outputStride[dimension]); // Now we loop offset = 0; for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) { // Re-allocate stackInputs every iteration to avoid read-after-write hazard { auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize); CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get()); cohortMax = 0; for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) { int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension); stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]); stackInputs[j].offset = offset; stackInputs[j].dimSize = dimSize; stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]); cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements; // update offset offset += dimSize; } THCudaCheck(cudaMemcpyAsync( d_inputs, stackInputs, j * sizeof(CatArrInputTensor<scalar_t, unsigned int>), cudaMemcpyHostToDevice, stream.stream())); THCudaHostRecord(state, stackInputs); } // Next, let's consider how we set our kernel launch parameters. // We borrow from THCApply, which the kernel's internal indexing // is based on. dim3 applyBlock = getApplyBlock(); //Get grid where x dim fills half gpu and y dim is number of tensors. //This will have cating two tensors fill the entire grid, but prevent //many threads from needlessly load meta data if their sizes is small. 
dim3 catGrid; getCatGrid(state, j, catGrid); switch (nDims) { case 1: HANDLE_CASE(1); break; case 2: HANDLE_CASE(2); break; case 3: HANDLE_CASE(3); break; case 4: HANDLE_CASE(4); break; } THCudaCheck(cudaGetLastError()); } THCudaFree(state, d_inputs); #undef HANDLE_CASE } else { offset = 0; for (j = 0; j < numInputs; j++) { if (should_skip(inputs[j])) continue; int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension); THCTensor *nt = THCTensor_(newWithTensor)(state, result); THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize); THCTensor_(copy)(state, nt, inputs[j]); THCTensor_(free)(state, nt); offset += dimSize; } } } void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self )); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor)); using namespace thrust::placeholders; THCThrustAllocator thrustAlloc(state); self = THCTensor_(newContiguous)(state, self); thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self)); int num_dim = THCTensor_(nDimensionLegacyNoScalars)(state, self); int64_t N = THCTensor_(nElement)(state, self); THCudaLongTensor_resize2d(state, tensor, N, num_dim); tensor = THCudaLongTensor_newContiguous(state, tensor); thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor)); thrust::counting_iterator<int64_t> idxfirst(0); thrust::counting_iterator<int64_t> idxlast = idxfirst + N; typedef thrust::device_ptr<int64_t> Iter; strided_range<Iter> strided_tensor(tensor_data, tensor_data+N*num_dim, num_dim); #if CUDA_VERSION >= 7000 cudaStream_t stream = THCState_getCurrentStream(state); #endif strided_range<Iter>::iterator dend = thrust::copy_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif idxfirst, idxlast, self_data, strided_tensor.begin(), NonZeroOp<scalar_t>() ); int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend); int64_t div = 1; for (int dim = num_dim-1; dim >= 0; dim--) { strided_range<Iter> stride_dim(tensor_data+dim, tensor_data+N*num_dim, num_dim); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(stream), #endif strided_tensor.begin(), strided_tensor.end(), stride_dim.begin(), idx_functor(div, THTensor_sizeLegacyNoScalars(self, dim)) ); div *= THTensor_sizeLegacyNoScalars(self, dim); } THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim); THCTensor_(free)(state, self); THCudaLongTensor_free(state, tensor); THCudaCheck(cudaGetLastError()); } void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_)); int nDimension = THCTensor_(nDimensionLegacyNoScalars)(state, src_); THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector"); if (nDimension == 2) { int64_t stride0 = THCTensor_(stride)(state, src_, 0); int64_t stride1 = THCTensor_(stride)(state, src_, 1); int64_t size0 = THCTensor_(size)(state, src_, 0); int64_t size1 = THCTensor_(size)(state, src_, 1); int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1); THCTensor_(resize1d)(state, self_, size); if (size > 0) { int64_t strideSelf = THCTensor_(stride)(state, self_, 0); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x))); int64_t start = (k >= 0 ? 
k * stride1 : -k * stride0); THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf); } } else { ptrdiff_t totalElements = THCTensor_(nElement)(state, src_); ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k; int64_t strideSrc = THTensor_strideLegacyNoScalars(src_, 0); THCTensor_(resize2d)(state, self_, size, size); THCTensor_(zero)(state, self_); if (size > 0) { int64_t stride0 = THCTensor_(stride)(state, self_, 0); int64_t stride1 = THCTensor_(stride)(state, self_, 1); const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size)); dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x))); ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0); THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>> (THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THCTensor_(resize2d)(state, self_, n, m); THCTensor_(zero)(state, self_); int64_t sz = THMin(n, m); int64_t stride = THCTensor_(stride)(state, self_, 0) + THCTensor_(stride)(state, self_, 1); THCTensor *diag = THCTensor_(newWithStorage1d)(state, THTensor_getStoragePtr(self_), self_->storage_offset(), sz, stride); THCTensor_(fill)(state, diag, ScalarConvert<int, scalar_t>::to(1)); THCTensor_(free)(state, diag); } accreal THCTensor_(trace)(THCState *state, THCTensor *src_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_)); THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix"); THCTensor *diag = THCTensor_(new)(state); THCTensor_(diag)(state, diag, src_, 0); accreal trace = THCTensor_(sumall)(state, diag); THCTensor_(free)(state, diag); return trace; } void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_)); THArgCheck(step > 0 || step < 0, 3, "step must be nonzero"); THArgCheck(std::isfinite(static_cast<double>(xmin)) && std::isfinite(static_cast<double>(xmax)) , 1, "unsupported range: ", xmin, " -> ", xmax); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound inconsistent with step sign"); double size_d = ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step); THArgCheck(size_d >= 0 && size_d <= static_cast<double>(PTRDIFF_MAX) , 1, "invalid size, possible overflow?"); ptrdiff_t size = static_cast<ptrdiff_t>(size_d); if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size); THCTensor *r = THCTensor_(newContiguous)(state, r_); LinspaceOp<scalar_t,accreal> linspace_method(xmin, step); thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r)); thrust::tabulate(data_, data_ + size, linspace_method); THCTensor_(freeCopyTo)(state, r, r_); THCudaCheck(cudaGetLastError()); } #endif
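The THCTensor_(nonzero) implementation above builds its index list with thrust::copy_if, using a counting iterator as the value range and the tensor data as the stencil, and only afterwards unravels each linear index into per-dimension coordinates with idx_functor. The stand-alone sketch below is not part of the file and uses names of my own; it shows just the copy_if-with-stencil step on a flat array, leaving out the strided unravel and the THC allocator/stream plumbing.

#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

struct IsNonZero {
  __host__ __device__ bool operator()(float x) const { return x != 0.0f; }
};

int main() {
  // Small input with a few nonzero entries.
  float h[8] = {0.f, 3.f, 0.f, 0.f, 5.f, 0.f, 7.f, 0.f};
  thrust::device_vector<float> x(h, h + 8);
  thrust::device_vector<long long> idx(8);

  // copy_if with a stencil: copy the linear index i whenever x[i] != 0,
  // exactly the pattern used for the nonzero kernel above.
  thrust::counting_iterator<long long> first(0);
  auto end = thrust::copy_if(first, first + 8, x.begin(), idx.begin(), IsNonZero());
  long long found = end - idx.begin();

  for (long long i = 0; i < found; ++i)
    printf("nonzero at linear index %lld\n", (long long)idx[i]);
  return 0;
}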
2b7e2682051f9b3155d34bc6854e04465b6f1316.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cuda_multiply(float *dst, float *src1, float *src2, int width, int height)
{
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    if(row < height && col < width) {
        int index = row * width + col;
        dst[index] = src1[index] * src2[index];
#ifdef debug
        printf("multiply dst[%d] = %f\n", index, dst[index]);
#endif
    }
}
2b7e2682051f9b3155d34bc6854e04465b6f1316.cu
#include "includes.h" __global__ void cuda_multiply(float *dst, float *src1, float *src2, int width, int height) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; if(row < height && col < width) { int index = row * width + col; dst[index] = src1[index] * src2[index]; #ifdef debug printf("multiply dst[%d] = %f\n", index, dst[index]); #endif } }
dec95a2846d021914eec0e73e538aa6095f149b5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void STREAM_Triad_Optimized_double(double *a, double *b, double *c, double scalar, size_t len)
{
    /*
     * Ensure size of thread index space is as large as or greater than
     * vector index space else return.
     */
    if (blockDim.x * gridDim.x < len) return;

    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < len)
        c[idx] = a[idx] + scalar * b[idx];
}
dec95a2846d021914eec0e73e538aa6095f149b5.cu
#include "includes.h" __global__ void STREAM_Triad_Optimized_double(double *a, double *b, double *c, double scalar, size_t len) { /* * Ensure size of thread index space is as large as or greater than * vector index space else return. */ if (blockDim.x * gridDim.x < len) return; size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) c[idx] = a[idx]+scalar*b[idx]; }
847d73433a95b8b06ec158e81cc3a658b5b879a8.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_array_init_r4__.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            size_t tsize = XSIZE * YSIZE;
            float *arr = NULL;
            // allocate tsize floats (not just tsize bytes) so the kernel cannot write past the end
            hipMalloc(&arr, XSIZE * YSIZE * sizeof(float));
            float val = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( gpu_array_init_r4__), dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr, val);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( gpu_array_init_r4__), dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr, val);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( gpu_array_init_r4__), dim3(gridBlock), dim3(threadBlock), 0, 0, tsize, arr, val);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // release the array before the next configuration is benchmarked
            hipFree(arr);
        }
    }
}
847d73433a95b8b06ec158e81cc3a658b5b879a8.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_array_init_r4__.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            size_t tsize = XSIZE * YSIZE;
            float *arr = NULL;
            // allocate tsize floats (not just tsize bytes) so the kernel cannot write past the end
            cudaMalloc(&arr, XSIZE * YSIZE * sizeof(float));
            float val = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            gpu_array_init_r4__<<<gridBlock, threadBlock>>>(tsize, arr, val);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                gpu_array_init_r4__<<<gridBlock, threadBlock>>>(tsize, arr, val);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                gpu_array_init_r4__<<<gridBlock, threadBlock>>>(tsize, arr, val);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // release the array before the next configuration is benchmarked
            cudaFree(arr);
        }
    }
}
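The harness above brackets 1000 asynchronous kernel launches with std::chrono and never synchronizes inside or after the timed loop, so the number it prints is largely the cost of enqueuing launches rather than the device time of the kernel. A hedged alternative using CUDA events, which measure elapsed time on the stream itself, is sketched below; fill_kernel is a hypothetical stand-in, since gpu_array_init_r4__.cu is not shown here.

#include <cuda_runtime.h>
#include <cstdio>

// Trivial stand-in kernel: write val into every element of an n-float array.
__global__ void fill_kernel(size_t n, float *arr, float val) {
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i < n) arr[i] = val;
}

int main() {
  const size_t n = 1 << 22;
  float *arr;
  cudaMalloc(&arr, n * sizeof(float));

  dim3 block(256);
  dim3 grid((unsigned)((n + block.x - 1) / block.x));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  fill_kernel<<<grid, block>>>(n, arr, 1.0f);        // warm-up launch
  cudaEventRecord(start);
  for (int i = 0; i < 1000; ++i)
    fill_kernel<<<grid, block>>>(n, arr, 1.0f);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                         // wait for the GPU to finish

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("avg per launch: %f ms\n", ms / 1000.0f);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(arr);
  return 0;
}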
575c4c63db71c1a1a10b9d39c2e78d4f799c581f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "CHECK.h"
#include "defs.h"

/*
   Uses the GPU to find all of the Waldos in the input map.
   Fills the locationType object with the locations of the waldos that it finds.

   @param map - one dimensional array to implement the 2D N by N array
                containing the waldos
   @param N - size of 1 dimension
   @param gpufound - struct that should be filled with the locations of the waldos
                     gpufound->indices - filled with row and col of each waldo;
                                         waldo positions are added to the array
                                         in the order of row then column
                     gpufound->size - size of indices array
                     gpufound->count - number of elements in the array
                                       (2 * number of waldos at the end)

   For example, if the waldos are in positions (3, 20), (10, 40), (2, 5), (3, 60)
   then the indices array will be filled as follows:
   gpufound->indices: 2, 5, 3, 20, 3, 60, 10, 40
   Note that the row and col pairs are in consecutive elements in the array
   and the array is ordered first by row (e.g., 2, 3, 3, 10) and then by
   column (e.g., 3, 20 comes before 3, 60).
   gpufound->count will be set to 8 since the indices array will have
   8 elements at the end.

   @return time it takes to find the waldos in milliseconds
*/
float gpuFindWaldoS(unsigned char * map, int N, locationType * gpufound)
{
    unsigned char * dMap;

    //create input array for GPU
    CHECK(hipMalloc((void **)&dMap, sizeof(unsigned char) * N * N));
    CHECK(hipMemcpy(dMap, map, sizeof(unsigned char) * N * N, hipMemcpyHostToDevice));

    //You may want to hipMalloc more space here that you will
    //need before the timing begins

    float gpuMsecTime = -1;
    hipEvent_t start_cpu, stop_cpu;

    //start the timing
    CHECK(hipEventCreate(&start_cpu));
    CHECK(hipEventCreate(&stop_cpu));
    CHECK(hipEventRecord(start_cpu));

    //Write the findWaldoS function.
    //Before exiting gpuFindWaldoS, your code
    //will need to have filled the gpufound struct.
    //You can either do that here or in your findWaldoS
    //function.
    //findWaldoS(...);

    //stop the timing
    CHECK(hipEventRecord(stop_cpu));
    CHECK(hipEventSynchronize(stop_cpu));
    CHECK(hipEventElapsedTime(&gpuMsecTime, start_cpu, stop_cpu));

    //release the space for the GPU arrays
    CHECK(hipFree(dMap));
    //free any other space you allocated

    return gpuMsecTime;
}
575c4c63db71c1a1a10b9d39c2e78d4f799c581f.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "CHECK.h"
#include "defs.h"

/*
   Uses the GPU to find all of the Waldos in the input map.
   Fills the locationType object with the locations of the waldos that it finds.

   @param map - one dimensional array to implement the 2D N by N array
                containing the waldos
   @param N - size of 1 dimension
   @param gpufound - struct that should be filled with the locations of the waldos
                     gpufound->indices - filled with row and col of each waldo;
                                         waldo positions are added to the array
                                         in the order of row then column
                     gpufound->size - size of indices array
                     gpufound->count - number of elements in the array
                                       (2 * number of waldos at the end)

   For example, if the waldos are in positions (3, 20), (10, 40), (2, 5), (3, 60)
   then the indices array will be filled as follows:
   gpufound->indices: 2, 5, 3, 20, 3, 60, 10, 40
   Note that the row and col pairs are in consecutive elements in the array
   and the array is ordered first by row (e.g., 2, 3, 3, 10) and then by
   column (e.g., 3, 20 comes before 3, 60).
   gpufound->count will be set to 8 since the indices array will have
   8 elements at the end.

   @return time it takes to find the waldos in milliseconds
*/
float gpuFindWaldoS(unsigned char * map, int N, locationType * gpufound)
{
    unsigned char * dMap;

    //create input array for GPU
    CHECK(cudaMalloc((void **)&dMap, sizeof(unsigned char) * N * N));
    CHECK(cudaMemcpy(dMap, map, sizeof(unsigned char) * N * N, cudaMemcpyHostToDevice));

    //You may want to cudaMalloc more space here that you will
    //need before the timing begins

    float gpuMsecTime = -1;
    cudaEvent_t start_cpu, stop_cpu;

    //start the timing
    CHECK(cudaEventCreate(&start_cpu));
    CHECK(cudaEventCreate(&stop_cpu));
    CHECK(cudaEventRecord(start_cpu));

    //Write the findWaldoS function.
    //Before exiting gpuFindWaldoS, your code
    //will need to have filled the gpufound struct.
    //You can either do that here or in your findWaldoS
    //function.
    //findWaldoS(...);

    //stop the timing
    CHECK(cudaEventRecord(stop_cpu));
    CHECK(cudaEventSynchronize(stop_cpu));
    CHECK(cudaEventElapsedTime(&gpuMsecTime, start_cpu, stop_cpu));

    //release the space for the GPU arrays
    CHECK(cudaFree(dMap));
    //free any other space you allocated

    return gpuMsecTime;
}
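The stub above deliberately leaves findWaldoS unwritten. Purely as an illustration, and not as the intended solution, the sketch below shows one common pattern: one thread per cell, with atomicAdd reserving two output slots per hit. It assumes a nonzero byte marks a waldo (the real criterion lives in defs.h, which is not shown), it uses plain arrays instead of the course's locationType/CHECK helpers, and it does not produce the row-then-column ordering the spec requires; the collected pairs would still need a sort or an ordered compaction afterwards.

#include <cuda_runtime.h>
#include <cstdio>

// One thread per cell: if the cell holds a waldo, grab two slots with atomicAdd
// and write (row, col). Output order is nondeterministic in this sketch.
__global__ void findWaldoSketch(const unsigned char *map, int N,
                                int *indices, int *count) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row >= N || col >= N) return;
  if (map[row * N + col] != 0) {          // assumption: nonzero byte == waldo
    int slot = atomicAdd(count, 2);
    indices[slot]     = row;
    indices[slot + 1] = col;
  }
}

int main() {
  const int N = 64;
  unsigned char h_map[N * N] = {0};
  h_map[3 * N + 20] = 1;                  // a couple of fake waldos
  h_map[10 * N + 40] = 1;

  unsigned char *d_map; int *d_idx, *d_cnt;
  cudaMalloc(&d_map, N * N);
  cudaMalloc(&d_idx, 2 * N * N * sizeof(int));
  cudaMalloc(&d_cnt, sizeof(int));
  cudaMemcpy(d_map, h_map, N * N, cudaMemcpyHostToDevice);
  cudaMemset(d_cnt, 0, sizeof(int));

  dim3 block(16, 16), grid((N + 15) / 16, (N + 15) / 16);
  findWaldoSketch<<<grid, block>>>(d_map, N, d_idx, d_cnt);

  int h_cnt = 0;
  cudaMemcpy(&h_cnt, d_cnt, sizeof(int), cudaMemcpyDeviceToHost);
  printf("found %d waldos\n", h_cnt / 2);

  cudaFree(d_map); cudaFree(d_idx); cudaFree(d_cnt);
  return 0;
}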
efeb4b7d7ebdefa68e699378f648a4d84927a30f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define imin(a,b) (a<b?a:b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock ); __global__ void dot( float *a, float *b, float *c ) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); // for reductions, threadsPerBlock must be a power of 2 // because of the following code int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memory on the cpu side a = (float*)malloc( N*sizeof(float) ); b = (float*)malloc( N*sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid*sizeof(float) ); // allocate the memory on the GPU hipMalloc( (void**)&dev_a, N*sizeof(float) ); hipMalloc( (void**)&dev_b, N*sizeof(float) ); hipMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ); // fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; b[i] = i*2; } // copy the arrays 'a' and 'b' to the GPU hipMemcpy( dev_a, a, N*sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( dev_b, b, N*sizeof(float), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c ); // copy the array 'c' back from the GPU to the CPU hipMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost ); // finish up on the CPU side c = 0; for (int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } #define sum_squares(x) (x*(x+1)*(2*x+1)/6) printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) ); // free memory on the gpu side hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_partial_c ); // free memory on the cpu side free( a ); free( b ); free( partial_c ); }
efeb4b7d7ebdefa68e699378f648a4d84927a30f.cu
#include <stdio.h> #define imin(a,b) (a<b?a:b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock ); __global__ void dot( float *a, float *b, float *c ) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); // for reductions, threadsPerBlock must be a power of 2 // because of the following code int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memory on the cpu side a = (float*)malloc( N*sizeof(float) ); b = (float*)malloc( N*sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid*sizeof(float) ); // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, N*sizeof(float) ); cudaMalloc( (void**)&dev_b, N*sizeof(float) ); cudaMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ); // fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; b[i] = i*2; } // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice ); dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b, dev_partial_c ); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost ); // finish up on the CPU side c = 0; for (int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } #define sum_squares(x) (x*(x+1)*(2*x+1)/6) printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) ); // free memory on the gpu side cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_partial_c ); // free memory on the cpu side free( a ); free( b ); free( partial_c ); }
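The dot-product file above relies on a hand-written shared-memory reduction (which, as its comment notes, needs a power-of-two threadsPerBlock) followed by a final sum on the CPU. As a cross-check, not part of the original, thrust::inner_product does the same reduction in a single call; the result may differ from the two-stage version in the last few bits because the floating-point summation order differs.

#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <vector>
#include <cstdio>

int main() {
  const int n = 33 * 1024;
  std::vector<float> ha(n), hb(n);
  for (int i = 0; i < n; ++i) { ha[i] = (float)i; hb[i] = (float)(2 * i); }  // same data as above

  thrust::device_vector<float> a(ha.begin(), ha.end());
  thrust::device_vector<float> b(hb.begin(), hb.end());

  // One call replaces the block reduction kernel plus the final CPU loop.
  float c = thrust::inner_product(a.begin(), a.end(), b.begin(), 0.0f);

  float x = (float)(n - 1);
  printf("thrust dot = %.6g, closed form = %.6g\n",
         c, 2 * (x * (x + 1) * (2 * x + 1) / 6));
  return 0;
}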
7f9161f1aa9036d794bba1918b3809dbd8cdc166.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include "globals.h" #include "cuda_functions.h" #include "cuda_math.h" #include "boundary.h" /* * The L-versions of the RHS have to be ran with * - the L-version of the derivatives * i.e.: derDev1xL instead of derDev1x * - the L-version of the grid * i.e.: h_gridL[0] instead of h_grid[0] */ /* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX_old thanks to the beneficial memory layout that allows to use small pencils */ /* For the Y and Z direction, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils which speed * up significantly the computation. Therefore 5 streams are used * stream 0 -> complete X RHS (in RHSDeviceSharedFlxX_old) (small pencil grid) * stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid) * stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid) * stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid) * stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/ __global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidX(); int si = id.i + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rXtmp=0; myprec uXtmp=0; myprec vXtmp=0; myprec wXtmp=0; myprec eXtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][mx+stencilSize*2]; __shared__ myprec s_u[sPencils][mx+stencilSize*2]; __shared__ myprec s_v[sPencils][mx+stencilSize*2]; __shared__ myprec s_w[sPencils][mx+stencilSize*2]; __shared__ myprec s_h[sPencils][mx+stencilSize*2]; __shared__ myprec s_t[sPencils][mx+stencilSize*2]; __shared__ myprec s_p[sPencils][mx+stencilSize*2]; __shared__ myprec s_m[sPencils][mx+stencilSize*2]; __shared__ myprec s_l[sPencils][mx+stencilSize*2]; #if !periodicX __shared__ myprec s_s0[sPencils][mx+stencilSize*2]; __shared__ myprec s_s4[sPencils][mx+stencilSize*2]; __shared__ myprec s_s8[sPencils][mx+stencilSize*2]; #endif __shared__ myprec s_dil[sPencils][mx+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; #if !periodicX s_s0[sj][si]= gij[0][id.g]; s_s4[sj][si]= gij[4][id.g]; s_s8[sj][si]= gij[8][id.g]; #endif s_dil[sj][si] = dil[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.i < stencilSize) { #if periodicX perBCx(s_r[sj],si); perBCx(s_u[sj],si); perBCx(s_v[sj],si); perBCx(s_w[sj],si); perBCx(s_h[sj],si); perBCx(s_t[sj],si); perBCx(s_p[sj],si); perBCx(s_m[sj],si); perBCx(s_l[sj],si); #else wallBCxMir(s_p[sj],si); wallBCxVel(s_u[sj],si); wallBCxVel(s_v[sj],si); 
wallBCxVel(s_w[sj],si); wallBCxExt(s_t[sj],si,1.0,1.0); stateBoundPT(s_r[sj], s_t[sj], s_u[sj], s_v[sj], s_w[sj], s_h[sj], s_p[sj], s_m[sj], s_l[sj], si); wallBCxMir(s_s0[sj],si); wallBCxVel(s_s4[sj],si); wallBCxVel(s_s8[sj],si); #endif } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uXtmp = ( 2 * gij[0][id.g] - 2./3.*s_dil[sj][si] ); vXtmp = ( gij[1][id.g] + gij[3][id.g] ); wXtmp = ( gij[2][id.g] + gij[6][id.g] ); //adding the viscous dissipation part duidx*mu*six eXtmp = s_m[sj][si]*(uXtmp*gij[0][id.g] + vXtmp*gij[1][id.g] + wXtmp*gij[2][id.g]); //Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse); derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx uXtmp *= wrk2; vXtmp *= wrk2; wXtmp *= wrk2; // viscous fluxes derivative mu*d^2ui dx^2 derDevSharedV2x(&wrk1,s_u[sj],si); uXtmp = uXtmp + wrk1*s_m[sj][si]; derDevSharedV2x(&wrk1,s_v[sj],si); vXtmp = vXtmp + wrk1*s_m[sj][si]; derDevSharedV2x(&wrk1,s_w[sj],si); wXtmp = wXtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidx2 + dmudx * six) eXtmp = eXtmp + s_u[sj][si]*uXtmp + s_v[sj][si]*vXtmp + s_w[sj][si]*wXtmp; //adding the molecular conduction part (d2 temp dx2*lambda + dlambda dx * d temp dx) derDevSharedV2x(&wrk1,s_t[sj],si); eXtmp = eXtmp + wrk1*s_l[sj][si]; derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eXtmp = eXtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dx; fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si); rXtmp = wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si); uXtmp = uXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si); vXtmp = vXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si); wXtmp = wXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si); eXtmp = eXtmp + wrk1; __syncthreads(); // pressure and dilation derivatives if (id.i < stencilSize) { #if periodicX perBCx(s_dil[sj],si); #else wallBCxDil(s_dil[sj],s_s0[sj],s_s4[sj],s_s8[sj],si); #endif } __syncthreads(); derDevSharedV1x(&wrk2,s_dil[sj],si); derDevShared1x(&wrk1 ,s_p[sj],si); uXtmp = uXtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eXtmp = eXtmp + s_m[sj][si]*wrk2/3.0*s_u[sj][si]; rX[id.g] = rXtmp; uX[id.g] = uXtmp; vX[id.g] = vXtmp; wX[id.g] = wXtmp; eX[id.g] = eXtmp ; } __global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidYFlx(); int si = id.j + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rYtmp=0; myprec uYtmp=0; myprec vYtmp=0; myprec wYtmp=0; myprec eYtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][my+stencilSize*2]; __shared__ myprec s_u[sPencils][my+stencilSize*2]; __shared__ myprec s_v[sPencils][my+stencilSize*2]; __shared__ myprec s_w[sPencils][my+stencilSize*2]; __shared__ myprec s_h[sPencils][my+stencilSize*2]; __shared__ myprec s_t[sPencils][my+stencilSize*2]; __shared__ myprec s_p[sPencils][my+stencilSize*2]; __shared__ myprec s_m[sPencils][my+stencilSize*2]; __shared__ myprec s_l[sPencils][my+stencilSize*2]; __shared__ myprec s_s3[sPencils][my+stencilSize*2]; __shared__ myprec 
s_s4[sPencils][my+stencilSize*2]; __shared__ myprec s_s5[sPencils][my+stencilSize*2]; __shared__ myprec s_dil[sPencils][my+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; s_dil[sj][si] = dil[id.g]; s_s3[sj][si] = gij[3][id.g]; s_s4[sj][si] = gij[4][id.g]; s_s5[sj][si] = gij[5][id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.j < stencilSize) { perBCy(s_r[sj],si); perBCy(s_u[sj],si); perBCy(s_v[sj],si); perBCy(s_w[sj],si); perBCy(s_h[sj],si); perBCy(s_t[sj],si); perBCy(s_p[sj],si); perBCy(s_m[sj],si); perBCy(s_l[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uYtmp = ( s_s3[sj][si] + gij[1][id.g] ) ; vYtmp = ( 2 * s_s4[sj][si] - 2./3.*s_dil[sj][si] ) ; wYtmp = ( s_s5[sj][si] + gij[7][id.g] ) ; //adding the viscous dissipation part duidy*mu*siy eYtmp = s_m[sj][si]*(uYtmp*s_s3[sj][si] + vYtmp*s_s4[sj][si] + wYtmp*s_s5[sj][si]); //Adding here the terms d (mu) dy * siy; derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx uYtmp *= wrk2; vYtmp *= wrk2; wYtmp *= wrk2; // viscous fluxes derivative mu*d^2dui dy^2 derDevSharedV2y(&wrk1,s_u[sj],si); uYtmp = uYtmp + wrk1*s_m[sj][si]; derDevSharedV2y(&wrk1,s_v[sj],si); vYtmp = vYtmp + wrk1*s_m[sj][si]; derDevSharedV2y(&wrk1,s_w[sj],si); wYtmp = wYtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy) eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp; derDevSharedV2y(&wrk1,s_t[sj],si); eYtmp = eYtmp + wrk1*s_l[sj][si]; derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eYtmp = eYtmp + wrk1*wrk2; // split advection terms //Adding here the terms - d (ru phi) dy; fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si); rYtmp = wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si); uYtmp = uYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si); vYtmp = vYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si); wYtmp = wYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si); eYtmp = eYtmp + wrk1; __syncthreads(); // pressure and dilation derivatives if (id.j < stencilSize) { perBCy(s_dil[sj],si); } __syncthreads(); derDevSharedV1y(&wrk2,s_dil[sj],si); derDevShared1y(&wrk1,s_p[sj],si); vYtmp = vYtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eYtmp = eYtmp + s_m[sj][si]*wrk2/3.0*s_v[sj][si]; #if useStreams rY[id.g] = rYtmp; uY[id.g] = uYtmp; vY[id.g] = vYtmp; wY[id.g] = wYtmp; eY[id.g] = eYtmp; #else rY[id.g] += rYtmp; uY[id.g] += uYtmp; vY[id.g] += vYtmp; wY[id.g] += wYtmp; eY[id.g] += eYtmp; #endif } __global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidZFlx(); int si = id.k + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rZtmp=0; myprec uZtmp=0; myprec vZtmp=0; myprec wZtmp=0; myprec eZtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][mz+stencilSize*2]; __shared__ myprec 
s_u[sPencils][mz+stencilSize*2]; __shared__ myprec s_v[sPencils][mz+stencilSize*2]; __shared__ myprec s_w[sPencils][mz+stencilSize*2]; __shared__ myprec s_h[sPencils][mz+stencilSize*2]; __shared__ myprec s_t[sPencils][mz+stencilSize*2]; __shared__ myprec s_p[sPencils][mz+stencilSize*2]; __shared__ myprec s_m[sPencils][mz+stencilSize*2]; __shared__ myprec s_l[sPencils][mz+stencilSize*2]; __shared__ myprec s_s6[sPencils][mz+stencilSize*2]; __shared__ myprec s_s7[sPencils][mz+stencilSize*2]; __shared__ myprec s_s8[sPencils][mz+stencilSize*2]; __shared__ myprec s_dil[sPencils][mz+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; s_s6[sj][si] = gij[6][id.g]; s_s7[sj][si] = gij[7][id.g]; s_s8[sj][si] = gij[8][id.g]; s_dil[sj][si] = dil[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.k < stencilSize) { perBCz(s_r[sj],si); perBCz(s_u[sj],si); perBCz(s_v[sj],si); perBCz(s_w[sj],si); perBCz(s_h[sj],si); perBCz(s_t[sj],si); perBCz(s_p[sj],si); perBCz(s_m[sj],si); perBCz(s_l[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uZtmp = ( s_s6[sj][si] + gij[2][id.g] ); vZtmp = ( s_s7[sj][si] + gij[5][id.g] ); wZtmp = (2 * s_s8[sj][si] - 2./3.*s_dil[sj][si] ); //adding the viscous dissipation part duidz*mu*siz eZtmp = s_m[sj][si]*(uZtmp*s_s6[sj][si] + vZtmp*s_s7[sj][si] + wZtmp*s_s8[sj][si]); //Adding here the terms d (mu) dz * szj; derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz uZtmp *= wrk2; vZtmp *= wrk2; wZtmp *= wrk2; // viscous fluxes derivative derDevSharedV2z(&wrk1,s_u[sj],si); uZtmp = uZtmp + wrk1*s_m[sj][si]; derDevSharedV2z(&wrk1,s_v[sj],si); vZtmp = vZtmp + wrk1*s_m[sj][si]; derDevSharedV2z(&wrk1,s_w[sj],si); wZtmp = wZtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz) derDevSharedV2z(&wrk1,s_t[sj],si); eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp + wrk1*s_l[sj][si]; derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eZtmp = eZtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dz; fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si); rZtmp = wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si); uZtmp = uZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si); vZtmp = vZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si); wZtmp = wZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si); eZtmp = eZtmp + wrk1; __syncthreads(); // pressure and dilation derivatives __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); } __syncthreads(); derDevSharedV1z(&wrk2,s_dil[sj],si); derDevShared1z(&wrk1,s_p[sj],si); wZtmp = wZtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eZtmp = eZtmp + s_m[sj][si]*wrk2/3.0*s_w[sj][si]; #if useStreams rZ[id.g] = rZtmp; uZ[id.g] = uZtmp; vZ[id.g] = vZtmp; wZ[id.g] = wZtmp + *dpdz; eZ[id.g] = eZtmp + *dpdz*s_w[sj][si] ; #else rZ[id.g] += rZtmp; uZ[id.g] += uZtmp; vZ[id.g] += vZtmp; wZ[id.g] += wZtmp + *dpdz; eZ[id.g] += eZtmp + *dpdz*s_w[sj][si] ; #endif __syncthreads(); } __global__ void RHSDeviceSharedFlxY_new(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , 
myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidYFlx(); int si = id.j + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rYtmp=0; myprec uYtmp=0; myprec vYtmp=0; myprec wYtmp=0; myprec eYtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_u[sPencils][my+stencilSize*2]; __shared__ myprec s_v[sPencils][my+stencilSize*2]; __shared__ myprec s_w[sPencils][my+stencilSize*2]; __shared__ myprec s_dil[sPencils][my+stencilSize*2]; __shared__ myprec s_prop[sPencils][my+stencilSize*2]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_dil[sj][si] = dil[id.g]; s_prop[sj][si] = mu[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.j < stencilSize) { perBCy(s_u[sj],si); perBCy(s_v[sj],si); perBCy(s_w[sj],si); perBCy(s_prop[sj],si); perBCy(s_dil[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uYtmp = ( gij[3][id.g] + gij[1][id.g] ); vYtmp = (2 * gij[4][id.g] - 2./3.*s_dil[sj][si] ); wYtmp = ( gij[5][id.g] + gij[7][id.g] ); //adding the viscous dissipation part duidy*mu*siy eYtmp = s_prop[sj][si]*(uYtmp*gij[3][id.g] + vYtmp*gij[4][id.g] + wYtmp*gij[5][id.g]); //Adding here the terms d (mu) dy * syj; derDevSharedV1y(&wrk2,s_prop[sj],si); //wrk2 = d (mu) dy uYtmp *= wrk2; vYtmp *= wrk2; wYtmp *= wrk2; // viscous fluxes derivative derDevSharedV2y(&wrk1,s_u[sj],si); uYtmp = uYtmp + wrk1*s_prop[sj][si]; derDevSharedV2y(&wrk1,s_v[sj],si); vYtmp = vYtmp + wrk1*s_prop[sj][si]; derDevSharedV2y(&wrk1,s_w[sj],si); wYtmp = wYtmp + wrk1*s_prop[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy) eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp; //dilation derivatives derDevSharedV1y(&wrk2,s_dil[sj],si); vYtmp = vYtmp + s_prop[sj][si]*wrk2/3.0; eYtmp = eYtmp + s_prop[sj][si]*wrk2/3.0*s_v[sj][si]; // pressure derivatives s_dil[sj][si] = p[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); } __syncthreads(); derDevShared1y(&wrk1,s_dil[sj],si); vYtmp = vYtmp - wrk1; // fourier terms s_prop[sj][si] = lam[id.g]; s_dil[sj][si] = t[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); perBCy(s_prop[sj],si); } __syncthreads(); derDevSharedV2y(&wrk1,s_dil[sj],si); eYtmp = eYtmp + wrk1*s_prop[sj][si]; derDevSharedV1y(&wrk2,s_prop[sj],si); //wrk2 = d (lam) dy derDevSharedV1y(&wrk1,s_dil[sj] ,si); //wrk1 = d (t) dy eYtmp = eYtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dy; s_prop[sj][si] = r[id.g]; s_dil[sj][si] = h[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); perBCy(s_prop[sj],si); } __syncthreads(); fluxQuadSharedy(&wrk1,s_prop[sj],s_v[sj],si); rYtmp = wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_u[sj],si); uYtmp = uYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_v[sj],si); vYtmp = vYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_w[sj],si); wYtmp = wYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_dil[sj],si); eYtmp = eYtmp + wrk1; __syncthreads(); #if useStreams rY[id.g] = rYtmp; uY[id.g] = uYtmp; vY[id.g] = vYtmp; wY[id.g] = wYtmp; eY[id.g] = eYtmp; #else rY[id.g] += rYtmp; uY[id.g] += uYtmp; vY[id.g] += vYtmp; wY[id.g] += wYtmp; eY[id.g] += 
eYtmp; #endif __syncthreads(); } __global__ void RHSDeviceSharedFlxZ_new(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidZFlx(); int si = id.k + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rZtmp=0; myprec uZtmp=0; myprec vZtmp=0; myprec wZtmp=0; myprec eZtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_u[sPencils][mz+stencilSize*2]; __shared__ myprec s_v[sPencils][mz+stencilSize*2]; __shared__ myprec s_w[sPencils][mz+stencilSize*2]; __shared__ myprec s_dil[sPencils][mz+stencilSize*2]; __shared__ myprec s_prop[sPencils][mz+stencilSize*2]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_dil[sj][si] = dil[id.g]; s_prop[sj][si] = mu[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.k < stencilSize) { perBCz(s_u[sj],si); perBCz(s_v[sj],si); perBCz(s_w[sj],si); perBCz(s_prop[sj],si); perBCz(s_dil[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uZtmp = ( gij[6][id.g] + gij[2][id.g] ); vZtmp = ( gij[7][id.g] + gij[5][id.g] ); wZtmp = (2 * gij[8][id.g] - 2./3.*s_dil[sj][si] ); //adding the viscous dissipation part duidz*mu*siz eZtmp = s_prop[sj][si]*(uZtmp*gij[6][id.g] + vZtmp*gij[7][id.g] + wZtmp*gij[8][id.g]); //Adding here the terms d (mu) dz * szj; derDevSharedV1z(&wrk2,s_prop[sj],si); //wrk2 = d (mu) dz uZtmp *= wrk2; vZtmp *= wrk2; wZtmp *= wrk2; // viscous fluxes derivative derDevSharedV2z(&wrk1,s_u[sj],si); uZtmp = uZtmp + wrk1*s_prop[sj][si]; derDevSharedV2z(&wrk1,s_v[sj],si); vZtmp = vZtmp + wrk1*s_prop[sj][si]; derDevSharedV2z(&wrk1,s_w[sj],si); wZtmp = wZtmp + wrk1*s_prop[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz) eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp; //dilation derivatives derDevSharedV1z(&wrk2,s_dil[sj],si); wZtmp = wZtmp + s_prop[sj][si]*wrk2/3.0; eZtmp = eZtmp + s_prop[sj][si]*wrk2/3.0*s_w[sj][si]; // pressure derivatives s_dil[sj][si] = p[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); } __syncthreads(); derDevShared1z(&wrk1,s_dil[sj],si); wZtmp = wZtmp - wrk1; // fourier terms s_prop[sj][si] = lam[id.g]; s_dil[sj][si] = t[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); perBCz(s_prop[sj],si); } __syncthreads(); derDevSharedV2z(&wrk1,s_dil[sj],si); eZtmp = eZtmp + wrk1*s_prop[sj][si]; derDevSharedV1z(&wrk2,s_prop[sj],si); //wrk2 = d (lam) dz derDevSharedV1z(&wrk1,s_dil[sj],si); //wrk1 = d (t) dz eZtmp = eZtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dz; s_prop[sj][si] = r[id.g]; s_dil[sj][si] = h[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); perBCz(s_prop[sj],si); } __syncthreads(); fluxQuadSharedz(&wrk1,s_prop[sj],s_w[sj],si); rZtmp = wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_u[sj],si); uZtmp = uZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_v[sj],si); vZtmp = vZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_w[sj],si); wZtmp = wZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_dil[sj],si); eZtmp = eZtmp + wrk1; __syncthreads(); #if 
useStreams rZ[id.g] = rZtmp; uZ[id.g] = uZtmp; vZ[id.g] = vZtmp; wZ[id.g] = wZtmp + *dpdz; eZ[id.g] = eZtmp + *dpdz*s_w[sj][si] ; #else rZ[id.g] += rZtmp; uZ[id.g] += uZtmp; vZ[id.g] += vZtmp; wZ[id.g] += wZtmp + *dpdz; eZ[id.g] += eZtmp + *dpdz*s_w[sj][si] ; #endif __syncthreads(); }
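The header comment of the file above describes splitting the RHS across several streams (X on one stream, Y/Z viscous and flux pieces on others), and the #if useStreams branches write their results with = instead of +=, so each direction's contribution lands in its own buffer rather than being accumulated in place. The generic sketch below, with hypothetical rhsX/rhsY/rhsZ stand-ins, shows only the stream mechanics implied by that layout: independent launches on separate streams, disjoint outputs, and a join before the result is consumed.

#include <cuda_runtime.h>

// Hypothetical stand-ins for the per-direction RHS kernels; each writes a
// disjoint slot so the concurrent launches cannot race with one another.
__global__ void rhsX(float *out) { if (threadIdx.x == 0 && blockIdx.x == 0) out[0] += 1.0f; }
__global__ void rhsY(float *out) { if (threadIdx.x == 0 && blockIdx.x == 0) out[1] += 1.0f; }
__global__ void rhsZ(float *out) { if (threadIdx.x == 0 && blockIdx.x == 0) out[2] += 1.0f; }

int main() {
  float *rhs;
  cudaMalloc(&rhs, 3 * sizeof(float));
  cudaMemset(rhs, 0, 3 * sizeof(float));

  // One stream per independent piece of the RHS.
  cudaStream_t s[3];
  for (int i = 0; i < 3; ++i) cudaStreamCreate(&s[i]);

  // Enqueued on different streams, so the hardware may overlap them.
  rhsX<<<1, 32, 0, s[0]>>>(rhs);
  rhsY<<<1, 32, 0, s[1]>>>(rhs);
  rhsZ<<<1, 32, 0, s[2]>>>(rhs);

  // Join all streams before the combined RHS is used (e.g., by a time-step update).
  cudaDeviceSynchronize();

  for (int i = 0; i < 3; ++i) cudaStreamDestroy(s[i]);
  cudaFree(rhs);
  return 0;
}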
7f9161f1aa9036d794bba1918b3809dbd8cdc166.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include "globals.h" #include "cuda_functions.h" #include "cuda_math.h" #include "boundary.h" /* * The L-versions of the RHS have to be ran with * - the L-version of the derivatives * i.e.: derDev1xL instead of derDev1x * - the L-version of the grid * i.e.: h_gridL[0] instead of h_grid[0] */ /* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX_old thanks to the beneficial memory layout that allows to use small pencils */ /* For the Y and Z direction, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils which speed * up significantly the computation. Therefore 5 streams are used * stream 0 -> complete X RHS (in RHSDeviceSharedFlxX_old) (small pencil grid) * stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid) * stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid) * stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid) * stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/ __global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidX(); int si = id.i + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rXtmp=0; myprec uXtmp=0; myprec vXtmp=0; myprec wXtmp=0; myprec eXtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][mx+stencilSize*2]; __shared__ myprec s_u[sPencils][mx+stencilSize*2]; __shared__ myprec s_v[sPencils][mx+stencilSize*2]; __shared__ myprec s_w[sPencils][mx+stencilSize*2]; __shared__ myprec s_h[sPencils][mx+stencilSize*2]; __shared__ myprec s_t[sPencils][mx+stencilSize*2]; __shared__ myprec s_p[sPencils][mx+stencilSize*2]; __shared__ myprec s_m[sPencils][mx+stencilSize*2]; __shared__ myprec s_l[sPencils][mx+stencilSize*2]; #if !periodicX __shared__ myprec s_s0[sPencils][mx+stencilSize*2]; __shared__ myprec s_s4[sPencils][mx+stencilSize*2]; __shared__ myprec s_s8[sPencils][mx+stencilSize*2]; #endif __shared__ myprec s_dil[sPencils][mx+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; #if !periodicX s_s0[sj][si]= gij[0][id.g]; s_s4[sj][si]= gij[4][id.g]; s_s8[sj][si]= gij[8][id.g]; #endif s_dil[sj][si] = dil[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.i < stencilSize) { #if periodicX perBCx(s_r[sj],si); perBCx(s_u[sj],si); perBCx(s_v[sj],si); perBCx(s_w[sj],si); perBCx(s_h[sj],si); perBCx(s_t[sj],si); perBCx(s_p[sj],si); perBCx(s_m[sj],si); perBCx(s_l[sj],si); #else wallBCxMir(s_p[sj],si); wallBCxVel(s_u[sj],si); wallBCxVel(s_v[sj],si); wallBCxVel(s_w[sj],si); wallBCxExt(s_t[sj],si,1.0,1.0); stateBoundPT(s_r[sj], s_t[sj], s_u[sj], 
s_v[sj], s_w[sj], s_h[sj], s_p[sj], s_m[sj], s_l[sj], si); wallBCxMir(s_s0[sj],si); wallBCxVel(s_s4[sj],si); wallBCxVel(s_s8[sj],si); #endif } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uXtmp = ( 2 * gij[0][id.g] - 2./3.*s_dil[sj][si] ); vXtmp = ( gij[1][id.g] + gij[3][id.g] ); wXtmp = ( gij[2][id.g] + gij[6][id.g] ); //adding the viscous dissipation part duidx*mu*six eXtmp = s_m[sj][si]*(uXtmp*gij[0][id.g] + vXtmp*gij[1][id.g] + wXtmp*gij[2][id.g]); //Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse); derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx uXtmp *= wrk2; vXtmp *= wrk2; wXtmp *= wrk2; // viscous fluxes derivative mu*d^2ui dx^2 derDevSharedV2x(&wrk1,s_u[sj],si); uXtmp = uXtmp + wrk1*s_m[sj][si]; derDevSharedV2x(&wrk1,s_v[sj],si); vXtmp = vXtmp + wrk1*s_m[sj][si]; derDevSharedV2x(&wrk1,s_w[sj],si); wXtmp = wXtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidx2 + dmudx * six) eXtmp = eXtmp + s_u[sj][si]*uXtmp + s_v[sj][si]*vXtmp + s_w[sj][si]*wXtmp; //adding the molecular conduction part (d2 temp dx2*lambda + dlambda dx * d temp dx) derDevSharedV2x(&wrk1,s_t[sj],si); eXtmp = eXtmp + wrk1*s_l[sj][si]; derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eXtmp = eXtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dx; fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si); rXtmp = wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si); uXtmp = uXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si); vXtmp = vXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si); wXtmp = wXtmp + wrk1; __syncthreads(); fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si); eXtmp = eXtmp + wrk1; __syncthreads(); // pressure and dilation derivatives if (id.i < stencilSize) { #if periodicX perBCx(s_dil[sj],si); #else wallBCxDil(s_dil[sj],s_s0[sj],s_s4[sj],s_s8[sj],si); #endif } __syncthreads(); derDevSharedV1x(&wrk2,s_dil[sj],si); derDevShared1x(&wrk1 ,s_p[sj],si); uXtmp = uXtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eXtmp = eXtmp + s_m[sj][si]*wrk2/3.0*s_u[sj][si]; rX[id.g] = rXtmp; uX[id.g] = uXtmp; vX[id.g] = vXtmp; wX[id.g] = wXtmp; eX[id.g] = eXtmp ; } __global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidYFlx(); int si = id.j + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rYtmp=0; myprec uYtmp=0; myprec vYtmp=0; myprec wYtmp=0; myprec eYtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][my+stencilSize*2]; __shared__ myprec s_u[sPencils][my+stencilSize*2]; __shared__ myprec s_v[sPencils][my+stencilSize*2]; __shared__ myprec s_w[sPencils][my+stencilSize*2]; __shared__ myprec s_h[sPencils][my+stencilSize*2]; __shared__ myprec s_t[sPencils][my+stencilSize*2]; __shared__ myprec s_p[sPencils][my+stencilSize*2]; __shared__ myprec s_m[sPencils][my+stencilSize*2]; __shared__ myprec s_l[sPencils][my+stencilSize*2]; __shared__ myprec s_s3[sPencils][my+stencilSize*2]; __shared__ myprec s_s4[sPencils][my+stencilSize*2]; __shared__ myprec s_s5[sPencils][my+stencilSize*2]; __shared__ myprec 
s_dil[sPencils][my+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; s_dil[sj][si] = dil[id.g]; s_s3[sj][si] = gij[3][id.g]; s_s4[sj][si] = gij[4][id.g]; s_s5[sj][si] = gij[5][id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.j < stencilSize) { perBCy(s_r[sj],si); perBCy(s_u[sj],si); perBCy(s_v[sj],si); perBCy(s_w[sj],si); perBCy(s_h[sj],si); perBCy(s_t[sj],si); perBCy(s_p[sj],si); perBCy(s_m[sj],si); perBCy(s_l[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uYtmp = ( s_s3[sj][si] + gij[1][id.g] ) ; vYtmp = ( 2 * s_s4[sj][si] - 2./3.*s_dil[sj][si] ) ; wYtmp = ( s_s5[sj][si] + gij[7][id.g] ) ; //adding the viscous dissipation part duidy*mu*siy eYtmp = s_m[sj][si]*(uYtmp*s_s3[sj][si] + vYtmp*s_s4[sj][si] + wYtmp*s_s5[sj][si]); //Adding here the terms d (mu) dy * siy; derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx uYtmp *= wrk2; vYtmp *= wrk2; wYtmp *= wrk2; // viscous fluxes derivative mu*d^2dui dy^2 derDevSharedV2y(&wrk1,s_u[sj],si); uYtmp = uYtmp + wrk1*s_m[sj][si]; derDevSharedV2y(&wrk1,s_v[sj],si); vYtmp = vYtmp + wrk1*s_m[sj][si]; derDevSharedV2y(&wrk1,s_w[sj],si); wYtmp = wYtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy) eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp; derDevSharedV2y(&wrk1,s_t[sj],si); eYtmp = eYtmp + wrk1*s_l[sj][si]; derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eYtmp = eYtmp + wrk1*wrk2; // split advection terms //Adding here the terms - d (ru phi) dy; fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si); rYtmp = wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si); uYtmp = uYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si); vYtmp = vYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si); wYtmp = wYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si); eYtmp = eYtmp + wrk1; __syncthreads(); // pressure and dilation derivatives if (id.j < stencilSize) { perBCy(s_dil[sj],si); } __syncthreads(); derDevSharedV1y(&wrk2,s_dil[sj],si); derDevShared1y(&wrk1,s_p[sj],si); vYtmp = vYtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eYtmp = eYtmp + s_m[sj][si]*wrk2/3.0*s_v[sj][si]; #if useStreams rY[id.g] = rYtmp; uY[id.g] = uYtmp; vY[id.g] = vYtmp; wY[id.g] = wYtmp; eY[id.g] = eYtmp; #else rY[id.g] += rYtmp; uY[id.g] += uYtmp; vY[id.g] += vYtmp; wY[id.g] += wYtmp; eY[id.g] += eYtmp; #endif } __global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidZFlx(); int si = id.k + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rZtmp=0; myprec uZtmp=0; myprec vZtmp=0; myprec wZtmp=0; myprec eZtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_r[sPencils][mz+stencilSize*2]; __shared__ myprec s_u[sPencils][mz+stencilSize*2]; __shared__ myprec s_v[sPencils][mz+stencilSize*2]; __shared__ myprec 
s_w[sPencils][mz+stencilSize*2]; __shared__ myprec s_h[sPencils][mz+stencilSize*2]; __shared__ myprec s_t[sPencils][mz+stencilSize*2]; __shared__ myprec s_p[sPencils][mz+stencilSize*2]; __shared__ myprec s_m[sPencils][mz+stencilSize*2]; __shared__ myprec s_l[sPencils][mz+stencilSize*2]; __shared__ myprec s_s6[sPencils][mz+stencilSize*2]; __shared__ myprec s_s7[sPencils][mz+stencilSize*2]; __shared__ myprec s_s8[sPencils][mz+stencilSize*2]; __shared__ myprec s_dil[sPencils][mz+stencilSize*2]; s_r[sj][si] = r[id.g]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_h[sj][si] = h[id.g]; s_t[sj][si] = t[id.g]; s_p[sj][si] = p[id.g]; s_m[sj][si] = mu[id.g]; s_l[sj][si] = lam[id.g]; s_s6[sj][si] = gij[6][id.g]; s_s7[sj][si] = gij[7][id.g]; s_s8[sj][si] = gij[8][id.g]; s_dil[sj][si] = dil[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.k < stencilSize) { perBCz(s_r[sj],si); perBCz(s_u[sj],si); perBCz(s_v[sj],si); perBCz(s_w[sj],si); perBCz(s_h[sj],si); perBCz(s_t[sj],si); perBCz(s_p[sj],si); perBCz(s_m[sj],si); perBCz(s_l[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uZtmp = ( s_s6[sj][si] + gij[2][id.g] ); vZtmp = ( s_s7[sj][si] + gij[5][id.g] ); wZtmp = (2 * s_s8[sj][si] - 2./3.*s_dil[sj][si] ); //adding the viscous dissipation part duidz*mu*siz eZtmp = s_m[sj][si]*(uZtmp*s_s6[sj][si] + vZtmp*s_s7[sj][si] + wZtmp*s_s8[sj][si]); //Adding here the terms d (mu) dz * szj; derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz uZtmp *= wrk2; vZtmp *= wrk2; wZtmp *= wrk2; // viscous fluxes derivative derDevSharedV2z(&wrk1,s_u[sj],si); uZtmp = uZtmp + wrk1*s_m[sj][si]; derDevSharedV2z(&wrk1,s_v[sj],si); vZtmp = vZtmp + wrk1*s_m[sj][si]; derDevSharedV2z(&wrk1,s_w[sj],si); wZtmp = wZtmp + wrk1*s_m[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz) derDevSharedV2z(&wrk1,s_t[sj],si); eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp + wrk1*s_l[sj][si]; derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dx eZtmp = eZtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dz; fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si); rZtmp = wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si); uZtmp = uZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si); vZtmp = vZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si); wZtmp = wZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si); eZtmp = eZtmp + wrk1; __syncthreads(); // pressure and dilation derivatives __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); } __syncthreads(); derDevSharedV1z(&wrk2,s_dil[sj],si); derDevShared1z(&wrk1,s_p[sj],si); wZtmp = wZtmp + s_m[sj][si]*wrk2/3.0 - wrk1 ; eZtmp = eZtmp + s_m[sj][si]*wrk2/3.0*s_w[sj][si]; #if useStreams rZ[id.g] = rZtmp; uZ[id.g] = uZtmp; vZ[id.g] = vZtmp; wZ[id.g] = wZtmp + *dpdz; eZ[id.g] = eZtmp + *dpdz*s_w[sj][si] ; #else rZ[id.g] += rZtmp; uZ[id.g] += uZtmp; vZ[id.g] += vZtmp; wZ[id.g] += wZtmp + *dpdz; eZ[id.g] += eZtmp + *dpdz*s_w[sj][si] ; #endif __syncthreads(); } __global__ void RHSDeviceSharedFlxY_new(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices 
id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidYFlx(); int si = id.j + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rYtmp=0; myprec uYtmp=0; myprec vYtmp=0; myprec wYtmp=0; myprec eYtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_u[sPencils][my+stencilSize*2]; __shared__ myprec s_v[sPencils][my+stencilSize*2]; __shared__ myprec s_w[sPencils][my+stencilSize*2]; __shared__ myprec s_dil[sPencils][my+stencilSize*2]; __shared__ myprec s_prop[sPencils][my+stencilSize*2]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_dil[sj][si] = dil[id.g]; s_prop[sj][si] = mu[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.j < stencilSize) { perBCy(s_u[sj],si); perBCy(s_v[sj],si); perBCy(s_w[sj],si); perBCy(s_prop[sj],si); perBCy(s_dil[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uYtmp = ( gij[3][id.g] + gij[1][id.g] ); vYtmp = (2 * gij[4][id.g] - 2./3.*s_dil[sj][si] ); wYtmp = ( gij[5][id.g] + gij[7][id.g] ); //adding the viscous dissipation part duidy*mu*siy eYtmp = s_prop[sj][si]*(uYtmp*gij[3][id.g] + vYtmp*gij[4][id.g] + wYtmp*gij[5][id.g]); //Adding here the terms d (mu) dy * syj; derDevSharedV1y(&wrk2,s_prop[sj],si); //wrk2 = d (mu) dy uYtmp *= wrk2; vYtmp *= wrk2; wYtmp *= wrk2; // viscous fluxes derivative derDevSharedV2y(&wrk1,s_u[sj],si); uYtmp = uYtmp + wrk1*s_prop[sj][si]; derDevSharedV2y(&wrk1,s_v[sj],si); vYtmp = vYtmp + wrk1*s_prop[sj][si]; derDevSharedV2y(&wrk1,s_w[sj],si); wYtmp = wYtmp + wrk1*s_prop[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidy2 + dmudy * siy) eYtmp = eYtmp + s_u[sj][si]*uYtmp + s_v[sj][si]*vYtmp + s_w[sj][si]*wYtmp; //dilation derivatives derDevSharedV1y(&wrk2,s_dil[sj],si); vYtmp = vYtmp + s_prop[sj][si]*wrk2/3.0; eYtmp = eYtmp + s_prop[sj][si]*wrk2/3.0*s_v[sj][si]; // pressure derivatives s_dil[sj][si] = p[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); } __syncthreads(); derDevShared1y(&wrk1,s_dil[sj],si); vYtmp = vYtmp - wrk1; // fourier terms s_prop[sj][si] = lam[id.g]; s_dil[sj][si] = t[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); perBCy(s_prop[sj],si); } __syncthreads(); derDevSharedV2y(&wrk1,s_dil[sj],si); eYtmp = eYtmp + wrk1*s_prop[sj][si]; derDevSharedV1y(&wrk2,s_prop[sj],si); //wrk2 = d (lam) dy derDevSharedV1y(&wrk1,s_dil[sj] ,si); //wrk1 = d (t) dy eYtmp = eYtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dy; s_prop[sj][si] = r[id.g]; s_dil[sj][si] = h[id.g]; __syncthreads(); if (id.j < stencilSize) { perBCy(s_dil[sj],si); perBCy(s_prop[sj],si); } __syncthreads(); fluxQuadSharedy(&wrk1,s_prop[sj],s_v[sj],si); rYtmp = wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_u[sj],si); uYtmp = uYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_v[sj],si); vYtmp = vYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_w[sj],si); wYtmp = wYtmp + wrk1; __syncthreads(); fluxCubeSharedy(&wrk1,s_prop[sj],s_v[sj],s_dil[sj],si); eYtmp = eYtmp + wrk1; __syncthreads(); #if useStreams rY[id.g] = rYtmp; uY[id.g] = uYtmp; vY[id.g] = vYtmp; wY[id.g] = wYtmp; eY[id.g] = eYtmp; #else rY[id.g] += rYtmp; uY[id.g] += uYtmp; vY[id.g] += vYtmp; wY[id.g] += wYtmp; eY[id.g] += eYtmp; #endif __syncthreads(); } __global__ void RHSDeviceSharedFlxZ_new(myprec *rZ, 
myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ, myprec *r, myprec *u, myprec *v, myprec *w, myprec *h , myprec *t, myprec *p, myprec *mu, myprec *lam, myprec *dil, myprec *dpdz) { Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y); id.mkidZFlx(); int si = id.k + stencilSize; // local i for shared memory access + halo offset int sj = id.tiy; // local j for shared memory access myprec rZtmp=0; myprec uZtmp=0; myprec vZtmp=0; myprec wZtmp=0; myprec eZtmp=0; myprec wrk1=0; myprec wrk2=0; __shared__ myprec s_u[sPencils][mz+stencilSize*2]; __shared__ myprec s_v[sPencils][mz+stencilSize*2]; __shared__ myprec s_w[sPencils][mz+stencilSize*2]; __shared__ myprec s_dil[sPencils][mz+stencilSize*2]; __shared__ myprec s_prop[sPencils][mz+stencilSize*2]; s_u[sj][si] = u[id.g]; s_v[sj][si] = v[id.g]; s_w[sj][si] = w[id.g]; s_dil[sj][si] = dil[id.g]; s_prop[sj][si] = mu[id.g]; __syncthreads(); // fill in periodic images in shared memory array if (id.k < stencilSize) { perBCz(s_u[sj],si); perBCz(s_v[sj],si); perBCz(s_w[sj],si); perBCz(s_prop[sj],si); perBCz(s_dil[sj],si); } __syncthreads(); //initialize momentum RHS with stresses so that they can be added for both viscous terms and viscous heating without having to load additional terms uZtmp = ( gij[6][id.g] + gij[2][id.g] ); vZtmp = ( gij[7][id.g] + gij[5][id.g] ); wZtmp = (2 * gij[8][id.g] - 2./3.*s_dil[sj][si] ); //adding the viscous dissipation part duidz*mu*siz eZtmp = s_prop[sj][si]*(uZtmp*gij[6][id.g] + vZtmp*gij[7][id.g] + wZtmp*gij[8][id.g]); //Adding here the terms d (mu) dz * szj; derDevSharedV1z(&wrk2,s_prop[sj],si); //wrk2 = d (mu) dz uZtmp *= wrk2; vZtmp *= wrk2; wZtmp *= wrk2; // viscous fluxes derivative derDevSharedV2z(&wrk1,s_u[sj],si); uZtmp = uZtmp + wrk1*s_prop[sj][si]; derDevSharedV2z(&wrk1,s_v[sj],si); vZtmp = vZtmp + wrk1*s_prop[sj][si]; derDevSharedV2z(&wrk1,s_w[sj],si); wZtmp = wZtmp + wrk1*s_prop[sj][si]; //adding the viscous dissipation part ui*(mu * d2duidz2 + dmudz * siz) eZtmp = eZtmp + s_u[sj][si]*uZtmp + s_v[sj][si]*vZtmp + s_w[sj][si]*wZtmp; //dilation derivatives derDevSharedV1z(&wrk2,s_dil[sj],si); wZtmp = wZtmp + s_prop[sj][si]*wrk2/3.0; eZtmp = eZtmp + s_prop[sj][si]*wrk2/3.0*s_w[sj][si]; // pressure derivatives s_dil[sj][si] = p[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); } __syncthreads(); derDevShared1z(&wrk1,s_dil[sj],si); wZtmp = wZtmp - wrk1; // fourier terms s_prop[sj][si] = lam[id.g]; s_dil[sj][si] = t[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); perBCz(s_prop[sj],si); } __syncthreads(); derDevSharedV2z(&wrk1,s_dil[sj],si); eZtmp = eZtmp + wrk1*s_prop[sj][si]; derDevSharedV1z(&wrk2,s_prop[sj],si); //wrk2 = d (lam) dz derDevSharedV1z(&wrk1,s_dil[sj],si); //wrk1 = d (t) dz eZtmp = eZtmp + wrk1*wrk2; //Adding here the terms - d (ru phi) dz; s_prop[sj][si] = r[id.g]; s_dil[sj][si] = h[id.g]; __syncthreads(); if (id.k < stencilSize) { perBCz(s_dil[sj],si); perBCz(s_prop[sj],si); } __syncthreads(); fluxQuadSharedz(&wrk1,s_prop[sj],s_w[sj],si); rZtmp = wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_u[sj],si); uZtmp = uZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_v[sj],si); vZtmp = vZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_w[sj],si); wZtmp = wZtmp + wrk1; __syncthreads(); fluxCubeSharedz(&wrk1,s_prop[sj],s_w[sj],s_dil[sj],si); eZtmp = eZtmp + wrk1; __syncthreads(); #if useStreams rZ[id.g] = rZtmp; uZ[id.g] = uZtmp; vZ[id.g] = vZtmp; wZ[id.g] = wZtmp + *dpdz; 
eZ[id.g] = eZtmp + *dpdz*s_w[sj][si] ; #else rZ[id.g] += rZtmp; uZ[id.g] += uZtmp; vZ[id.g] += vZtmp; wZ[id.g] += wZtmp + *dpdz; eZ[id.g] += eZtmp + *dpdz*s_w[sj][si] ; #endif __syncthreads(); }
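/* Illustrative sketch only (not part of the original host code): the header comment
 * above describes running the X, Y and Z contributions on separate streams. With the
 * kernels defined in this file, and assuming the useStreams configuration where each
 * direction writes its own output buffers, that launch pattern would look roughly as
 * below. The device pointers (d_rX, d_r, ..., d_dpdz), the helper name
 * launchRHSOnStreams and the launch shapes gridX/blockX, gridY/blockY are all
 * assumptions, to be set up elsewhere to match sPencils and the mx/my/mz constants
 * from globals.h; for a HIP build the matching hipStream_* calls apply. */
static void launchRHSOnStreams(myprec *d_rX, myprec *d_uX, myprec *d_vX, myprec *d_wX, myprec *d_eX,
                               myprec *d_rY, myprec *d_uY, myprec *d_vY, myprec *d_wY, myprec *d_eY,
                               myprec *d_r,  myprec *d_u,  myprec *d_v,  myprec *d_w,  myprec *d_h,
                               myprec *d_t,  myprec *d_p,  myprec *d_mu, myprec *d_lam,
                               myprec *d_dil, myprec *d_dpdz,
                               dim3 gridX, dim3 blockX, dim3 gridY, dim3 blockY) {
    cudaStream_t sX, sY;
    cudaStreamCreate(&sX);
    cudaStreamCreate(&sY);

    // the X and Y right-hand sides are independent, so they can overlap on the device
    RHSDeviceSharedFlxX<<<gridX, blockX, 0, sX>>>(d_rX, d_uX, d_vX, d_wX, d_eX,
                                                  d_r, d_u, d_v, d_w, d_h,
                                                  d_t, d_p, d_mu, d_lam, d_dil, d_dpdz);
    RHSDeviceSharedFlxY<<<gridY, blockY, 0, sY>>>(d_rY, d_uY, d_vY, d_wY, d_eY,
                                                  d_r, d_u, d_v, d_w, d_h,
                                                  d_t, d_p, d_mu, d_lam, d_dil, d_dpdz);

    cudaStreamSynchronize(sX);
    cudaStreamSynchronize(sY);
    cudaStreamDestroy(sX);
    cudaStreamDestroy(sY);
}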
23dbec8712d9652f786d4f09cd48212e56fbd37c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> #include <stdlib.h> #include <unistd.h> #include "Brian.v1.h" #include "Brian.v0.h" //On=1,Off=0,Dying=2 int SIZE, ITERATIONS, ANIMATE, BLOCKS, THREADS, SEED, UNOPTIMIZED, PRINT, live_cells, dead_cells, dying_cells; void print_board(int board[], int size, int iteration) { if (iteration != -1) { printf("Iteration %d\n", iteration); } for (int i = 0;i < size; i++) { for (int j = 0; j < size; j++) { if (board[i * size + j] == 1)//if it is alive { printf("\u25A3 "); live_cells++; } else if(board[i * size +j] == 0)//if it is dead { printf("\u25A2 "); dead_cells++; } else if(board[i * size +j] == 2)//if it is dying { printf("\u25A7 "); dying_cells++; } } printf("\n"); } printf("Live Cells =%d ,Dead Cells =%d Dying Cells =%d\n\n",live_cells, dead_cells, dying_cells); live_cells = 0; dead_cells = 0; dying_cells = 0; } void arg_parse(int argc, char *argv[]) { int i = 1; char c; while(i < argc) { sscanf(argv[i++], "%c", &c); if (c == 's')//matrix size { sscanf(argv[i++], "%d", &SIZE); } if (c == 'a')//animation or not { ANIMATE = 1; printf("fu"); } if (c == 'i')//iterations { sscanf(argv[i++], "%d", &ITERATIONS); } if (c == 'b')//number of blocks { sscanf(argv[i++], "%d", &BLOCKS); } if (c == 't')//number of threads { sscanf(argv[i++], "%d", &THREADS); } if (c == 'e')//random seed(?) { sscanf(argv[i++], "%d", &SEED); } if (c == 'u')//version using global memory { UNOPTIMIZED = 1; } if (c == 'p')//print board { sscanf(argv[i++], "%d", &PRINT); } } } int run() { // run arguments int animate = ANIMATE != -1 ? ANIMATE : false; // variable for animation--default False int size = SIZE ? SIZE : 64;//matrix size--default 64 int iterations = ITERATIONS ? ITERATIONS : 6;//generations--default 6 int no_blocks = BLOCKS ? BLOCKS : size; //number of blocks--default 64 int no_threads = THREADS ? THREADS : size;//number of thread--default 64 int unoptimized_run = UNOPTIMIZED ? UNOPTIMIZED : 0;//variable for version--default optimized int print = PRINT != -1 ? PRINT : true; // Initialize random seed srand(SEED != -1 ? 
SEED : time(NULL)); // host(cpu) memory int *input = (int*)calloc(size * size, sizeof(int));//matrix for production-initialisation int *output = (int*)calloc(size * size, sizeof(int));//the matrix we print int *devin, *devout, *devtemp;//matrix of gpu // device(gpu) memory hipMalloc((void**)&devin, size * size * sizeof(int));//matrix for production-initialisation hipMalloc((void**)&devout, size * size * sizeof(int));//the matrix we print hipMalloc((void**)&devtemp, size * size * sizeof(int));//matrix of next generation // production and initialisation of the universe for (int i = 0;i < size; i++) { for (int j = 0; j < size; j++) { input[i*size + j] = rand() % 3;// a number from 0,2 } } if (print) print_board(input, size, 0); // initial matrix migration from cpu to gpu hipMemcpy(devin, input, size * size * sizeof(int), hipMemcpyHostToDevice); //the matrix we print hipMemcpy(devout, output, size * size * sizeof(int), hipMemcpyHostToDevice); //used when the size of shared memory is unknown during the compile //dynamic memory allocation in shared memory //it is used only from version 2 //containts threads data of a block int shared_board_size = (no_threads + 2 * size) * sizeof(int); // timer start struct timeval tv1, tv2; gettimeofday(&tv1, NULL); // choose version // 1st version with global memmory if (unoptimized_run) { for (int i = 0;i<iterations;i++) { if (i == 0) { //start calculations with first production-initialisation matrix hipLaunchKernelGGL(( play), dim3(no_blocks),dim3(no_threads), 0, 0, devin, devout); } else { //continue calculations with next generation matrix hipLaunchKernelGGL(( play), dim3(no_blocks),dim3(no_threads), 0, 0, devtemp, devout); } //migration of next generation matrix to output matrix of gpu(inside og gpu) hipMemcpy(devtemp, devout, size * size * sizeof(int), hipMemcpyDeviceToDevice); //migration of output matrix from gpu to cpu hipMemcpy(output, devout, size * size * sizeof(int), hipMemcpyDeviceToHost); //print results if (animate == true) { system("clear"); print_board(output, size, i); usleep(100000); } } printf("Unoptimized run\n"); } //2nd version with shared memmory,uses a 3rd matrix for calculations else { for (int i = 0;i<iterations;i++) { if (i == 0) { //start calculations with first production-initialisation matrix hipLaunchKernelGGL(( play_with_shared_memory), dim3(no_blocks),dim3(no_threads),shared_board_size, 0, devin, devout, size); } else { //continue calculations with next generation matrix hipLaunchKernelGGL(( play_with_shared_memory), dim3(no_blocks),dim3(no_threads),shared_board_size, 0, devtemp, devout, size); } //migration of next generation matrix to output matrix of gpu(inside og gpu) hipMemcpy(devtemp, devout, size * size * sizeof(int), hipMemcpyDeviceToDevice); //migration of output matrix from gpu to cpu hipMemcpy(output, devout, size * size * sizeof(int), hipMemcpyDeviceToHost); //print results if (animate == true) { system("clear"); print_board(output, size, i); usleep(100000); } } } // migration of result from gpu to cpu hipMemcpy(output, devout, size * size * sizeof(int), hipMemcpyDeviceToHost); if (print) print_board(output, size, iterations); // calculate the run time gettimeofday(&tv2, NULL); printf ("Total time in kernel = %f seconds\n",(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); // Free device memory hipFree(devin); hipFree(devout); hipFree(devtemp); return 0; } int main(int argc, char* argv[]) { SIZE = 0, ITERATIONS = 0, ANIMATE = -1, BLOCKS = 0, THREADS = 0, UNOPTIMIZED = 0, SEED = 
-1, PRINT = -1; arg_parse(argc, argv); run(); return 0; }
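/* Usage note (derived from arg_parse above; the binary name and values are illustrative):
 * each option is a single letter followed by its value, except 'a' (animate) and
 * 'u' (unoptimized global-memory version), which take no value. For example
 *     ./brian s 128 i 20 b 128 t 128 e 42 p 1
 * runs a 128x128 board for 20 iterations with 128 blocks of 128 threads, seed 42 and
 * board printing enabled; appending 'u' selects the global-memory kernel instead of
 * the shared-memory one. */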
23dbec8712d9652f786d4f09cd48212e56fbd37c.cu
#include <stdio.h> #include <sys/time.h> #include <stdlib.h> #include <unistd.h> #include "Brian.v1.h" #include "Brian.v0.h" //On=1,Off=0,Dying=2 int SIZE, ITERATIONS, ANIMATE, BLOCKS, THREADS, SEED, UNOPTIMIZED, PRINT, live_cells, dead_cells, dying_cells; void print_board(int board[], int size, int iteration) { if (iteration != -1) { printf("Iteration %d\n", iteration); } for (int i = 0;i < size; i++) { for (int j = 0; j < size; j++) { if (board[i * size + j] == 1)//if it is alive { printf("\u25A3 "); live_cells++; } else if(board[i * size +j] == 0)//if it is dead { printf("\u25A2 "); dead_cells++; } else if(board[i * size +j] == 2)//if it is dying { printf("\u25A7 "); dying_cells++; } } printf("\n"); } printf("Live Cells =%d ,Dead Cells =%d Dying Cells =%d\n\n",live_cells, dead_cells, dying_cells); live_cells = 0; dead_cells = 0; dying_cells = 0; } void arg_parse(int argc, char *argv[]) { int i = 1; char c; while(i < argc) { sscanf(argv[i++], "%c", &c); if (c == 's')//matrix size { sscanf(argv[i++], "%d", &SIZE); } if (c == 'a')//animation or not { ANIMATE = 1; printf("fu"); } if (c == 'i')//iterations { sscanf(argv[i++], "%d", &ITERATIONS); } if (c == 'b')//number of blocks { sscanf(argv[i++], "%d", &BLOCKS); } if (c == 't')//number of threads { sscanf(argv[i++], "%d", &THREADS); } if (c == 'e')//random seed(?) { sscanf(argv[i++], "%d", &SEED); } if (c == 'u')//version using global memory { UNOPTIMIZED = 1; } if (c == 'p')//print board { sscanf(argv[i++], "%d", &PRINT); } } } int run() { // run arguments int animate = ANIMATE != -1 ? ANIMATE : false; // variable for animation--default False int size = SIZE ? SIZE : 64;//matrix size--default 64 int iterations = ITERATIONS ? ITERATIONS : 6;//generations--default 6 int no_blocks = BLOCKS ? BLOCKS : size; //number of blocks--default 64 int no_threads = THREADS ? THREADS : size;//number of thread--default 64 int unoptimized_run = UNOPTIMIZED ? UNOPTIMIZED : 0;//variable for version--default optimized int print = PRINT != -1 ? PRINT : true; // Initialize random seed srand(SEED != -1 ? 
SEED : time(NULL)); // host(cpu) memory int *input = (int*)calloc(size * size, sizeof(int));//matrix for production-initialisation int *output = (int*)calloc(size * size, sizeof(int));//the matrix we print int *devin, *devout, *devtemp;//matrix of gpu // device(gpu) memory cudaMalloc((void**)&devin, size * size * sizeof(int));//matrix for production-initialisation cudaMalloc((void**)&devout, size * size * sizeof(int));//the matrix we print cudaMalloc((void**)&devtemp, size * size * sizeof(int));//matrix of next generation // production and initialisation of the universe for (int i = 0;i < size; i++) { for (int j = 0; j < size; j++) { input[i*size + j] = rand() % 3;// a number from 0,2 } } if (print) print_board(input, size, 0); // initial matrix migration from cpu to gpu cudaMemcpy(devin, input, size * size * sizeof(int), cudaMemcpyHostToDevice); //the matrix we print cudaMemcpy(devout, output, size * size * sizeof(int), cudaMemcpyHostToDevice); //used when the size of shared memory is unknown during the compile //dynamic memory allocation in shared memory //it is used only from version 2 //containts threads data of a block int shared_board_size = (no_threads + 2 * size) * sizeof(int); // timer start struct timeval tv1, tv2; gettimeofday(&tv1, NULL); // choose version // 1st version with global memmory if (unoptimized_run) { for (int i = 0;i<iterations;i++) { if (i == 0) { //start calculations with first production-initialisation matrix play<<<no_blocks,no_threads>>>(devin, devout); } else { //continue calculations with next generation matrix play<<<no_blocks,no_threads>>>(devtemp, devout); } //migration of next generation matrix to output matrix of gpu(inside og gpu) cudaMemcpy(devtemp, devout, size * size * sizeof(int), cudaMemcpyDeviceToDevice); //migration of output matrix from gpu to cpu cudaMemcpy(output, devout, size * size * sizeof(int), cudaMemcpyDeviceToHost); //print results if (animate == true) { system("clear"); print_board(output, size, i); usleep(100000); } } printf("Unoptimized run\n"); } //2nd version with shared memmory,uses a 3rd matrix for calculations else { for (int i = 0;i<iterations;i++) { if (i == 0) { //start calculations with first production-initialisation matrix play_with_shared_memory<<<no_blocks,no_threads,shared_board_size>>>(devin, devout, size); } else { //continue calculations with next generation matrix play_with_shared_memory<<<no_blocks,no_threads,shared_board_size>>>(devtemp, devout, size); } //migration of next generation matrix to output matrix of gpu(inside og gpu) cudaMemcpy(devtemp, devout, size * size * sizeof(int), cudaMemcpyDeviceToDevice); //migration of output matrix from gpu to cpu cudaMemcpy(output, devout, size * size * sizeof(int), cudaMemcpyDeviceToHost); //print results if (animate == true) { system("clear"); print_board(output, size, i); usleep(100000); } } } // migration of result from gpu to cpu cudaMemcpy(output, devout, size * size * sizeof(int), cudaMemcpyDeviceToHost); if (print) print_board(output, size, iterations); // calculate the run time gettimeofday(&tv2, NULL); printf ("Total time in kernel = %f seconds\n",(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); // Free device memory cudaFree(devin); cudaFree(devout); cudaFree(devtemp); return 0; } int main(int argc, char* argv[]) { SIZE = 0, ITERATIONS = 0, ANIMATE = -1, BLOCKS = 0, THREADS = 0, UNOPTIMIZED = 0, SEED = -1, PRINT = -1; arg_parse(argc, argv); run(); return 0; }
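/* Illustrative sketch only: the real kernels (play, play_with_shared_memory) live in
 * Brian.v0.h / Brian.v1.h, which are not shown here, and take only (in, out).
 * This standalone kernel just spells out the Brian's Brain update rule the driver
 * above relies on (On=1 -> Dying=2, Dying=2 -> Off=0, Off=0 -> On=1 iff exactly two
 * of the eight neighbours are On); the explicit size parameter and the toroidal
 * wrap-around are assumptions, not taken from the headers. */
__global__ void play_sketch(const int* in, int* out, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size * size) return;

    int row = idx / size;
    int col = idx % size;
    int cell = in[idx];

    if (cell == 1)           // On -> Dying
    {
        out[idx] = 2;
    }
    else if (cell == 2)      // Dying -> Off
    {
        out[idx] = 0;
    }
    else                     // Off -> On iff exactly two neighbours are On
    {
        int on = 0;
        for (int dr = -1; dr <= 1; dr++)
            for (int dc = -1; dc <= 1; dc++)
            {
                if (dr == 0 && dc == 0) continue;
                int r = (row + dr + size) % size;  // wrap around (assumption)
                int c = (col + dc + size) % size;
                if (in[r * size + c] == 1) on++;
            }
        out[idx] = (on == 2) ? 1 : 0;
    }
}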
7753b0b1d2963d00cacfd260c002abd10616b988.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "smallsieve.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> typedef unsigned __int64 uint64; typedef unsigned __int32 uint32; ///////////////////////////KERNELS START/////////////////////////// //Checks if x is prime; if bit corresponding to x is 0, then return true. __device__ bool isPrime(uint64 *mark, uint64 x) { return (mark[x / 128] & ((uint64)1 << ((x >> 1) & 63))) ? 0 : 1; } //Set the bit corresponding to x __device__ void bitSet(uint64 *mark, uint64 x) { mark[x / 128] |= ((uint64)1 << ((x >> 1) & 63)); } __global__ void SieveBlock(uint32 *P, uint64 *mark, uint64 m, uint32 completed, uint32 plen, uint32 segsize, bool print) { //Each thread sieves [(pow2_32 >> 1) / (threads*blocks)] elements of the current block uint64 id, i, j, minb, min, max, prime; uint64 global_min = completed * m + 1; id = threadIdx.x; min = global_min + (id * segsize); max = min + segsize - 2; //printf("Kernel %llu handles %11llu->%11llu\n", id, min, max);//works correctly prime = P[0]; for (i = 1;(prime*prime <= max) && (i < plen);i++) { minb = ((min / prime) * prime); if (minb < min) minb += prime; if (~minb & 1) minb += prime; for (j = minb;j <= max;j += (prime << 1)) bitSet(mark, j - global_min + 1); prime = P[i]; } //Print last found prime for last set of segments if (print) { for (j = max; j >= min; j -= 2) { if (isPrime(mark, j - global_min + 1)) { printf("Kernel %llu: %llu|%llu\n", id, j, max); break; } } } } ////////////////////////////KERNELS END//////////////////////////// // SEGMENTED BOOLSIEVE // n RAM Time // E07 552KB 0.026s // E08 620KB 0.206s // E09 704KB 1.895s // E10 668KB 20.02s // E11 904KB 205.2s //CUDA SEGMENTED BITSIEVE // n Time // E06 0.087s // E07 0.598s // E08 4.119s // E09 32.34s //Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti //Driver function int main(uint32 argc, char* argv[]) { //Range: Data-type dependent uint64 n, m; printf("Enter n: "); scanf("%llu", &n); bound(n, m); uint32 threadCount = threadCalc(m); uint32 segsize = m / threadCount; uint32 plen = 0; uint32 *P = NULL; P = segboolsieve(n, m, plen); if (P == NULL) { printf("Memory Allocation Failure!\n"); exit(0); } else printf("Last prime in utility sieve: %u @ index [%u]\n", P[plen - 1], plen - 1); uint32 segments = (uint32)((n + 1) / (m + 1)); //No. of segments uint32 completed = 1; printf("\n%u segments(s) for [%llu->%llu]\n", segments - 1, m + 1, n); bool print = false; uint32 *dP; uint64 *dmark; uint64 *mark = (uint64 *)calloc((m / 128), sizeof(uint64)); //Log execution time float GPU_TIME = 0.0; float temp_t; //CUDA Malloc hipMalloc(&dP, (plen + 1) * (size)); hipMalloc(&dmark, ((m / 128) * size)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); dim3 TPB(threadCount, 1, 1); hipMemcpy(dP, P, plen * size, hipMemcpyHostToDevice); while (completed < segments) { if (completed + 1 == segments) print = true; hipMemcpy(dmark, mark, (m / 128) * size, hipMemcpyHostToDevice); hipEventRecord(start); SieveBlock << <1, TPB >> > (dP, dmark, m, completed, plen, segsize, print); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&temp_t, start, stop); GPU_TIME += temp_t; completed++; } free(P); free(mark); hipFree(dP); hipFree(dmark); GPU_TIME /= 1000; printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME); return 0; }
7753b0b1d2963d00cacfd260c002abd10616b988.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "smallsieve.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> typedef unsigned __int64 uint64; typedef unsigned __int32 uint32; ///////////////////////////KERNELS START/////////////////////////// //Checks if x is prime; if bit corresponding to x is 0, then return true. __device__ bool isPrime(uint64 *mark, uint64 x) { return (mark[x / 128] & ((uint64)1 << ((x >> 1) & 63))) ? 0 : 1; } //Set the bit corresponding to x __device__ void bitSet(uint64 *mark, uint64 x) { mark[x / 128] |= ((uint64)1 << ((x >> 1) & 63)); } __global__ void SieveBlock(uint32 *P, uint64 *mark, uint64 m, uint32 completed, uint32 plen, uint32 segsize, bool print) { //Each thread sieves [(pow2_32 >> 1) / (threads*blocks)] elements of the current block uint64 id, i, j, minb, min, max, prime; uint64 global_min = completed * m + 1; id = threadIdx.x; min = global_min + (id * segsize); max = min + segsize - 2; //printf("Kernel %llu handles %11llu->%11llu\n", id, min, max);//works correctly prime = P[0]; for (i = 1;(prime*prime <= max) && (i < plen);i++) { minb = ((min / prime) * prime); if (minb < min) minb += prime; if (~minb & 1) minb += prime; for (j = minb;j <= max;j += (prime << 1)) bitSet(mark, j - global_min + 1); prime = P[i]; } //Print last found prime for last set of segments if (print) { for (j = max; j >= min; j -= 2) { if (isPrime(mark, j - global_min + 1)) { printf("Kernel %llu: %llu|%llu\n", id, j, max); break; } } } } ////////////////////////////KERNELS END//////////////////////////// // SEGMENTED BOOLSIEVE // n RAM Time // E07 552KB 0.026s // E08 620KB 0.206s // E09 704KB 1.895s // E10 668KB 20.02s // E11 904KB 205.2s //CUDA SEGMENTED BITSIEVE // n Time // E06 0.087s // E07 0.598s // E08 4.119s // E09 32.34s //Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti //Driver function int main(uint32 argc, char* argv[]) { //Range: Data-type dependent uint64 n, m; printf("Enter n: "); scanf("%llu", &n); bound(n, m); uint32 threadCount = threadCalc(m); uint32 segsize = m / threadCount; uint32 plen = 0; uint32 *P = NULL; P = segboolsieve(n, m, plen); if (P == NULL) { printf("Memory Allocation Failure!\n"); exit(0); } else printf("Last prime in utility sieve: %u @ index [%u]\n", P[plen - 1], plen - 1); uint32 segments = (uint32)((n + 1) / (m + 1)); //No. of segments uint32 completed = 1; printf("\n%u segments(s) for [%llu->%llu]\n", segments - 1, m + 1, n); bool print = false; uint32 *dP; uint64 *dmark; uint64 *mark = (uint64 *)calloc((m / 128), sizeof(uint64)); //Log execution time float GPU_TIME = 0.0; float temp_t; //CUDA Malloc cudaMalloc(&dP, (plen + 1) * (size)); cudaMalloc(&dmark, ((m / 128) * size)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 TPB(threadCount, 1, 1); cudaMemcpy(dP, P, plen * size, cudaMemcpyHostToDevice); while (completed < segments) { if (completed + 1 == segments) print = true; cudaMemcpy(dmark, mark, (m / 128) * size, cudaMemcpyHostToDevice); cudaEventRecord(start); SieveBlock << <1, TPB >> > (dP, dmark, m, completed, plen, segsize, print); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&temp_t, start, stop); GPU_TIME += temp_t; completed++; } free(P); free(mark); cudaFree(dP); cudaFree(dmark); GPU_TIME /= 1000; printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME); return 0; }
9940817ab4c3ff3838f9805fc10a9e482e4dec1e.hip
// !!! This is a file automatically generated by hipify!!! /** * \file dnn/src/cuda/type_cvt/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./kern.cuh" #include "megdnn/dtype.h" #include "src/cuda/elemwise_helper.cuh" using namespace megdnn; using namespace cuda; using namespace elemwise_intl; namespace { template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOp { ctype_dest* dest; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(src); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpToQuantized { ctype_dest* dest; CudaDTypeParam<ctype_dest> param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = param.quantize(src); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpFromQuantized { ctype_dest* dest; CudaDTypeParam<ctype_src> param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(param.dequantize(src)); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpBetweenQuantized { ctype_dest* dest; CudaDTypeParam<ctype_src> src_param; CudaDTypeParam<ctype_dest> dst_param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = dst_param.quantize(src_param.dequantize(src)); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOp<ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_int8>::value || std::is_same<ctype_src, dt_uint8>::value>::type> { ctype_dest* dest; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(src); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = static_cast<ctype_dest>(src.x); ctype_dest y = static_cast<ctype_dest>(src.y); ctype_dest z = static_cast<ctype_dest>(src.z); ctype_dest w = static_cast<ctype_dest>(src.w); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpToQuantized< ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_int8>::value || std::is_same<ctype_src, dt_uint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_dest> param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = param.quantize(src); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = param.quantize(src.x); ctype_dest y = param.quantize(src.y); ctype_dest z = param.quantize(src.z); ctype_dest w = param.quantize(src.w); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpFromQuantized< ctype_dest, 
ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_qint8>::value || std::is_same<ctype_src, dt_quint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_src> param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(param.dequantize(src)); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = static_cast<ctype_dest>(param.dequantize(ctype_src(src.x))); ctype_dest y = static_cast<ctype_dest>(param.dequantize(ctype_src(src.y))); ctype_dest z = static_cast<ctype_dest>(param.dequantize(ctype_src(src.z))); ctype_dest w = static_cast<ctype_dest>(param.dequantize(ctype_src(src.w))); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpBetweenQuantized< ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_qint8>::value || std::is_same<ctype_src, dt_quint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_src> src_param; CudaDTypeParam<ctype_dest> dst_param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ ctype_dest apply(ctype_src in) { float inter = src_param.dequantize(in); return dst_param.quantize(inter); } __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = dst_param.quantize(src_param.dequantize(src)); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = apply(ctype_src(src.x)); ctype_dest y = apply(ctype_src(src.y)); ctype_dest z = apply(ctype_src(src.z)); ctype_dest w = apply(ctype_src(src.w)); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; } // anonymous namespace #define main_func(OpType, body) \ { \ typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \ typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \ typedef OpType<ctype_dest, ctype_src> Op; \ ElemwiseOpParamN<1> param; \ param[0] = src; \ param.init_from_given_tensor(); \ megdnn_assert(DTypeTrait<ctype_src>::enumv == \ src.layout.dtype.enumv().ev); \ megdnn_assert(DTypeTrait<ctype_dest>::enumv == \ dest.layout.dtype.enumv().ev); \ Op op; \ op.dest = dest.ptr<ctype_dest>(); \ body; \ return run_elemwise<Op, ctype_src, 1>(param, stream, op); \ } namespace megdnn { namespace cuda { template <typename dtype_src, typename dtype_dest> void typecvt_kern_q2q( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_src>& src_param, const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) { main_func(TypeCvtOpBetweenQuantized, op.dst_param = dst_param; op.src_param = src_param;) } template <typename dtype_src, typename dtype_dest> void typecvt_kern_n2q( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) { main_func(TypeCvtOpToQuantized, op.param = dst_param;); } template <typename dtype_src, typename dtype_dest> void typecvt_kern_q2n( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_src>& src_param, hipStream_t stream) { main_func(TypeCvtOpFromQuantized, op.param = src_param;); } template <typename dtype_src, typename dtype_dest> void typecvt_kern_n2n(const TensorND& dest, const TensorND& src, hipStream_t stream) { 
main_func(TypeCvtOp, ); } #define INST_Q2Q(dtype_src, dtype_dest) \ template void typecvt_kern_q2q<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_src>& src_param, \ const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream); #define INST_Q2N(dtype_src, dtype_dest) \ template void typecvt_kern_q2n<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_src>& src_param, hipStream_t stream); #define INST_N2Q(dtype_src, dtype_dest) \ template void typecvt_kern_n2q<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream); #define INST_N2N(dtype_src, dtype_dest) \ template void typecvt_kern_n2n<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, hipStream_t stream); #define MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \ cb(dtype_src, dt_int8) \ cb(dtype_src, dt_int32) \ cb(dtype_src, dt_int16) \ cb(dtype_src, dt_uint8) \ cb(dtype_src, dt_float32) \ cb(dtype_src, dt_float16) \ cb(dtype_src, dt_bfloat16) \ #define MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \ cb(dtype_src, dt_quint8) \ cb(dtype_src, dt_qint32) \ cb(dtype_src, dt_qint8) \ #define INST_SRC_QUANTIZED(dtype_src) \ MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2N) \ MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2Q) \ #define INST_SRC_NORMAL(dtype_src) \ MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2N) \ MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2Q) \ #define MEGDNN_FOREACH_COMPUTING_CTYPE(cb) \ cb(dt_int8) \ cb(dt_int32) \ cb(dt_int16) \ cb(dt_uint8) \ cb(dt_float32) \ cb(dt_float16) \ cb(dt_bfloat16) \ #define MEGDNN_FOREACH_QUANTIZED_CTYPE(cb) \ cb(dt_quint8) \ cb(dt_qint32) \ cb(dt_qint8) MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED) MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL) template void typecvt_kern_n2q<dtype::Int8, dtype::QuantizedS8>( const TensorND& src, const TensorND& dst, const CudaDTypeParam<dt_qint8>& param, hipStream_t stream); } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
9940817ab4c3ff3838f9805fc10a9e482e4dec1e.cu
/** * \file dnn/src/cuda/type_cvt/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./kern.cuh" #include "megdnn/dtype.h" #include "src/cuda/elemwise_helper.cuh" using namespace megdnn; using namespace cuda; using namespace elemwise_intl; namespace { template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOp { ctype_dest* dest; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(src); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpToQuantized { ctype_dest* dest; CudaDTypeParam<ctype_dest> param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = param.quantize(src); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpFromQuantized { ctype_dest* dest; CudaDTypeParam<ctype_src> param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(param.dequantize(src)); } }; template <typename ctype_dest, typename ctype_src, typename enable = void> struct TypeCvtOpBetweenQuantized { ctype_dest* dest; CudaDTypeParam<ctype_src> src_param; CudaDTypeParam<ctype_dest> dst_param; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = dst_param.quantize(src_param.dequantize(src)); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOp<ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_int8>::value || std::is_same<ctype_src, dt_uint8>::value>::type> { ctype_dest* dest; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(src); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = static_cast<ctype_dest>(src.x); ctype_dest y = static_cast<ctype_dest>(src.y); ctype_dest z = static_cast<ctype_dest>(src.z); ctype_dest w = static_cast<ctype_dest>(src.w); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpToQuantized< ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_int8>::value || std::is_same<ctype_src, dt_uint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_dest> param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = param.quantize(src); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = param.quantize(src.x); ctype_dest y = param.quantize(src.y); ctype_dest z = param.quantize(src.z); ctype_dest w = param.quantize(src.w); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpFromQuantized< ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, 
dt_qint8>::value || std::is_same<ctype_src, dt_quint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_src> param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = static_cast<ctype_dest>(param.dequantize(src)); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = static_cast<ctype_dest>(param.dequantize(ctype_src(src.x))); ctype_dest y = static_cast<ctype_dest>(param.dequantize(ctype_src(src.y))); ctype_dest z = static_cast<ctype_dest>(param.dequantize(ctype_src(src.z))); ctype_dest w = static_cast<ctype_dest>(param.dequantize(ctype_src(src.w))); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; template <typename ctype_dest, typename ctype_src> struct TypeCvtOpBetweenQuantized< ctype_dest, ctype_src, typename std::enable_if< std::is_same<ctype_src, dt_qint8>::value || std::is_same<ctype_src, dt_quint8>::value>::type> { ctype_dest* dest; CudaDTypeParam<ctype_src> src_param; CudaDTypeParam<ctype_dest> dst_param; using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type; using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type; __device__ __forceinline__ ctype_dest apply(ctype_src in) { float inter = src_param.dequantize(in); return dst_param.quantize(inter); } __device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) { dest[idx] = dst_param.quantize(src_param.dequantize(src)); } __device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) { ctype_dest x = apply(ctype_src(src.x)); ctype_dest y = apply(ctype_src(src.y)); ctype_dest z = apply(ctype_src(src.z)); ctype_dest w = apply(ctype_src(src.w)); *(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y, z, w); } }; } // anonymous namespace #define main_func(OpType, body) \ { \ typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \ typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \ typedef OpType<ctype_dest, ctype_src> Op; \ ElemwiseOpParamN<1> param; \ param[0] = src; \ param.init_from_given_tensor(); \ megdnn_assert(DTypeTrait<ctype_src>::enumv == \ src.layout.dtype.enumv().ev); \ megdnn_assert(DTypeTrait<ctype_dest>::enumv == \ dest.layout.dtype.enumv().ev); \ Op op; \ op.dest = dest.ptr<ctype_dest>(); \ body; \ return run_elemwise<Op, ctype_src, 1>(param, stream, op); \ } namespace megdnn { namespace cuda { template <typename dtype_src, typename dtype_dest> void typecvt_kern_q2q( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_src>& src_param, const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) { main_func(TypeCvtOpBetweenQuantized, op.dst_param = dst_param; op.src_param = src_param;) } template <typename dtype_src, typename dtype_dest> void typecvt_kern_n2q( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) { main_func(TypeCvtOpToQuantized, op.param = dst_param;); } template <typename dtype_src, typename dtype_dest> void typecvt_kern_q2n( const TensorND& dest, const TensorND& src, const CudaDTypeParam<dtype_src>& src_param, cudaStream_t stream) { main_func(TypeCvtOpFromQuantized, op.param = src_param;); } template <typename dtype_src, typename dtype_dest> void typecvt_kern_n2n(const TensorND& dest, const TensorND& src, cudaStream_t stream) { main_func(TypeCvtOp, ); } #define INST_Q2Q(dtype_src, 
dtype_dest) \ template void typecvt_kern_q2q<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_src>& src_param, \ const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream); #define INST_Q2N(dtype_src, dtype_dest) \ template void typecvt_kern_q2n<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_src>& src_param, cudaStream_t stream); #define INST_N2Q(dtype_src, dtype_dest) \ template void typecvt_kern_n2q<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, \ const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream); #define INST_N2N(dtype_src, dtype_dest) \ template void typecvt_kern_n2n<dtype_src, dtype_dest>( \ const TensorND& dest, const TensorND& src, cudaStream_t stream); #define MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \ cb(dtype_src, dt_int8) \ cb(dtype_src, dt_int32) \ cb(dtype_src, dt_int16) \ cb(dtype_src, dt_uint8) \ cb(dtype_src, dt_float32) \ cb(dtype_src, dt_float16) \ cb(dtype_src, dt_bfloat16) \ #define MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \ cb(dtype_src, dt_quint8) \ cb(dtype_src, dt_qint32) \ cb(dtype_src, dt_qint8) \ #define INST_SRC_QUANTIZED(dtype_src) \ MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2N) \ MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2Q) \ #define INST_SRC_NORMAL(dtype_src) \ MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2N) \ MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2Q) \ #define MEGDNN_FOREACH_COMPUTING_CTYPE(cb) \ cb(dt_int8) \ cb(dt_int32) \ cb(dt_int16) \ cb(dt_uint8) \ cb(dt_float32) \ cb(dt_float16) \ cb(dt_bfloat16) \ #define MEGDNN_FOREACH_QUANTIZED_CTYPE(cb) \ cb(dt_quint8) \ cb(dt_qint32) \ cb(dt_qint8) MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED) MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL) template void typecvt_kern_n2q<dtype::Int8, dtype::QuantizedS8>( const TensorND& src, const TensorND& dst, const CudaDTypeParam<dt_qint8>& param, cudaStream_t stream); } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
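/* Illustrative sketch (not MegDNN code): the int8/uint8 specializations above exist so
 * that four 8-bit values can be processed per thread through VectTypeTrait's vector
 * types. The standalone kernel below shows the same idea with plain CUDA vector types:
 * each thread loads one char4 and stores one float4, so the 8-bit loads are coalesced
 * into 32-bit transactions and the stores into 128-bit ones. It assumes the element
 * count is a multiple of 4 and that both pointers are suitably aligned. */
__global__ void cvt_s8_to_f32_vec4(const char4* __restrict__ src,
                                   float4* __restrict__ dst,
                                   int n_vec4)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n_vec4) return;
    char4 v = src[i];                      // four int8 values in one 32-bit load
    dst[i] = make_float4((float)v.x, (float)v.y, (float)v.z, (float)v.w);
}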
e662be4b885a02b09e632cb08c577b4b2299a07c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

//Device Code
//Sieve of Eratosthenes ("Criba"): Nums[j] is set to 1 when j is composite,
//and every prime found is appended to Prims.
__global__ void Criba(int* Nums, int* Prims, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = 0, x = 0, p;

    //the sieve itself is sequential, so only the first thread runs it
    if (i == 0)
    {
        for (j = 2; j <= N; j++)
        {
            if (Nums[j] != 1 || j == 2)
            {
                Prims[x] = j;
                printf("%d is prime\n", j);
                for (p = 2; (p * j) <= N; p++)
                {
                    Nums[p * j] = 1;
                }
                x++;
            }
        }
        printf("There are %d prime numbers\n", x);
    }
}

int main()
{
    int N = 100;
    size_t size = (N + 1) * sizeof(int);

    // Allocate input vectors h_Nums and h_Prims in host memory (zero-initialized)
    int* h_Nums  = (int*) calloc(N + 1, sizeof(int));
    int* h_Prims = (int*) calloc(N + 1, sizeof(int));

    // Allocate vectors in device memory
    int* d_Nums;
    hipMalloc(&d_Nums, size);
    int* d_Prims;
    hipMalloc(&d_Prims, size);

    // Copy vectors from host memory to device memory
    hipMemcpy(d_Nums, h_Nums, size, hipMemcpyHostToDevice);
    hipMemcpy(d_Prims, h_Prims, size, hipMemcpyHostToDevice);

    // Invoke kernel (a single thread is enough for the sequential sieve)
    hipLaunchKernelGGL(Criba, dim3(1), dim3(1), 0, 0, d_Nums, d_Prims, N);
    hipDeviceSynchronize();

    // Copy result from device memory to host memory
    // h_Prims holds the primes found, followed by zero entries
    hipMemcpy(h_Prims, d_Prims, size, hipMemcpyDeviceToHost);

    // Free device memory
    hipFree(d_Nums);
    hipFree(d_Prims);

    //print primes
    /* for(int i = 0; i <= N && h_Prims[i] != 0; i++)
    {
        printf("Prims[%d]= %d\n", i, h_Prims[i]);
    }
    printf("\n"); */

    //Free host memory
    free(h_Nums);
    free(h_Prims);

    return 0;
}
e662be4b885a02b09e632cb08c577b4b2299a07c.cu
#include <stdio.h>
#include <stdlib.h>     // malloc, free, drand48, srand48
#include <time.h>       // time
#include <cuda_runtime.h>

// Device code: sieve of Eratosthenes over 2..N.
// Note: every thread below the guard runs the complete sieve, as in the
// original; the kernel is kept for reference but is not launched from main().
__global__ void Criba(int* Nums, int* Prims, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j, x = 0, p;
    if (i < N)
    {
        for (j = 2; j <= N; j++) {
            if (Nums[j] != 1 || j == 2)
            {
                Prims[x] = j;
                printf("The number %d is prime\n", j);
                for (p = 2; (p * j) <= N; p++) {
                    Nums[p * j] = 1;    // mark multiples of j as composite
                }
                x++;
            }
        }
        printf("There are %d prime numbers\n", x);
    }
}

// Element-wise addition of two tam x tam matrices stored as flat arrays.
// The original main() launched a VecAdd kernel that was never defined; this
// minimal definition is supplied so the file compiles and runs.
__global__ void VecAdd(const float* A, const float* B, float* C, int tam)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row < tam && col < tam)
    {
        int idx = row * tam + col;
        C[idx] = A[idx] + B[idx];
    }
}

int main()
{
    int tam = 1000, i = 0, j = 0;
    size_t size = tam * tam * sizeof(float);

    // Allocate input vectors h_A and h_B in host memory
    float* h_A = (float*) malloc(size);
    float* h_B = (float*) malloc(size);
    float* h_C = (float*) malloc(size);

    // Initialize input vectors
    srand48(time(NULL));    // seed drand48 once before filling the matrices
    for (i = 0; i < tam; i++)
    {
        for (j = 0; j < tam; j++)
        {
            *(h_A + (i * tam) + j) = drand48() * (10.0 - 0.0) + 0.0;
            *(h_B + (i * tam) + j) = drand48() * (10.0 - 0.0) + 0.0;
        }
    }

    // Allocate vectors in device memory
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Invoke kernel: 16x16 threads per block, enough blocks to cover tam x tam
    // (the original dimBlock(tam, tam) exceeds the per-block thread limit)
    dim3 dimBlock(16, 16);
    dim3 dimGrid((tam + dimBlock.x - 1) / dimBlock.x,
                 (tam + dimBlock.y - 1) / dimBlock.y);
    VecAdd<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, tam);

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Print A
    /*
    for (i = 0; i < tam; i++) {
        for (j = 0; j < tam; j++) {
            printf("\tA[%d][%d]= %f", i, j, *(h_A + (i * tam) + j));
        }
        printf("\n");
    }
    printf("\n");
    // Print B
    for (i = 0; i < tam; i++) {
        for (j = 0; j < tam; j++) {
            printf("\tB[%d][%d]= %f", i, j, *(h_B + (i * tam) + j));
        }
        printf("\n");
    }
    printf("\n");
    // Print C
    for (i = 0; i < tam; i++) {
        for (j = 0; j < tam; j++) {
            printf("\tC[%d][%d]= %f", i, j, *(h_C + (i * tam) + j));
        }
        printf("\n");
    }
    printf("\n");
    */

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
}
eb656555aa02963e99d869ca4d35a76ae2eab989.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "test_geodesics_ptp.cuh" #include "test_geodesics_ptp.h" #include "geodesics_ptp.cuh" #include "geodesics_ptp.h" #include <fstream> #include <rocblas.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> vector<pair<index_t, distance_t> > iter_error_parallel_toplesets_propagation_gpu(che * mesh, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * sorted_index, const distance_t * exact_dist, double & time_ptp) { hipDeviceReset(); float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // BEGIN PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; hipMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); hipMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; hipMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); distance_t * d_error; hipMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); vector<pair<index_t, distance_t> > iter_error = iter_error_run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, limits, sorted_index, d_sorted, exact_dist, d_error); delete [] h_dist; hipFree(d_error); hipFree(d_dist[0]); hipFree(d_dist[1]); hipFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END PTP hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); time_ptp = time / 1000; hipEventDestroy(start); hipEventDestroy(stop); return iter_error; } /// Return an array of time in seconds. double * times_farthest_point_sampling_ptp_gpu(che * mesh, vector<index_t> & samples, size_t n, distance_t radio) { hipDeviceReset(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // BEGIN FPS PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; hipMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); hipMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); distance_t * d_error; hipMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; hipMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); vector<index_t> limits; index_t * toplesets = new index_t[h_mesh->n_vertices]; index_t * sorted_index = new index_t[h_mesh->n_vertices]; hipblasHandle_t handle; hipblasCreate(&handle); if(n >= h_mesh->n_vertices) n = h_mesh->n_vertices >> 1; double * times = new double[n + 1]; n -= samples.size(); samples.reserve(n); float time_fps; index_t d; int f; distance_t max_dist = INFINITY; while(n-- && max_dist > radio) { hipEventRecord(start, 0); limits.clear(); mesh->compute_toplesets(toplesets, sorted_index, limits, samples); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, samples, limits, sorted_index, d_sorted, d_error); // 1 indexing #ifdef SINGLE_P hipblasIsamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #else hipblasIdamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #endif hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_fps, start, stop); times[samples.size()] = time_fps / 1000; if(radio > 0 || !n) hipMemcpy(&max_dist, d_dist[d] + f - 1, sizeof(distance_t), hipMemcpyDeviceToHost); samples.push_back(f - 1); } hipblasDestroy(handle); delete [] h_dist; delete 
[] toplesets; delete [] sorted_index; hipFree(d_error); hipFree(d_dist[0]); hipFree(d_dist[1]); hipFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END FPS PTP hipEventDestroy(start); hipEventDestroy(stop); return times; } vector<pair<index_t, distance_t> > iter_error_run_ptp_gpu(CHE * d_mesh, const index_t & n_vertices, distance_t * h_dist, distance_t ** d_dist, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * h_sorted, index_t * d_sorted, const distance_t * exact_dist, distance_t * d_error) { #pragma omp parallel for for(index_t v = 0; v < n_vertices; v++) h_dist[v] = INFINITY; for(index_t i = 0; i < sources.size(); i++) h_dist[sources[i]] = 0; hipMemcpy(d_dist[0], h_dist, sizeof(distance_t) * n_vertices, hipMemcpyHostToDevice); hipMemcpy(d_dist[1], h_dist, sizeof(distance_t) * n_vertices, hipMemcpyHostToDevice); hipMemcpy(d_sorted, h_sorted, sizeof(index_t) * n_vertices, hipMemcpyHostToDevice); vector<pair<index_t, distance_t> > iter_error; iter_error.reserve(limits.size()); index_t d = 0; index_t start, end, n_cond; index_t i = 1, j = 2; index_t n_iter = 0; while(i < j) { n_iter++; start = limits[i]; end = limits[j]; n_cond = limits[i + 1] - start; hipLaunchKernelGGL(( relax_ptp) , dim3(NB(end - start)), dim3(NT) , 0, 0, d_mesh, d_dist[!d], d_dist[d], d_sorted, end, start); // begin calculating iteration error hipMemcpy(h_dist, d_dist[!d], sizeof(distance_t) * n_vertices, hipMemcpyDeviceToHost); if(j == limits.size() - 1) iter_error.push_back(make_pair(n_iter, compute_error(h_dist, exact_dist, n_vertices, sources.size()))); // end hipLaunchKernelGGL(( relative_error) , dim3(NB(n_cond)), dim3(NT) , 0, 0, d_error, d_dist[!d], d_dist[d], start, start + n_cond, d_sorted); hipDeviceSynchronize(); if(n_cond == thrust::count_if(thrust::device, d_error + start, d_error + start + n_cond, is_ok())) i++; if(j < limits.size() - 1) j++; d = !d; } return iter_error; }
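Note that the hipified file above includes <rocblas.h> while calling the hipBLAS API (hipblasHandle_t, hipblasCreate, hipblasIsamax/hipblasIdamax, hipblasDestroy); those declarations come from the hipBLAS header, so the include it presumably needs is (exact path varies by ROCm release):

#include <hipblas.h>    // or <hipblas/hipblas.h> on newer ROCm installs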
eb656555aa02963e99d869ca4d35a76ae2eab989.cu
#include "test_geodesics_ptp.cuh" #include "test_geodesics_ptp.h" #include "geodesics_ptp.cuh" #include "geodesics_ptp.h" #include <fstream> #include <cublas_v2.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> vector<pair<index_t, distance_t> > iter_error_parallel_toplesets_propagation_gpu(che * mesh, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * sorted_index, const distance_t * exact_dist, double & time_ptp) { cudaDeviceReset(); float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // BEGIN PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; cudaMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); cudaMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; cudaMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); distance_t * d_error; cudaMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); vector<pair<index_t, distance_t> > iter_error = iter_error_run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, sources, limits, sorted_index, d_sorted, exact_dist, d_error); delete [] h_dist; cudaFree(d_error); cudaFree(d_dist[0]); cudaFree(d_dist[1]); cudaFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END PTP cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); time_ptp = time / 1000; cudaEventDestroy(start); cudaEventDestroy(stop); return iter_error; } /// Return an array of time in seconds. double * times_farthest_point_sampling_ptp_gpu(che * mesh, vector<index_t> & samples, size_t n, distance_t radio) { cudaDeviceReset(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // BEGIN FPS PTP CHE * h_mesh = new CHE(mesh); CHE * dd_mesh, * d_mesh; cuda_create_CHE(h_mesh, dd_mesh, d_mesh); distance_t * h_dist = new distance_t[h_mesh->n_vertices]; distance_t * d_dist[2]; cudaMalloc(&d_dist[0], sizeof(distance_t) * h_mesh->n_vertices); cudaMalloc(&d_dist[1], sizeof(distance_t) * h_mesh->n_vertices); distance_t * d_error; cudaMalloc(&d_error, sizeof(distance_t) * h_mesh->n_vertices); index_t * d_sorted; cudaMalloc(&d_sorted, sizeof(index_t) * h_mesh->n_vertices); vector<index_t> limits; index_t * toplesets = new index_t[h_mesh->n_vertices]; index_t * sorted_index = new index_t[h_mesh->n_vertices]; cublasHandle_t handle; cublasCreate(&handle); if(n >= h_mesh->n_vertices) n = h_mesh->n_vertices >> 1; double * times = new double[n + 1]; n -= samples.size(); samples.reserve(n); float time_fps; index_t d; int f; distance_t max_dist = INFINITY; while(n-- && max_dist > radio) { cudaEventRecord(start, 0); limits.clear(); mesh->compute_toplesets(toplesets, sorted_index, limits, samples); d = run_ptp_gpu(d_mesh, h_mesh->n_vertices, h_dist, d_dist, samples, limits, sorted_index, d_sorted, d_error); // 1 indexing #ifdef SINGLE_P cublasIsamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #else cublasIdamax(handle, mesh->n_vertices(), d_dist[d], 1, &f); #endif cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_fps, start, stop); times[samples.size()] = time_fps / 1000; if(radio > 0 || !n) cudaMemcpy(&max_dist, d_dist[d] + f - 1, sizeof(distance_t), cudaMemcpyDeviceToHost); samples.push_back(f - 1); } cublasDestroy(handle); delete [] h_dist; delete [] toplesets; delete [] sorted_index; cudaFree(d_error); 
cudaFree(d_dist[0]); cudaFree(d_dist[1]); cudaFree(d_sorted); cuda_free_CHE(dd_mesh, d_mesh); // END FPS PTP cudaEventDestroy(start); cudaEventDestroy(stop); return times; } vector<pair<index_t, distance_t> > iter_error_run_ptp_gpu(CHE * d_mesh, const index_t & n_vertices, distance_t * h_dist, distance_t ** d_dist, const vector<index_t> & sources, const vector<index_t> & limits, const index_t * h_sorted, index_t * d_sorted, const distance_t * exact_dist, distance_t * d_error) { #pragma omp parallel for for(index_t v = 0; v < n_vertices; v++) h_dist[v] = INFINITY; for(index_t i = 0; i < sources.size(); i++) h_dist[sources[i]] = 0; cudaMemcpy(d_dist[0], h_dist, sizeof(distance_t) * n_vertices, cudaMemcpyHostToDevice); cudaMemcpy(d_dist[1], h_dist, sizeof(distance_t) * n_vertices, cudaMemcpyHostToDevice); cudaMemcpy(d_sorted, h_sorted, sizeof(index_t) * n_vertices, cudaMemcpyHostToDevice); vector<pair<index_t, distance_t> > iter_error; iter_error.reserve(limits.size()); index_t d = 0; index_t start, end, n_cond; index_t i = 1, j = 2; index_t n_iter = 0; while(i < j) { n_iter++; start = limits[i]; end = limits[j]; n_cond = limits[i + 1] - start; relax_ptp <<< NB(end - start), NT >>> (d_mesh, d_dist[!d], d_dist[d], d_sorted, end, start); // begin calculating iteration error cudaMemcpy(h_dist, d_dist[!d], sizeof(distance_t) * n_vertices, cudaMemcpyDeviceToHost); if(j == limits.size() - 1) iter_error.push_back(make_pair(n_iter, compute_error(h_dist, exact_dist, n_vertices, sources.size()))); // end relative_error <<< NB(n_cond), NT >>> (d_error, d_dist[!d], d_dist[d], start, start + n_cond, d_sorted); cudaDeviceSynchronize(); if(n_cond == thrust::count_if(thrust::device, d_error + start, d_error + start + n_cond, is_ok())) i++; if(j < limits.size() - 1) j++; d = !d; } return iter_error; }
7ab3bc2a677f45dabc9ebeb5095e0ce7cbaf017d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; }
7ab3bc2a677f45dabc9ebeb5095e0ce7cbaf017d.cu
#include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; }
d6aed997c6f32f2d826bee8cf27d948d53ad3161.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "book.h"
#include "cpu_anim.h"
#include <stdio.h>

#define DIM 1024
#define PI 3.1415926535897932f

__global__ void kernal(unsigned char *ptr, int ticks){
    // map from threadIdx/blockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    // calculate the value at that position
    float fx = x - DIM / 2;
    float fy = y - DIM / 2;
    float d = sqrtf(fx * fx + fy * fy);
    unsigned char grey = (unsigned char)(128.0f + 127.0f *
        cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
    ptr[offset * 4 + 0] = grey;
    ptr[offset * 4 + 1] = grey;
    ptr[offset * 4 + 2] = grey;
    ptr[offset * 4 + 3] = 255;
}

struct DataBlock{
    unsigned char *dev_bitmap;
    CPUAnimBitmap *bitmap;    // host-side bitmap the frames are copied into
};

void generate_frame(DataBlock *d, int ticks){
    dim3 blocks(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    hipLaunchKernelGGL(kernal, blocks, threads, 0, 0, d->dev_bitmap, ticks);

    HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
        d->bitmap->image_size(), hipMemcpyDeviceToHost));
}

// Clean up memory allocated on the GPU
void cleanup(DataBlock *d) {
    hipFree(d->dev_bitmap);
}

int main(void){
    DataBlock data;
    CPUAnimBitmap bitmap(DIM, DIM, &data);
    data.bitmap = &bitmap;    // generate_frame reads the bitmap through the DataBlock
    HANDLE_ERROR(hipMalloc((void **)&data.dev_bitmap, bitmap.image_size()));

    bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
}
d6aed997c6f32f2d826bee8cf27d948d53ad3161.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "book.h" #include "cpu_anim.h" #include "cuda.h" #include <stdio.h> #define DIM 1024 #define PI 3.1415926535897932f __global__ void kernal(unsigned char *ptr, int ticks){ //map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; //Clalculate the value at that position float fx = x - DIM / 2; float fy = y - DIM / 2; float d = sqrtf(fx * fx + fy * fy); unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f)); ptr[offset * 4 + 0] = grey; ptr[offset * 4 + 1] = grey; ptr[offset * 4 + 2] = grey; ptr[offset * 4 + 3] = 255; } struct DataBlock{ unsigned char *dev_bitmap; CPUAnimBitmap }; void generate_frame(DataBlock *d, int ticks){ dim3 blocks(DIM / 16, DIM / 16); dim3 threads(16, 16); kernal << <blocks, threads >> >(d->dev_bitmap, ticks); HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost)); } //Clean up memory allocated on the GPU void cleanup(DataBlock *d) { cudaFree(d->dev_bitmap); } int main(void){ DataBlock data; CPUAnimBitmap bitmap(DIM, DIM, &data); HANDLE_ERROR(cudaMalloc((void **)&data.dev_bitmap, bitmap.image_size())); bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup); }
028d21645922f06dc0978f161a8e17456dce9a08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_top; int xdim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_top; int ydim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_top; int xdim1_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_top; int ydim1_update_halo_kernel2_zvel_plus_4_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_top * \ ydim0_update_halo_kernel2_zvel_plus_4_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_top * \ ydim1_update_halo_kernel2_zvel_plus_4_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top * ydim0_update_halo_kernel2_zvel_plus_4_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top * ydim1_update_halo_kernel2_zvel_plus_4_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_top_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94, "update_halo_kernel2_zvel_plus_4_top"); OPS_kernels[94].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - 
start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_top_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_4_top_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_top_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_top_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_top), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[94].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; OPS_kernels[94].transfer 
+= ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
028d21645922f06dc0978f161a8e17456dce9a08.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_top; int xdim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_top; int ydim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_top; int xdim1_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_top; int ydim1_update_halo_kernel2_zvel_plus_4_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_top * \ ydim0_update_halo_kernel2_zvel_plus_4_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_top * \ ydim1_update_halo_kernel2_zvel_plus_4_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top * ydim0_update_halo_kernel2_zvel_plus_4_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top * ydim1_update_halo_kernel2_zvel_plus_4_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_top_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 94)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(94, "update_halo_kernel2_zvel_plus_4_top"); OPS_kernels[94].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = 
args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_top_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_4_top_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_top_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_top_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_4_top<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[94].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[94].mpi_time += t2 - t1; OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[94].transfer += ops_compute_transfer(dim, start, end, 
&arg1); } }
abf4c5235252a0667bf7e3f67e21a6ab28204298.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ __forceinline__ float relu(float a) { return a < 0 ? 0 : a; } __global__ void relu_derivative(float *upper_grads, float *upper_values, unsigned int upper_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < upper_size) if (upper_values[index] == 0) upper_grads[index] = 0; }
abf4c5235252a0667bf7e3f67e21a6ab28204298.cu
#include "includes.h" __device__ __forceinline__ float relu(float a) { return a < 0 ? 0 : a; } __global__ void relu_derivative(float *upper_grads, float *upper_values, unsigned int upper_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < upper_size) if (upper_values[index] == 0) upper_grads[index] = 0; }
412be54c26694ec101ad1ecd871a03c125bd2051.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <iostream> #include "utils.hpp" #define TILE_WIDTH 32 using namespace std; void checkCudaError(hipError_t err) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } __global__ void matrixMult(float *matA, float *matB, float *matC, int n) { __shared__ float shared_matA[TILE_WIDTH][TILE_WIDTH]; __shared__ float shared_matB[TILE_WIDTH][TILE_WIDTH]; int i = blockIdx.y * TILE_WIDTH + threadIdx.y; int j = blockIdx.x * TILE_WIDTH + threadIdx.x; float sum = 0; for (int m = 0; m * TILE_WIDTH < n; m++) { if (i < n && m * TILE_WIDTH + threadIdx.x < n) { shared_matA[threadIdx.y][threadIdx.x] = matA[i * n + (m * TILE_WIDTH + threadIdx.x)]; } else { shared_matA[threadIdx.y][threadIdx.x] = 0; } if (m * TILE_WIDTH + threadIdx.y < n && j < n) { shared_matB[threadIdx.y][threadIdx.x] = matB[(m * TILE_WIDTH + threadIdx.y) * n + j]; } else { shared_matB[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) { sum += shared_matA[threadIdx.y][k] * shared_matB[k][threadIdx.x]; } __syncthreads(); } if (i < n && j < n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; matC[row * n + col] = sum; } } int main(int argc, char **argv) { int A_ROWS, A_COLS, B_ROWS, B_COLS; if (argc < 4) { cout << "Usage: " << argv[0] << " a_rows a_cols b_cols" << endl; return -1; } else { A_ROWS = atoi(argv[1]); A_COLS = atoi(argv[2]); B_ROWS = A_COLS; B_COLS = atoi(argv[3]); } clock_t start, end; double time_used; size_t size_a = A_ROWS * A_COLS * sizeof(float); size_t size_b = B_ROWS * B_COLS * sizeof(float); size_t size_c = A_ROWS * B_COLS * sizeof(float); float *h_a = (float *) malloc(size_a); float *h_b = (float *) malloc(size_b); float *h_c = (float *) malloc(size_c); float *d_a; float *d_b; float *d_c; // Init matrices utils::InitMat(h_a, A_ROWS, A_COLS); utils::InitMat(h_b, B_ROWS, B_COLS); hipError_t err; err = hipMalloc((void **) &d_a, size_a); checkCudaError(err); err = hipMalloc((void **) &d_b, size_b); checkCudaError(err); err = hipMalloc((void **) &d_c, size_c); checkCudaError(err); // Multiplication in GPU start = clock(); err = hipMemcpy(d_a, h_a, size_a, hipMemcpyHostToDevice); checkCudaError(err); err = hipMemcpy(d_b, h_b, size_b, hipMemcpyHostToDevice); checkCudaError(err); int block_size = TILE_WIDTH; dim3 block_dim(block_size, block_size, 1); dim3 grid_dim(ceil((float)B_COLS / block_size), ceil((float)A_ROWS / block_size), 1); hipLaunchKernelGGL(( matrixMult), dim3(grid_dim), dim3(block_dim), 0, 0, d_a, d_b, d_c, A_ROWS); hipDeviceSynchronize(); err = hipMemcpy(h_c, d_c, size_c, hipMemcpyDeviceToHost); checkCudaError(err); end = clock(); // Show matriced if (argv[4]) { utils::ShowMat("A", h_a, A_ROWS, A_COLS); utils::ShowMat("B", h_b, B_ROWS, B_COLS); utils::ShowMat("P_C", h_c, A_ROWS, B_COLS); } time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("%.10f ", time_used); // time in GPU // printf("%.10f\n", cpu_time_used / gpu_time_used); // acceleration free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
412be54c26694ec101ad1ecd871a03c125bd2051.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <iostream> #include "utils.hpp" #define TILE_WIDTH 32 using namespace std; void checkCudaError(cudaError_t err) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } __global__ void matrixMult(float *matA, float *matB, float *matC, int n) { __shared__ float shared_matA[TILE_WIDTH][TILE_WIDTH]; __shared__ float shared_matB[TILE_WIDTH][TILE_WIDTH]; int i = blockIdx.y * TILE_WIDTH + threadIdx.y; int j = blockIdx.x * TILE_WIDTH + threadIdx.x; float sum = 0; for (int m = 0; m * TILE_WIDTH < n; m++) { if (i < n && m * TILE_WIDTH + threadIdx.x < n) { shared_matA[threadIdx.y][threadIdx.x] = matA[i * n + (m * TILE_WIDTH + threadIdx.x)]; } else { shared_matA[threadIdx.y][threadIdx.x] = 0; } if (m * TILE_WIDTH + threadIdx.y < n && j < n) { shared_matB[threadIdx.y][threadIdx.x] = matB[(m * TILE_WIDTH + threadIdx.y) * n + j]; } else { shared_matB[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; k++) { sum += shared_matA[threadIdx.y][k] * shared_matB[k][threadIdx.x]; } __syncthreads(); } if (i < n && j < n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; matC[row * n + col] = sum; } } int main(int argc, char **argv) { int A_ROWS, A_COLS, B_ROWS, B_COLS; if (argc < 4) { cout << "Usage: " << argv[0] << " a_rows a_cols b_cols" << endl; return -1; } else { A_ROWS = atoi(argv[1]); A_COLS = atoi(argv[2]); B_ROWS = A_COLS; B_COLS = atoi(argv[3]); } clock_t start, end; double time_used; size_t size_a = A_ROWS * A_COLS * sizeof(float); size_t size_b = B_ROWS * B_COLS * sizeof(float); size_t size_c = A_ROWS * B_COLS * sizeof(float); float *h_a = (float *) malloc(size_a); float *h_b = (float *) malloc(size_b); float *h_c = (float *) malloc(size_c); float *d_a; float *d_b; float *d_c; // Init matrices utils::InitMat(h_a, A_ROWS, A_COLS); utils::InitMat(h_b, B_ROWS, B_COLS); cudaError_t err; err = cudaMalloc((void **) &d_a, size_a); checkCudaError(err); err = cudaMalloc((void **) &d_b, size_b); checkCudaError(err); err = cudaMalloc((void **) &d_c, size_c); checkCudaError(err); // Multiplication in GPU start = clock(); err = cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice); checkCudaError(err); err = cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice); checkCudaError(err); int block_size = TILE_WIDTH; dim3 block_dim(block_size, block_size, 1); dim3 grid_dim(ceil((float)B_COLS / block_size), ceil((float)A_ROWS / block_size), 1); matrixMult<<<grid_dim, block_dim>>>(d_a, d_b, d_c, A_ROWS); cudaDeviceSynchronize(); err = cudaMemcpy(h_c, d_c, size_c, cudaMemcpyDeviceToHost); checkCudaError(err); end = clock(); // Show matriced if (argv[4]) { utils::ShowMat("A", h_a, A_ROWS, A_COLS); utils::ShowMat("B", h_b, B_ROWS, B_COLS); utils::ShowMat("P_C", h_c, A_ROWS, B_COLS); } time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("%.10f ", time_used); // time in GPU // printf("%.10f\n", cpu_time_used / gpu_time_used); // acceleration free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
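Note that matrixMult above is written for square n x n inputs (main passes only A_ROWS), even though main accepts independent a_rows/a_cols/b_cols. A sketch of how the same shared-memory tiling generalizes to rectangular shapes; the kernel name and parameter names here are illustrative, not from the file:

__global__ void matrixMultRect(const float *A, const float *B, float *C,
                               int aRows, int aCols, int bCols) {
    __shared__ float tileA[TILE_WIDTH][TILE_WIDTH];
    __shared__ float tileB[TILE_WIDTH][TILE_WIDTH];
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    float sum = 0.0f;
    for (int m = 0; m * TILE_WIDTH < aCols; m++) {
        int aCol = m * TILE_WIDTH + threadIdx.x;
        int bRow = m * TILE_WIDTH + threadIdx.y;
        // out-of-range elements are staged as zero so they do not affect the sum
        tileA[threadIdx.y][threadIdx.x] = (row < aRows && aCol < aCols) ? A[row * aCols + aCol] : 0.0f;
        tileB[threadIdx.y][threadIdx.x] = (bRow < aCols && col < bCols) ? B[bRow * bCols + col] : 0.0f;
        __syncthreads();
        for (int k = 0; k < TILE_WIDTH; k++)
            sum += tileA[threadIdx.y][k] * tileB[k][threadIdx.x];
        __syncthreads();
    }
    if (row < aRows && col < bCols)
        C[row * bCols + col] = sum;
}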
bad433bcd145fef6a9bb2da3dfcbcc5413c518d1.hip
// !!! This is a file automatically generated by hipify!!! // Compile: nvcc -g -G -arch=sm_61 hello-world.cu -o hello-world #include <cstdio> #include <hip/hip_runtime.h> #include <iostream> __global__ void hwkernel() { printf("Hello World!\n"); } int main() { std::cout << "Before launching kernel" << std::endl << std::endl; hipLaunchKernelGGL(( hwkernel), dim3(1), dim3(8), 0, 0, ); hipDeviceSynchronize(); return EXIT_SUCCESS; }
bad433bcd145fef6a9bb2da3dfcbcc5413c518d1.cu
// Compile: nvcc -g -G -arch=sm_61 hello-world.cu -o hello-world #include <cstdio> #include <cuda.h> #include <iostream> __global__ void hwkernel() { printf("Hello World!\n"); } int main() { std::cout << "Before launching kernel" << std::endl << std::endl; hwkernel<<<1, 8>>>(); cudaDeviceSynchronize(); return EXIT_SUCCESS; }
2945e0aff4ca318e995a52a038ef5451a9519a7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <math.h> #include <stdio.h> #include <time.h> #include <windows.h> void serialFir(float *filtered, const float *values, const float *coeffs, unsigned int size, unsigned int taps) { const time_t start_time = time(0); fprintf(stdout, "Serial start time: %d\n", start_time); // For each value for (unsigned int i = 0; i < size - taps; ++i) { // MAC accross tap coeffs float res = 0.f; for (unsigned int j = 0; j < taps; ++j) { const float v = values[i + j]; const float c = coeffs[j]; res += v * c; } filtered[i] = res; } fprintf(stdout, "Serial end time: %d\n", time(0)); fprintf(stdout, "Serial run time: %d\n", time(0) - start_time); } // Parallel fir kernel __global__ void firKernel(float *filtered, const float *values, const float *coeffs, const int size, const int taps) { // Copy the coeffs extern __shared__ float s_coeffs[]; const int coeff_blocks = (int)((taps / (float)blockDim.x) + 0.5f); for (unsigned int i = 0; i < (coeff_blocks * blockDim.x); i += blockDim.x) { if (i < taps) { s_coeffs[i] = coeffs[i]; } } // Wait for all threads __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; //int i = threadIdx.x; // MAC accross tap coeffs float res = 0.f; for (unsigned int j = 0; j < taps; ++j) { res += values[i + j] * s_coeffs[j]; } //filtered[(blockIdx.x * blockDim.x) + i] = res; filtered[i] = res; } // Helper function for using CUDA to add vectors in parallel. hipError_t firWithCuda(float *filtered, const float *values, const float *coeffs, unsigned int size, unsigned int taps) { float *dev_values = 0; float *dev_coeffs = 0; float *dev_filtered = 0; hipError_t cudaStatus; const int threadCount = 64; const int blockCount = ((size - taps) / threadCount) + ((size - taps) % threadCount == 0?0:1); const time_t start_time = time(0); fprintf(stdout, "Parallel start time: %d\n", start_time); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed: %s", hipGetErrorString(cudaStatus)); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_values, size * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_coeffs, taps * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_filtered, (size - taps) * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_values, values, size * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_coeffs, coeffs, taps * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( firKernel), dim3(blockCount), dim3(threadCount), taps * sizeof(float), 0, dev_filtered, dev_values, dev_coeffs, size, taps); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error: %s!\n", hipGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(filtered, dev_filtered, (size - taps) * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } fprintf(stdout, "Parallel end time: %d\n", time(0)); fprintf(stdout, "Parallel run time: %d\n", time(0) - start_time); Error: hipFree(dev_values); hipFree(dev_coeffs); hipFree(dev_filtered); return cudaStatus; } /*int main() { const float EPSILON = 1.0e-3f; const int size = 1048576;//131072;//4194304; const int taps = 1024;//8192;//8192; float *values; float *coeffs; float *serial_filtered; float *parallel_filtered; values = (float *)malloc(size * sizeof(float)); coeffs = (float *)malloc(taps * sizeof(float)); serial_filtered = (float *)malloc((size - taps) * sizeof(float)); parallel_filtered = (float *)malloc((size - taps) * sizeof(float)); // Initialise data for (unsigned int i = 0; i < size; ++i) { values[i] = (float)sin(2. * M_PI * (i / (double)size)); } for (unsigned int i = 0; i < taps; ++i) { coeffs[i] = (float)cos(2. * M_PI * (i / (double)taps)); } // Run serial kernel serialFir(serial_filtered, values, coeffs, size, taps); // Run parallel fir hipError_t cudaStatus = firWithCuda(parallel_filtered, values, coeffs, size, taps); if (cudaStatus != hipSuccess) { fprintf(stderr, "firWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } // Check output for (unsigned int i = 0; i < size - taps; ++i) { if (fabs(serial_filtered[i] - parallel_filtered[i]) > EPSILON) { fprintf(stdout, "Mismatch at %i, expected: %f, got %f\n", i, serial_filtered[i], parallel_filtered[i]); } } // Clean up free(values); free(coeffs); free(serial_filtered); free(parallel_filtered); return 0; }*/
2945e0aff4ca318e995a52a038ef5451a9519a7b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <math.h> #include <stdio.h> #include <time.h> #include <windows.h> void serialFir(float *filtered, const float *values, const float *coeffs, unsigned int size, unsigned int taps) { const time_t start_time = time(0); fprintf(stdout, "Serial start time: %d\n", start_time); // For each value for (unsigned int i = 0; i < size - taps; ++i) { // MAC accross tap coeffs float res = 0.f; for (unsigned int j = 0; j < taps; ++j) { const float v = values[i + j]; const float c = coeffs[j]; res += v * c; } filtered[i] = res; } fprintf(stdout, "Serial end time: %d\n", time(0)); fprintf(stdout, "Serial run time: %d\n", time(0) - start_time); } // Parallel fir kernel __global__ void firKernel(float *filtered, const float *values, const float *coeffs, const int size, const int taps) { // Copy the coeffs extern __shared__ float s_coeffs[]; const int coeff_blocks = (int)((taps / (float)blockDim.x) + 0.5f); for (unsigned int i = 0; i < (coeff_blocks * blockDim.x); i += blockDim.x) { if (i < taps) { s_coeffs[i] = coeffs[i]; } } // Wait for all threads __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; //int i = threadIdx.x; // MAC accross tap coeffs float res = 0.f; for (unsigned int j = 0; j < taps; ++j) { res += values[i + j] * s_coeffs[j]; } //filtered[(blockIdx.x * blockDim.x) + i] = res; filtered[i] = res; } // Helper function for using CUDA to add vectors in parallel. cudaError_t firWithCuda(float *filtered, const float *values, const float *coeffs, unsigned int size, unsigned int taps) { float *dev_values = 0; float *dev_coeffs = 0; float *dev_filtered = 0; cudaError_t cudaStatus; const int threadCount = 64; const int blockCount = ((size - taps) / threadCount) + ((size - taps) % threadCount == 0?0:1); const time_t start_time = time(0); fprintf(stdout, "Parallel start time: %d\n", start_time); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed: %s", cudaGetErrorString(cudaStatus)); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_values, size * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_coeffs, taps * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_filtered, (size - taps) * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_values, values, size * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_coeffs, coeffs, taps * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. firKernel<<<blockCount, threadCount, taps * sizeof(float)>>>(dev_filtered, dev_values, dev_coeffs, size, taps); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error: %s!\n", cudaGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(filtered, dev_filtered, (size - taps) * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } fprintf(stdout, "Parallel end time: %d\n", time(0)); fprintf(stdout, "Parallel run time: %d\n", time(0) - start_time); Error: cudaFree(dev_values); cudaFree(dev_coeffs); cudaFree(dev_filtered); return cudaStatus; } /*int main() { const float EPSILON = 1.0e-3f; const int size = 1048576;//131072;//4194304; const int taps = 1024;//8192;//8192; float *values; float *coeffs; float *serial_filtered; float *parallel_filtered; values = (float *)malloc(size * sizeof(float)); coeffs = (float *)malloc(taps * sizeof(float)); serial_filtered = (float *)malloc((size - taps) * sizeof(float)); parallel_filtered = (float *)malloc((size - taps) * sizeof(float)); // Initialise data for (unsigned int i = 0; i < size; ++i) { values[i] = (float)sin(2. * M_PI * (i / (double)size)); } for (unsigned int i = 0; i < taps; ++i) { coeffs[i] = (float)cos(2. * M_PI * (i / (double)taps)); } // Run serial kernel serialFir(serial_filtered, values, coeffs, size, taps); // Run parallel fir cudaError_t cudaStatus = firWithCuda(parallel_filtered, values, coeffs, size, taps); if (cudaStatus != cudaSuccess) { fprintf(stderr, "firWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } // Check output for (unsigned int i = 0; i < size - taps; ++i) { if (fabs(serial_filtered[i] - parallel_filtered[i]) > EPSILON) { fprintf(stdout, "Mismatch at %i, expected: %f, got %f\n", i, serial_filtered[i], parallel_filtered[i]); } } // Clean up free(values); free(coeffs); free(serial_filtered); free(parallel_filtered); return 0; }*/
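One caveat in firKernel above: the coefficient-staging loop starts i at 0 and increments by blockDim.x, so every thread writes only s_coeffs[0], s_coeffs[blockDim.x], ..., and most of the shared array is never filled. The cooperative load it appears to intend strides by thread index instead (a sketch using the kernel's own names, dropped in place of the staging loop):

for (unsigned int idx = threadIdx.x; idx < taps; idx += blockDim.x)
    s_coeffs[idx] = coeffs[idx];   // the block as a whole covers all taps
__syncthreads();                   // existing barrier, still required before the MAC loop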
c2ec01b524bd1b53d5a54db6bfbf4dfe086ecfcc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r111;\n\t" " .reg .s32 %r112;\n\t" " .reg .s32 %r113;\n\t" " .reg .s32 %r114;\n\t" " .reg .s32 %r115;\n\t" " .reg .s32 %r116;\n\t" " .reg .s32 %r117;\n\t" " .reg .s32 %r118;\n\t" " .reg .s32 %r119;\n\t" " .reg .s32 %r120;\n\t" " .reg .s32 %r121;\n\t" " .reg .s32 %r122;\n\t" " .reg .s32 %r123;\n\t" " .reg .s32 %r124;\n\t" " .reg .s32 %r125;\n\t" " .reg .s32 %r126;\n\t" " .reg .s32 %r127;\n\t" " .reg .s32 %r128;\n\t" "mov.s32 %r112, 44;\n\t" "mov.s32 %r113, %r112;\n\t" "mov.s32 %r114, 22;\n\t" "mov.s32 %r115, 33;\n\t" "mov.s32 %r116, 123;\n\t" "mov.s32 %r117, 242;\n\t" "mov.s32 %r118, 334;\n\t" "mov.s32 %r119, 562;\n\t" "mov.s32 %r120, 256;\n\t" "mov.s32 %r121, 156;\n\t" "mov.s32 %r122, 256;\n\t" "mov.s32 %r123, 556;\n\t" "mov.s32 %r124, 856;\n\t" "mov.s32 %r125, 356;\n\t" "mov.s32 %r126, 556;\n\t" "mov.s32 %r127, 656;\n\t" "mov.s32 %r128, 56;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" 
"and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, 
%r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" 
"threads active per warp" << std::endl; } int main(int argc, char **argv) { if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); // hipMalloc((void**)&d_A, sizeof(float)); // hipMalloc((void**)&d_B, sizeof(float)); // hipMalloc((void**)&d_C, sizeof(float)); hipMalloc((void**)&d_res, sizeof(float)); // hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); // hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations); hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; hipEventDestroy(start); hipEventDestroy(stop); hipDeviceSynchronize(); hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost); return 0; }
c2ec01b524bd1b53d5a54db6bfbf4dfe086ecfcc.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r111;\n\t" " .reg .s32 %r112;\n\t" " .reg .s32 %r113;\n\t" " .reg .s32 %r114;\n\t" " .reg .s32 %r115;\n\t" " .reg .s32 %r116;\n\t" " .reg .s32 %r117;\n\t" " .reg .s32 %r118;\n\t" " .reg .s32 %r119;\n\t" " .reg .s32 %r120;\n\t" " .reg .s32 %r121;\n\t" " .reg .s32 %r122;\n\t" " .reg .s32 %r123;\n\t" " .reg .s32 %r124;\n\t" " .reg .s32 %r125;\n\t" " .reg .s32 %r126;\n\t" " .reg .s32 %r127;\n\t" " .reg .s32 %r128;\n\t" "mov.s32 %r112, 44;\n\t" "mov.s32 %r113, %r112;\n\t" "mov.s32 %r114, 22;\n\t" "mov.s32 %r115, 33;\n\t" "mov.s32 %r116, 123;\n\t" "mov.s32 %r117, 242;\n\t" "mov.s32 %r118, 334;\n\t" "mov.s32 %r119, 562;\n\t" "mov.s32 %r120, 256;\n\t" "mov.s32 %r121, 156;\n\t" "mov.s32 %r122, 256;\n\t" "mov.s32 %r123, 556;\n\t" "mov.s32 %r124, 856;\n\t" "mov.s32 %r125, 356;\n\t" "mov.s32 %r126, 556;\n\t" "mov.s32 %r127, 656;\n\t" "mov.s32 %r128, 56;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" 
"and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, 
%r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" "and.b32 %r113, %r111, %r113;\n\t" "and.b32 %r114, %r111, %r114;\n\t" "and.b32 %r115, %r111, %r115;\n\t" "and.b32 %r116, %r111, %r116;\n\t" "and.b32 %r117, %r111, %r117;\n\t" "and.b32 %r118, %r111, %r118;\n\t" "and.b32 %r119, %r111, %r119;\n\t" "and.b32 %r120, %r111, %r120;\n\t" "and.b32 %r121, %r111, %r121;\n\t" "and.b32 %r122, %r111, %r122;\n\t" "and.b32 %r123, %r111, %r123;\n\t" "and.b32 %r124, %r111, %r124;\n\t" "and.b32 %r125, %r111, %r125;\n\t" "and.b32 %r126, %r111, %r126;\n\t" "and.b32 %r127, %r111, %r127;\n\t" "and.b32 %r128, %r111, %r128;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } int main(int argc, char **argv) 
{ if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); // cudaMalloc((void**)&d_A, sizeof(float)); // cudaMalloc((void**)&d_B, sizeof(float)); // cudaMalloc((void**)&d_C, sizeof(float)); cudaMalloc((void**)&d_res, sizeof(float)); // cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); // compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations); compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceSynchronize(); cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost); return 0; }
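In both the HIP and CUDA versions of this divergence microbenchmark, the global pointer h_res is declared but never allocated before the final cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost), so that copy targets a null host pointer and will fail. The sketch below shows one way the single-float result could be staged on the host instead; copy_back_result and host_result are hypothetical names introduced here, not part of the file above.

// Sketch (assumption, not part of the original benchmark): copy the single-float
// device result into stack storage rather than the never-allocated global h_res.
#include <cstdio>
#include <cuda_runtime.h>

static void copy_back_result(const float* d_res) {
    float host_result = 0.0f;
    cudaError_t err = cudaMemcpy(&host_result, d_res, sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        std::fprintf(stderr, "copy back failed: %s\n", cudaGetErrorString(err));
        return;
    }
    std::printf("result = %f\n", host_result);
}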
181c2217b7cefb45aa0d81366d1897a74a316844.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>

// #define SIZE 1000
#define SIZE 10

// Kernel function definition.
// Note: despite its name, this kernel computes the minimum of a[], and every
// thread writes *c without synchronization (a data race).
__global__ void max(int *a, int *c)
{
    int i = threadIdx.x;  // initialize i to thread ID
    *c = a[0];
    if (a[i] < *c)
    {
        *c = a[i];
    }
}

int main()
{
    int i;
    srand(time(NULL));  // makes use of the computer's internal clock to control the choice of the seed
    int a[SIZE] = {12, 4, 7, 3, 9, 5, 11, 6, 1, 76};
    int c;
    int *dev_a, *dev_c;  // GPU / device parameters

    hipMalloc((void **)&dev_a, SIZE * sizeof(int));  // assign memory to parameters on GPU from CUDA runtime API
    hipMalloc((void **)&dev_c, SIZE * sizeof(int));

    // for (i = 0; i < SIZE; i++)
    // {
    //     a[i] = i;  // input the numbers
    // }

    hipMemcpy(dev_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);  // copy the array from CPU to GPU

    hipLaunchKernelGGL((max), dim3(1), dim3(SIZE), 0, 0, dev_a, dev_c);  // call kernel function <<<number of blocks, number of threads>>>

    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);  // copy the single int result back from GPU to CPU

    printf("\nmin = %d ", c);

    hipFree(dev_a);  // Free the allocated memory
    hipFree(dev_c);

    printf("");
    return 0;
}
181c2217b7cefb45aa0d81366d1897a74a316844.cu
#include <cuda.h>
#include <stdio.h>
#include <time.h>

// #define SIZE 1000
#define SIZE 10

// Kernel function definition.
// Note: despite its name, this kernel computes the minimum of a[], and every
// thread writes *c without synchronization (a data race).
__global__ void max(int *a, int *c)
{
    int i = threadIdx.x;  // initialize i to thread ID
    *c = a[0];
    if (a[i] < *c)
    {
        *c = a[i];
    }
}

int main()
{
    int i;
    srand(time(NULL));  // makes use of the computer's internal clock to control the choice of the seed
    int a[SIZE] = {12, 4, 7, 3, 9, 5, 11, 6, 1, 76};
    int c;
    int *dev_a, *dev_c;  // GPU / device parameters

    cudaMalloc((void **)&dev_a, SIZE * sizeof(int));  // assign memory to parameters on GPU from CUDA runtime API
    cudaMalloc((void **)&dev_c, SIZE * sizeof(int));

    // for (i = 0; i < SIZE; i++)
    // {
    //     a[i] = i;  // input the numbers
    // }

    cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);  // copy the array from CPU to GPU

    max<<<1, SIZE>>>(dev_a, dev_c);  // call kernel function <<<number of blocks, number of threads>>>

    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);  // copy the single int result back from GPU to CPU

    printf("\nmin = %d ", c);

    cudaFree(dev_a);  // Free the allocated memory
    cudaFree(dev_c);

    printf("");
    return 0;
}
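Because every thread in the kernel above writes *c unsynchronized, the reported minimum depends on warp scheduling. A race-free formulation would use an atomic read-modify-write; the sketch below is one such variant (find_min is a hypothetical name, and the host-side seeding shown in comments is an assumption, not the file's own code).

#include <cuda_runtime.h>

// Sketch of a race-free minimum using atomicMin.
__global__ void find_min(const int *a, int *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicMin(c, a[i]);  // atomic read-modify-write avoids the data race
    }
}

// Host side (assumed usage): seed the result with INT_MAX before launching, e.g.
//   int init = INT_MAX;
//   cudaMemcpy(dev_c, &init, sizeof(int), cudaMemcpyHostToDevice);
//   find_min<<<(n + 255) / 256, 256>>>(dev_a, dev_c, n);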
31bc972a565e3befc852f730062a5d8d11656931.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SoftShrink.cu"
#else

#include "../common.h"

void THNN_(SoftShrink_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           accreal lambda_)
{
  real lambda = ScalarConvert<accreal, real>::to(lambda_);
  THCUNN_assertSameGPU(state, 2, input, output);
  THCTensor_(resizeAs)(state, output, input);
  THC_pointwiseApply2(state, output, input, SoftShrinkUpdateOutput<real>(lambda));
  THCudaCheck(hipGetLastError());
}

void THNN_(SoftShrink_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           accreal lambda_)
{
  real lambda = ScalarConvert<accreal, real>::to(lambda_);
  THCUNN_check_nElement(state, input, gradOutput);
  THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
  THCTensor_(resizeAs)(state, gradInput, input);
  THC_pointwiseApply3(state, gradInput, input, gradOutput, SoftShrinkUpdateGradInput<real>(lambda));
  THCudaCheck(hipGetLastError());
}

#endif
31bc972a565e3befc852f730062a5d8d11656931.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SoftShrink.cu"
#else

#include "../common.h"

void THNN_(SoftShrink_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *output,
           accreal lambda_)
{
  real lambda = ScalarConvert<accreal, real>::to(lambda_);
  THCUNN_assertSameGPU(state, 2, input, output);
  THCTensor_(resizeAs)(state, output, input);
  THC_pointwiseApply2(state, output, input, SoftShrinkUpdateOutput<real>(lambda));
  THCudaCheck(cudaGetLastError());
}

void THNN_(SoftShrink_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           accreal lambda_)
{
  real lambda = ScalarConvert<accreal, real>::to(lambda_);
  THCUNN_check_nElement(state, input, gradOutput);
  THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
  THCTensor_(resizeAs)(state, gradInput, input);
  THC_pointwiseApply3(state, gradInput, input, gradOutput, SoftShrinkUpdateGradInput<real>(lambda));
  THCudaCheck(cudaGetLastError());
}

#endif
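The two THCUNN entry points above only dispatch elementwise functors; soft-shrinkage itself is y = x - lambda for x > lambda, y = x + lambda for x < -lambda, and 0 otherwise. The standalone kernel below sketches the same forward rule in plain CUDA, independent of the THC functor machinery; softshrink_forward is a name introduced here for illustration.

#include <cuda_runtime.h>

// Sketch of the soft-shrinkage forward rule applied elementwise (float only).
__global__ void softshrink_forward(const float* x, float* y, float lambda, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float v = x[i];
    if (v > lambda)       y[i] = v - lambda;   // shrink positive values toward zero
    else if (v < -lambda) y[i] = v + lambda;   // shrink negative values toward zero
    else                  y[i] = 0.0f;         // zero out the [-lambda, lambda] band
}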
4d90a389142d0db757d40554f988c80dbd658841.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <limits> #include <ATen/ATen.h> #include <ATen/MemoryOverlap.h> #include <ATen/WrapDimUtils.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/core/Array.h> #include <ATen/hip/cub.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <ATen/native/hip/SortingCommon.cuh> namespace at { namespace native { bool should_use_small_sort(const Tensor &self, int64_t dim) { int64_t ndim = self.dim(); int64_t nsort = self.sizes()[dim]; int64_t threshold; if (self.scalar_type() == kLong || self.scalar_type() == kDouble) { threshold = 1024; } else { threshold = 2048; } return nsort <= threshold; } std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim); void fillSliceWithIndex(Tensor& t,int dim) { if (t.numel()) { auto sizes = DimVector(t.dim(), 1); sizes[dim] = t.sizes()[dim]; auto range = at::arange(t.sizes()[dim], t.options()); auto rangeview = range.view(sizes); t.copy_(rangeview); } } // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically void sortKeyValueInplace(const Tensor& key, const Tensor& value, int dim, bool dir) { TORCH_CHECK(key.sizes() == value.sizes(), "Key tensor must have same size as value tensor"); int dims = value.dim(); TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions"); // if key and value tensors have the same size, we do not need to check both ptrdiff_t inElements = key.numel(); if (inElements == 0) { return; } int64_t keySliceSize = key.size(dim); ptrdiff_t keySlices = inElements / keySliceSize; // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. 
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present"); // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort"); #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ GTOp<scalar_t, true>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTOp<scalar_t, true>()); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } else { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ LTOp<scalar_t, true>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTOp<scalar_t, true>()); \ C10_HIP_KERNEL_LAUNCH_CHECK(); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ TORCH_INTERNAL_ASSERT(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] { if (at::cuda::detail::canUse32BitIndexMath(key)) { at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key); at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo = at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key); at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo = at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = 
valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } }); #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE } namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; } // We perform a segmented sort in cub with inputs that have // more than 1024/2048 elements along the selected dimension. // Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace). std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) { // this algorithm is always stable TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value."); TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3}; checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg}); bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense(); int64_t numel = self.numel(); int64_t ndim = self.dim(); dim = maybe_wrap_dim(dim, ndim); int64_t nsort = self.sizes()[dim]; TORCH_CHECK(nsort <= std::numeric_limits<int>::max(), "The dimension being sorted can not have more than INT_MAX elements."); const auto self_dtype = self.dtype(); // FIXME: remove this check once cub sort supports bool TORCH_CHECK(self_dtype != ScalarType::Bool, "Sort currently does not support bool dtype on CUDA."); TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble, "Sort currently does not support complex dtypes on CUDA."); if (ndim == 0) { if (!values.defined()) { values = self.clone(); } else { values.resize_as_(self); values.copy_(self); } if (!indices.defined()) { indices = at::zeros({}, self.options().dtype(kLong)); } else { indices.resize_as_(self); indices.zero_(); } return std::forward_as_tuple(values, indices); } // use inplace algorithm for smaller input sizes without stable=True if (should_use_small_sort(self, dim) && !stable.value()) { // from thc: sorted->values, indices->indices, input->self if (!values.defined()) { values = at::empty_like(self); } if (!indices.defined()) { indices = at::empty_like(self, self.options().dtype(kLong)); } // Make sure sufficient output space is allocated auto self_size = self.sizes(); at::native::resize_output(values, self_size); at::native::resize_output(indices, self_size); fillSliceWithIndex(indices, dim); // We sort k/v pairs in-place; copy unsorted input to output values.copy_(self); // Sort using our in-place k/v kernel that supports arbitrary // layout sortKeyValueInplace(values, indices, dim, descending); return std::forward_as_tuple(values, indices); } Tensor self_; bool newself = false; if (is_non_overlapping_and_dense && self.stride(dim) == 1) { self_ = self; } else { auto new_strides_unsort = infer_dense_strides_dim_last(self, dim); self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options()); self_.copy_(self); newself = true; } Tensor values_tmp, indices_tmp; void *values_ptr_; int64_t *indices_ptr; if (!values.defined()) { if (is_non_overlapping_and_dense) { values = at::empty_strided(self.sizes(), self.strides(), self.options()); } else { auto strides = at::infer_dense_strides(self.sizes(), self.strides()); values = at::empty_strided(self.sizes(), strides, self.options()); } } else 
{ TORCH_CHECK(self_.scalar_type() == values.scalar_type(), "Unexpected dtype for values, expect ", self_.scalar_type(), ", got ", values.scalar_type()); values.resize_as_(self); } if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) { values_ptr_ = values.data_ptr(); } else { values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options()); values_ptr_ = values_tmp.data_ptr(); } if (!indices.defined()) { if (is_non_overlapping_and_dense) { indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong)); } else { auto strides = at::infer_dense_strides(self.sizes(), self.strides()); indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong)); } } else { TORCH_CHECK(kLong == indices.scalar_type(), "Unexpected dtype for values, expect torch.long, got ", indices.scalar_type()); indices.resize_as_(self); } if (indices.strides() != self_.strides()) { indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong)); indices_ptr = indices_tmp.data_ptr<int64_t>(); } else { indices_ptr = indices.data_ptr<int64_t>(); } if (numel == 0) { return std::forward_as_tuple(values, indices); } int64_t numel_or_intmax = ::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nbatch = (numel_or_intmax / nsort) * nsort; #ifdef __HIP_PLATFORM_HCC__ constexpr bool is_rocm = true; #else constexpr bool is_rocm = false; #endif AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{ c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){ const scalar_t *self_ptr = self_.data_ptr<scalar_t>(); auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = ::min(remaining, nbatch); int64_t nsegments = n / nsort; auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous(); at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr, reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); }); }); if (values_tmp.defined()) { values.copy_(values_tmp); } if (indices_tmp.defined()) { indices.copy_(indices_tmp); } return std::forward_as_tuple(values, indices); } std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) { return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices); } std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) { Tensor values, indices; return sort_out_stable_cuda(self, stable, dim, descending, values, indices); } std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) { return sort_stable_cuda(self, /*stable=*/false, dim, descending); } }} // namespace at::native
4d90a389142d0db757d40554f988c80dbd658841.cu
#include <limits> #include <ATen/ATen.h> #include <ATen/MemoryOverlap.h> #include <ATen/WrapDimUtils.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/core/Array.h> #include <ATen/cuda/cub.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <ATen/native/cuda/SortingCommon.cuh> namespace at { namespace native { bool should_use_small_sort(const Tensor &self, int64_t dim) { int64_t ndim = self.dim(); int64_t nsort = self.sizes()[dim]; int64_t threshold; if (self.scalar_type() == kLong || self.scalar_type() == kDouble) { threshold = 1024; } else { threshold = 2048; } return nsort <= threshold; } std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim); void fillSliceWithIndex(Tensor& t,int dim) { if (t.numel()) { auto sizes = DimVector(t.dim(), 1); sizes[dim] = t.sizes()[dim]; auto range = at::arange(t.sizes()[dim], t.options()); auto rangeview = range.view(sizes); t.copy_(rangeview); } } // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically void sortKeyValueInplace(const Tensor& key, const Tensor& value, int dim, bool dir) { TORCH_CHECK(key.sizes() == value.sizes(), "Key tensor must have same size as value tensor"); int dims = value.dim(); TORCH_CHECK(dims <= MAX_DIMS, "value tensor has too many dimensions"); // if key and value tensors have the same size, we do not need to check both ptrdiff_t inElements = key.numel(); if (inElements == 0) { return; } int64_t keySliceSize = key.size(dim); ptrdiff_t keySlices = inElements / keySliceSize; // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. 
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment TORCH_INTERNAL_ASSERT(ceilPowerOf2 <= 2048, "sortKeyValueInplace only works for sizes <= 2048 at present"); // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(keySlices, grid), "Too many slices to sort"); #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ GTOp<scalar_t, true>, TYPE, SIZE> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTOp<scalar_t, true>()); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } else { \ bitonicSortKVInPlace<scalar_t, int64_t, A, -1, \ LTOp<scalar_t, true>, TYPE, SIZE> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTOp<scalar_t, true>()); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ TORCH_INTERNAL_ASSERT(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, key.scalar_type(), "sortKeyValueInplace", [&] { if (at::cuda::detail::canUse32BitIndexMath(key)) { at::cuda::detail::TensorInfo<scalar_t, unsigned int> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(key); at::cuda::detail::TensorInfo<int64_t, unsigned int> valueInfo = at::cuda::detail::getTensorInfo<int64_t, unsigned int>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); valueInfo.strides[collapseValueDim] = strideValue; if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { at::cuda::detail::TensorInfo<scalar_t, uint64_t> keyInfo = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(key); at::cuda::detail::TensorInfo<int64_t, uint64_t> valueInfo = at::cuda::detail::getTensorInfo<int64_t, uint64_t>(value); auto strideKey = keyInfo.strides[dim]; keyInfo.sizes[dim] = 1; int collapseKeyDim = keyInfo.collapseDims(dim); keyInfo.strides[collapseKeyDim] = strideKey; auto strideValue = valueInfo.strides[dim]; valueInfo.sizes[dim]=1; int collapseValueDim = valueInfo.collapseDims(dim); 
valueInfo.strides[collapseValueDim] = strideValue; // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } }); #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE } namespace { struct offset_t { int stride; int begin; __device__ int operator[](int i) { return stride * (begin + i); } }; } // We perform a segmented sort in cub with inputs that have // more than 1024/2048 elements along the selected dimension. // Otherwise, we do an inplace bitonic sort (see sortKeyValueInplace). std::tuple<Tensor &,Tensor &> sort_out_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) { // this algorithm is always stable TORCH_INTERNAL_ASSERT(stable.has_value(), "sort_out(): c10::optional<bool> for stable has to have value."); TensorArg self_arg{self, "self", 1}, values_arg{values, "values", 2}, indices_arg{indices, "indices", 3}; checkAllSameGPU(__func__, {self_arg, values_arg, indices_arg}); bool is_non_overlapping_and_dense = self.is_non_overlapping_and_dense(); int64_t numel = self.numel(); int64_t ndim = self.dim(); dim = maybe_wrap_dim(dim, ndim); int64_t nsort = self.sizes()[dim]; TORCH_CHECK(nsort <= std::numeric_limits<int>::max(), "The dimension being sorted can not have more than INT_MAX elements."); const auto self_dtype = self.dtype(); // FIXME: remove this check once cub sort supports bool TORCH_CHECK(self_dtype != ScalarType::Bool, "Sort currently does not support bool dtype on CUDA."); TORCH_CHECK(self_dtype != ScalarType::ComplexFloat && self_dtype != ScalarType::ComplexDouble, "Sort currently does not support complex dtypes on CUDA."); if (ndim == 0) { if (!values.defined()) { values = self.clone(); } else { values.resize_as_(self); values.copy_(self); } if (!indices.defined()) { indices = at::zeros({}, self.options().dtype(kLong)); } else { indices.resize_as_(self); indices.zero_(); } return std::forward_as_tuple(values, indices); } // use inplace algorithm for smaller input sizes without stable=True if (should_use_small_sort(self, dim) && !stable.value()) { // from thc: sorted->values, indices->indices, input->self if (!values.defined()) { values = at::empty_like(self); } if (!indices.defined()) { indices = at::empty_like(self, self.options().dtype(kLong)); } // Make sure sufficient output space is allocated auto self_size = self.sizes(); at::native::resize_output(values, self_size); at::native::resize_output(indices, self_size); fillSliceWithIndex(indices, dim); // We sort k/v pairs in-place; copy unsorted input to output values.copy_(self); // Sort using our in-place k/v kernel that supports arbitrary // layout sortKeyValueInplace(values, indices, dim, descending); return std::forward_as_tuple(values, indices); } Tensor self_; bool newself = false; if (is_non_overlapping_and_dense && self.stride(dim) == 1) { self_ = self; } else { auto new_strides_unsort = infer_dense_strides_dim_last(self, dim); self_ = at::empty_strided(self.sizes(), new_strides_unsort, self.options()); self_.copy_(self); newself = true; } Tensor values_tmp, indices_tmp; void *values_ptr_; int64_t *indices_ptr; if (!values.defined()) { if (is_non_overlapping_and_dense) { values = at::empty_strided(self.sizes(), self.strides(), self.options()); } else { auto strides = at::infer_dense_strides(self.sizes(), self.strides()); values = at::empty_strided(self.sizes(), strides, self.options()); } } else { TORCH_CHECK(self_.scalar_type() == values.scalar_type(), "Unexpected dtype for values, expect ", 
self_.scalar_type(), ", got ", values.scalar_type()); values.resize_as_(self); } if (values.strides() == self_.strides() && (newself || get_overlap_status(self, values) == MemOverlapStatus::NO)) { values_ptr_ = values.data_ptr(); } else { values_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options()); values_ptr_ = values_tmp.data_ptr(); } if (!indices.defined()) { if (is_non_overlapping_and_dense) { indices = at::empty_strided(self.sizes(), self.strides(), self.options().dtype(kLong)); } else { auto strides = at::infer_dense_strides(self.sizes(), self.strides()); indices = at::empty_strided(self.sizes(), strides, self.options().dtype(kLong)); } } else { TORCH_CHECK(kLong == indices.scalar_type(), "Unexpected dtype for values, expect torch.long, got ", indices.scalar_type()); indices.resize_as_(self); } if (indices.strides() != self_.strides()) { indices_tmp = at::empty_strided(self_.sizes(), self_.strides(), self_.options().dtype(kLong)); indices_ptr = indices_tmp.data_ptr<int64_t>(); } else { indices_ptr = indices.data_ptr<int64_t>(); } if (numel == 0) { return std::forward_as_tuple(values, indices); } int64_t numel_or_intmax = std::min(numel, static_cast<int64_t>(std::numeric_limits<int>::max())); int64_t nbatch = (numel_or_intmax / nsort) * nsort; #ifdef __HIP_PLATFORM_HCC__ constexpr bool is_rocm = true; #else constexpr bool is_rocm = false; #endif AT_DISPATCH_ALL_TYPES_AND3(kBool, kHalf, kBFloat16, self_.scalar_type(), "sort", [&]{ c10::guts::if_constexpr<!(is_rocm && std::is_same<scalar_t, c10::BFloat16>::value)>([&](auto _){ const scalar_t *self_ptr = self_.data_ptr<scalar_t>(); auto values_ptr = reinterpret_cast<scalar_t *>(values_ptr_); int64_t remaining = _(numel); while (remaining > 0) { int64_t n = std::min(remaining, nbatch); int64_t nsegments = n / nsort; auto reverse_indices = at::arange(nsort, indices.options()).view({1, nsort}).expand({nsegments, nsort}).contiguous(); at::cuda::cub::segmented_sort_pairs(self_ptr, values_ptr, reverse_indices.data_ptr<int64_t>(), indices_ptr, n, nsegments, offset_t{(int)nsort, 0}, offset_t{(int)nsort, 1}, descending); remaining -= n; self_ptr += n; values_ptr += n; indices_ptr += n; } }, [&](auto _){ TORCH_CHECK(_(false), "BFloat16 is not supported on ROCm"); }); }); if (values_tmp.defined()) { values.copy_(values_tmp); } if (indices_tmp.defined()) { indices.copy_(indices_tmp); } return std::forward_as_tuple(values, indices); } std::tuple<Tensor &,Tensor &> sort_out_cuda(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) { return sort_out_stable_cuda(self, /*stable=*/false, dim, descending, values, indices); } std::tuple<Tensor,Tensor> sort_stable_cuda(const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) { Tensor values, indices; return sort_out_stable_cuda(self, stable, dim, descending, values, indices); } std::tuple<Tensor,Tensor> sort_cuda(const Tensor & self, int64_t dim, bool descending) { return sort_stable_cuda(self, /*stable=*/false, dim, descending); } }} // namespace at::native
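In the sort implementation above, offset_t acts as a lightweight iterator over segment boundaries for the cub segmented sort: element i maps to stride * (begin + i), so with stride = nsort the begin iterator yields 0, nsort, 2*nsort, ... and the end iterator yields nsort, 2*nsort, ... The host-side sketch below only illustrates that mapping; offset_host and the example value nsort = 4 are assumptions, not the device functor itself.

// Illustration (host-side) of how offset_t-style indexing enumerates segment boundaries.
#include <cstdio>

struct offset_host {
    int stride;
    int begin;
    int operator[](int i) const { return stride * (begin + i); }
};

int main() {
    const int nsort = 4;               // elements per segment (assumed example value)
    offset_host begins{nsort, 0};      // yields 0, 4, 8, ...
    offset_host ends{nsort, 1};        // yields 4, 8, 12, ...
    for (int seg = 0; seg < 3; ++seg)
        std::printf("segment %d: [%d, %d)\n", seg, begins[seg], ends[seg]);
    return 0;
}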
69fa25283408bd628edf8208d223095317d291bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdbool.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <inttypes.h> #include <errno.h> #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) #define MSG_LEN 10 #define HASH_LEN 30 #define THREADS_PER_BLOCK 512 // Round constants __constant__ static const uint64_t RC[24] = { 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 }; // Rotation offsets __constant__ static const int r[24] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 }; __constant__ static const int piln[24] = { 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 }; // Updates the state with 24 rounds __device__ void keccakf(uint64_t *state){ int i, j; uint64_t temp, C[5]; for (int round = 0; round < 24; round++) { // Theta for (i = 0; i < 5; i++) { C[i] = state[i] ^ state[i + 5] ^ state[i + 10] ^ state[i + 15] ^ state[i + 20]; } for (i = 0; i < 5; i++) { temp = C[(i + 4) % 5] ^ ROTL64(C[(i + 1) % 5], 1); for (j = 0; j < 25; j += 5) { state[j + i] ^= temp; } } // Rho Pi temp = state[1]; for (i = 0; i < 24; i++) { j = piln[i]; C[0] = state[j]; state[j] = ROTL64(temp, r[i]); temp = C[0]; } // Chi for (j = 0; j < 25; j += 5) { for (i = 0; i < 5; i++) { C[i] = state[j + i]; } for (i = 0; i < 5; i++) { state[j + i] ^= (~C[(i + 1) % 5]) & C[(i + 2) % 5]; } } // Iota state[0] ^= RC[round]; } } __global__ void keccak__offset(uint8_t *message_, unsigned long numbytes){ int message_len = MSG_LEN; uint64_t state[25]; uint8_t temp[144]; int rsize = 136; // 200 - 2 * 32 int rsize_byte = 17; // rsize / 8 uint8_t message[MSG_LEN]; unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * MSG_LEN) + (threadIdx.x * MSG_LEN); if (offset >= numbytes) { return; } memcpy(message, &message_[offset], MSG_LEN); memset(state, 0, sizeof(state)); // for ( ; message_len >= rsize; message_len -= rsize, (uint8_t *)message += rsize) { for (int i = 0; i < rsize_byte; i++) { state[i] ^= ((uint64_t *) message)[i]; } keccakf(state); // } // Calculating the last state block and padding the result memcpy(temp, message, message_len); temp[message_len++] = 1; memset(temp + message_len, 0, rsize - message_len); temp[rsize - 1] |= 0x80; for (int i = 0; i < rsize_byte; i++) { state[i] ^= ((uint64_t *) temp)[i]; } keccakf(state); __syncthreads(); } void hashdemo(uint8_t *message_, unsigned long numbytes){ uint8_t *message; // uint8_t *ctx_key_d, *ctx_enckey_d; // RC[24] hipMemcpyToSymbol(RC, RC, sizeof(uint64_t)*24); hipMemcpyToSymbol(r, r, sizeof(int)*24); hipMemcpyToSymbol(piln, piln, sizeof(int)*24); hipMalloc((void**)&message, numbytes); hipMemcpy(message, message_, numbytes, hipMemcpyHostToDevice); dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * MSG_LEN))); dim3 dimGrid(THREADS_PER_BLOCK); hipLaunchKernelGGL(( keccak__offset), dim3(dimBlock), dim3(dimGrid), 0, 0, message, numbytes); hipFree(message); // hipFree(ctx_key_d); // 
hipFree(ctx_enckey_d); } __global__ void GPU_init() { } int main(){ // open file FILE *file; uint8_t *buf; // file buffer unsigned long numbytes; char *fname; clock_t start, enc_time, dec_time, end; int mili_sec, i; int padding; //key: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f /* create a key vector */ uint8_t key[32]; int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess){ printf("Error: %s\n", hipGetErrorString(error_id)); printf("Exiting...\n"); exit(EXIT_FAILURE); } // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0){ printf("There are no available device(s) that support CUDA\n"); exit(EXIT_FAILURE); } // handle txt file fname = "input.txt"; file = fopen(fname, "r"); if (file == NULL) {printf("File %s doesn't exist\n", fname); exit(1); } printf("Opened file %s\n", fname); fseek(file, 0L, SEEK_END); numbytes = ftell(file); printf("Size is %lu\n", numbytes); // copy file into memory fseek(file, 0L, SEEK_SET); buf = (uint8_t*)calloc(numbytes, sizeof(uint8_t)); if(buf == NULL) exit(1); if (fread(buf, 1, numbytes, file) != numbytes) { printf("Unable to read all bytes from file %s\n", fname); exit(EXIT_FAILURE); } fclose(file); // this is to force nvcc to put the gpu initialization here hipLaunchKernelGGL(( GPU_init), dim3(1), dim3(1), 0, 0, ); // encryption start = clock(); hashdemo(buf, numbytes); end = clock(); printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC); printf("CPU encryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC)); free(buf); return EXIT_SUCCESS; }
69fa25283408bd628edf8208d223095317d291bc.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdbool.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <inttypes.h> #include <errno.h> #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) #define MSG_LEN 10 #define HASH_LEN 30 #define THREADS_PER_BLOCK 512 // Round constants __constant__ static const uint64_t RC[24] = { 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 }; // Rotation offsets __constant__ static const int r[24] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 }; __constant__ static const int piln[24] = { 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 }; // Updates the state with 24 rounds __device__ void keccakf(uint64_t *state){ int i, j; uint64_t temp, C[5]; for (int round = 0; round < 24; round++) { // Theta for (i = 0; i < 5; i++) { C[i] = state[i] ^ state[i + 5] ^ state[i + 10] ^ state[i + 15] ^ state[i + 20]; } for (i = 0; i < 5; i++) { temp = C[(i + 4) % 5] ^ ROTL64(C[(i + 1) % 5], 1); for (j = 0; j < 25; j += 5) { state[j + i] ^= temp; } } // Rho Pi temp = state[1]; for (i = 0; i < 24; i++) { j = piln[i]; C[0] = state[j]; state[j] = ROTL64(temp, r[i]); temp = C[0]; } // Chi for (j = 0; j < 25; j += 5) { for (i = 0; i < 5; i++) { C[i] = state[j + i]; } for (i = 0; i < 5; i++) { state[j + i] ^= (~C[(i + 1) % 5]) & C[(i + 2) % 5]; } } // Iota state[0] ^= RC[round]; } } __global__ void keccak__offset(uint8_t *message_, unsigned long numbytes){ int message_len = MSG_LEN; uint64_t state[25]; uint8_t temp[144]; int rsize = 136; // 200 - 2 * 32 int rsize_byte = 17; // rsize / 8 uint8_t message[MSG_LEN]; unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * MSG_LEN) + (threadIdx.x * MSG_LEN); if (offset >= numbytes) { return; } memcpy(message, &message_[offset], MSG_LEN); memset(state, 0, sizeof(state)); // for ( ; message_len >= rsize; message_len -= rsize, (uint8_t *)message += rsize) { for (int i = 0; i < rsize_byte; i++) { state[i] ^= ((uint64_t *) message)[i]; } keccakf(state); // } // Calculating the last state block and padding the result memcpy(temp, message, message_len); temp[message_len++] = 1; memset(temp + message_len, 0, rsize - message_len); temp[rsize - 1] |= 0x80; for (int i = 0; i < rsize_byte; i++) { state[i] ^= ((uint64_t *) temp)[i]; } keccakf(state); __syncthreads(); } void hashdemo(uint8_t *message_, unsigned long numbytes){ uint8_t *message; // uint8_t *ctx_key_d, *ctx_enckey_d; // RC[24] cudaMemcpyToSymbol(RC, RC, sizeof(uint64_t)*24); cudaMemcpyToSymbol(r, r, sizeof(int)*24); cudaMemcpyToSymbol(piln, piln, sizeof(int)*24); cudaMalloc((void**)&message, numbytes); cudaMemcpy(message, message_, numbytes, cudaMemcpyHostToDevice); dim3 dimBlock(ceil((double)numbytes / (double)(THREADS_PER_BLOCK * MSG_LEN))); dim3 dimGrid(THREADS_PER_BLOCK); keccak__offset<<<dimBlock, dimGrid>>>(message, numbytes); cudaFree(message); // cudaFree(ctx_key_d); // cudaFree(ctx_enckey_d); } __global__ void GPU_init() { } int main(){ // open file FILE *file; uint8_t *buf; // file 
buffer unsigned long numbytes; char *fname; clock_t start, enc_time, dec_time, end; int mili_sec, i; int padding; //key: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f /* create a key vector */ uint8_t key[32]; int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess){ printf("Error: %s\n", cudaGetErrorString(error_id)); printf("Exiting...\n"); exit(EXIT_FAILURE); } // This function call returns 0 if there are no CUDA capable devices. if (deviceCount == 0){ printf("There are no available device(s) that support CUDA\n"); exit(EXIT_FAILURE); } // handle txt file fname = "input.txt"; file = fopen(fname, "r"); if (file == NULL) {printf("File %s doesn't exist\n", fname); exit(1); } printf("Opened file %s\n", fname); fseek(file, 0L, SEEK_END); numbytes = ftell(file); printf("Size is %lu\n", numbytes); // copy file into memory fseek(file, 0L, SEEK_SET); buf = (uint8_t*)calloc(numbytes, sizeof(uint8_t)); if(buf == NULL) exit(1); if (fread(buf, 1, numbytes, file) != numbytes) { printf("Unable to read all bytes from file %s\n", fname); exit(EXIT_FAILURE); } fclose(file); // this is to force nvcc to put the gpu initialization here GPU_init<<<1, 1>>>(); // encryption start = clock(); hashdemo(buf, numbytes); end = clock(); printf("time used:%f\n", (double)(end - start) / CLOCKS_PER_SEC); printf("CPU encryption throughput: %f bytes/second\n", (double)(numbytes) / ((double)(end - start) / CLOCKS_PER_SEC)); free(buf); return EXIT_SUCCESS; }
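In hashdemo above, cudaMemcpyToSymbol(RC, RC, sizeof(uint64_t)*24) (and likewise for r and piln) passes the __constant__ symbol as both destination and source; since those tables carry static initializers they are already populated at module load, so the calls are at best redundant. The usual pattern stages a separate host-side table into the device symbol, sketched below; d_RC, h_RC, and upload_round_constants are names introduced for illustration and are not part of the file above.

#include <cuda_runtime.h>
#include <cstdint>

// Device-side round-constant table, filled from the host at startup.
__constant__ uint64_t d_RC[24];

// Host-side copy of the same constants (first two values shown; the rest follow the table above).
static const uint64_t h_RC[24] = { 0x0000000000000001ULL, 0x0000000000008082ULL /* ... */ };

static void upload_round_constants() {
    // Copy host data into the __constant__ symbol; source and destination differ.
    cudaMemcpyToSymbol(d_RC, h_RC, sizeof(h_RC));
}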
043b109216a2b639960380f1c75f0bd73b076b0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h> __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
043b109216a2b639960380f1c75f0bd73b076b0a.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h> __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::kMinBlocksPerSm) fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, false, true, false, 64, 64, 64>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassB_f16_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
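/*
 * A minimal standalone sketch of the architecture-guard pattern used by the
 * generated kernels above: the kernel body is only compiled in for the SM
 * range it was written for, and any other device-side build falls through to
 * a runtime error message. The kernel name and payload below are illustrative
 * placeholders, not part of the generated file.
 */
#include <cstdio>

__global__ void arch_guard_demo_sm70(float* out) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  // Real work would go here (e.g. the attention backward pass).
  out[threadIdx.x] = static_cast<float>(threadIdx.x);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `arch_guard_demo_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}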
86aa7c05e3338a95c4f96cb1e3179deba4a2578e.hip
// !!! This is a file automatically generated by hipify!!!
#include "BrickIndexGPU.h"

#include <hip/hip_runtime.h>

#include "cudaUtil.h"

#include <iostream>


//__constant__ BrickIndexGPU c_brickIndex;
BrickIndexGPU g_brickIndex;


void BrickIndexGPU::Allocate(bool cpuTracing, uint brickCountNew, uint2 slotCountNew)
{
    brickCount = brickCountNew;
    slotCount = slotCountNew;

    if(cpuTracing)
    {
        pBrickToSlot = new uint2[brickCount];
    }
    else
    {
        cudaSafeCall(cudaMalloc2(&dpBrickToSlot, brickCount * sizeof(uint2)));
        cudaSafeCall(cudaMalloc2(&dpSlotTimestepMin, slotCount.x * slotCount.y * sizeof(uint)));
        cudaSafeCall(cudaMalloc2(&dpSlotTimestepMax, slotCount.x * slotCount.y * sizeof(uint)));
    }
}

void BrickIndexGPU::Deallocate()
{
    if(dpSlotTimestepMax)
    {
        cudaSafeCall(hipFree(dpSlotTimestepMax));
        dpSlotTimestepMax = nullptr;
    }
    if(dpSlotTimestepMin)
    {
        cudaSafeCall(hipFree(dpSlotTimestepMin));
        dpSlotTimestepMin = nullptr;
    }
    if(dpBrickToSlot)
    {
        cudaSafeCall(hipFree(dpBrickToSlot));
        dpBrickToSlot = nullptr;
    }
    delete[] pBrickToSlot;
    pBrickToSlot = nullptr;
}

void BrickIndexGPU::Update(bool cpuTracing, const uint2* pBrickToSlot, const uint* pSlotTimestepMin, const uint* pSlotTimestepMax)
{
    if(cpuTracing)
    {
        memcpy(this->pBrickToSlot, pBrickToSlot, brickCount * sizeof(uint2));
    }
    else
    {
        cudaSafeCall(hipMemcpyAsync(dpBrickToSlot, pBrickToSlot, brickCount * sizeof(uint2), hipMemcpyHostToDevice));
        if(pSlotTimestepMin != nullptr)
            cudaSafeCall(hipMemcpyAsync(dpSlotTimestepMin, pSlotTimestepMin, slotCount.x * slotCount.y * sizeof(uint), hipMemcpyHostToDevice));
        if(pSlotTimestepMax != nullptr)
            cudaSafeCall(hipMemcpyAsync(dpSlotTimestepMax, pSlotTimestepMax, slotCount.x * slotCount.y * sizeof(uint), hipMemcpyHostToDevice));
    }
}

void BrickIndexGPU::Upload(bool cpuTracing) const
{
    if(cpuTracing)
        memcpy(&g_brickIndex, this, sizeof(g_brickIndex));
    //else
    //    cudaSafeCall(hipMemcpyToSymbolAsync(c_brickIndex, this, sizeof(*this), 0, hipMemcpyHostToDevice));
}
86aa7c05e3338a95c4f96cb1e3179deba4a2578e.cu
#include "BrickIndexGPU.h" #include <cuda_runtime.h> #include "cudaUtil.h" #include <iostream> //__constant__ BrickIndexGPU c_brickIndex; BrickIndexGPU g_brickIndex; void BrickIndexGPU::Allocate(bool cpuTracing, uint brickCountNew, uint2 slotCountNew) { brickCount = brickCountNew; slotCount = slotCountNew; if(cpuTracing) { pBrickToSlot = new uint2[brickCount]; } else { cudaSafeCall(cudaMalloc2(&dpBrickToSlot, brickCount * sizeof(uint2))); cudaSafeCall(cudaMalloc2(&dpSlotTimestepMin, slotCount.x * slotCount.y * sizeof(uint))); cudaSafeCall(cudaMalloc2(&dpSlotTimestepMax, slotCount.x * slotCount.y * sizeof(uint))); } } void BrickIndexGPU::Deallocate() { if(dpSlotTimestepMax) { cudaSafeCall(cudaFree(dpSlotTimestepMax)); dpSlotTimestepMax = nullptr; } if(dpSlotTimestepMin) { cudaSafeCall(cudaFree(dpSlotTimestepMin)); dpSlotTimestepMin = nullptr; } if(dpBrickToSlot) { cudaSafeCall(cudaFree(dpBrickToSlot)); dpBrickToSlot = nullptr; } delete[] pBrickToSlot; pBrickToSlot = nullptr; } void BrickIndexGPU::Update(bool cpuTracing, const uint2* pBrickToSlot, const uint* pSlotTimestepMin, const uint* pSlotTimestepMax) { if(cpuTracing) { memcpy(this->pBrickToSlot, pBrickToSlot, brickCount * sizeof(uint2)); } else { cudaSafeCall(cudaMemcpyAsync(dpBrickToSlot, pBrickToSlot, brickCount * sizeof(uint2), cudaMemcpyHostToDevice)); if(pSlotTimestepMin != nullptr) cudaSafeCall(cudaMemcpyAsync(dpSlotTimestepMin, pSlotTimestepMin, slotCount.x * slotCount.y * sizeof(uint), cudaMemcpyHostToDevice)); if(pSlotTimestepMax != nullptr) cudaSafeCall(cudaMemcpyAsync(dpSlotTimestepMax, pSlotTimestepMax, slotCount.x * slotCount.y * sizeof(uint), cudaMemcpyHostToDevice)); } } void BrickIndexGPU::Upload(bool cpuTracing) const { if(cpuTracing) memcpy(&g_brickIndex, this, sizeof(g_brickIndex)); //else // cudaSafeCall(cudaMemcpyToSymbolAsync(c_brickIndex, this, sizeof(*this), 0, cudaMemcpyHostToDevice)); }
2a5e42afb57ee4a7a0e9bff7dee0a6e8f1ec708c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "diffusion_cuda_opt.h" #include "common/cuda_util.h" namespace diffusion { namespace cuda_opt { // loop peeling, register blocking, z-dim blocking, unrolling __global__ void kernel3d_opt1(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int xy = nx * ny; int c = OFFSET3D(i, j, k, nx, ny); int w = (i == 0) ? c : c - 1; int e = (i == nx-1) ? c : c + 1; int s = (j == 0) ? c : c - nx; int n = (j == ny-1) ? c : c + nx; REAL t1, t2, t3; t2 = f1[c]; t1 = (k == 0) ? t2 : f1[c-xy]; t3 = f1[c+xy]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; PRAGMA_UNROLL for (; k < k_end-1; ++k) { t1 = t2; t2 = t3; t3 = f1[c+xy]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; } t1 = t2; t2 = t3; t3 = (k < nz-1) ? f1[c+xy] : t3; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; return; } // opt1 + prefetch __global__ void kernel3d_opt2(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int xy = nx * ny; int c = i + j * nx + k * xy; int w = (i == 0) ? c : c - 1; int e = (i == nx-1) ? c : c + 1; int s = (j == 0) ? c : c - nx; int n = (j == ny-1) ? c : c + nx; REAL t1, t2, t3, t4; t2 = f1[c]; t1 = (k == 0) ? t2 : f1[c-xy]; t3 = f1[c+xy]; t4 = f1[c+xy*2]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; PRAGMA_UNROLL for (; k < k_end-2; ++k) { t1 = t2; t2 = t3; t3 = t4; t4 = f1[c+xy*2]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; } t1 = t2; t2 = t3; t3 = t4; t4 = (k < nz-2) ? 
f1[c+xy*2] : t4; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; t1 = t2; t2 = t3; t3 = t4; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; return; } } // namespace cuda_opt void DiffusionCUDAOpt1::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_opt::kernel3d_opt1, hipFuncCachePreferL1)); } void DiffusionCUDAOpt1::RunKernel(int count) { assert(ndim_ == 3); size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_, block_z_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_); CHECK_CUDA(hipEventRecord(ev1_)); for (int i = 0; i < count; ++i) { hipLaunchKernelGGL(( cuda_opt::kernel3d_opt1), dim3(grid_dim), dim3(block_dim), 0, 0, f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(hipEventRecord(ev2_)); FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost)); return; } void DiffusionCUDAOpt2::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(hipFuncSetCacheConfig(cuda_opt::kernel3d_opt2, hipFuncCachePreferL1)); } void DiffusionCUDAOpt2::RunKernel(int count) { assert(ndim_ == 3); size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_, block_z_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_); CHECK_CUDA(hipEventRecord(ev1_)); for (int i = 0; i < count; ++i) { hipLaunchKernelGGL(( cuda_opt::kernel3d_opt2), dim3(grid_dim), dim3(block_dim), 0, 0, f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(hipEventRecord(ev2_)); FORCE_CHECK_CUDA(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost)); return; } }
2a5e42afb57ee4a7a0e9bff7dee0a6e8f1ec708c.cu
#include "diffusion_cuda_opt.h" #include "common/cuda_util.h" namespace diffusion { namespace cuda_opt { // loop peeling, register blocking, z-dim blocking, unrolling __global__ void kernel3d_opt1(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int xy = nx * ny; int c = OFFSET3D(i, j, k, nx, ny); int w = (i == 0) ? c : c - 1; int e = (i == nx-1) ? c : c + 1; int s = (j == 0) ? c : c - nx; int n = (j == ny-1) ? c : c + nx; REAL t1, t2, t3; t2 = f1[c]; t1 = (k == 0) ? t2 : f1[c-xy]; t3 = f1[c+xy]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; PRAGMA_UNROLL for (; k < k_end-1; ++k) { t1 = t2; t2 = t3; t3 = f1[c+xy]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; } t1 = t2; t2 = t3; t3 = (k < nz-1) ? f1[c+xy] : t3; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; return; } // opt1 + prefetch __global__ void kernel3d_opt2(F1_DECL f1, F2_DECL f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; const int block_z = nz / gridDim.z; int k = block_z * blockIdx.z; const int k_end = k + block_z; int xy = nx * ny; int c = i + j * nx + k * xy; int w = (i == 0) ? c : c - 1; int e = (i == nx-1) ? c : c + 1; int s = (j == 0) ? c : c - nx; int n = (j == ny-1) ? c : c + nx; REAL t1, t2, t3, t4; t2 = f1[c]; t1 = (k == 0) ? t2 : f1[c-xy]; t3 = f1[c+xy]; t4 = f1[c+xy*2]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; PRAGMA_UNROLL for (; k < k_end-2; ++k) { t1 = t2; t2 = t3; t3 = t4; t4 = f1[c+xy*2]; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; } t1 = t2; t2 = t3; t3 = t4; t4 = (k < nz-2) ? 
f1[c+xy*2] : t4; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; c += xy; w += xy; e += xy; n += xy; s += xy; ++k; t1 = t2; t2 = t3; t3 = t4; f2[c] = cc * t2 + cw * f1[w] + ce * f1[e] + cs * f1[s] + cn * f1[n] + cb * t1 + ct * t3; return; } } // namespace cuda_opt void DiffusionCUDAOpt1::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_opt::kernel3d_opt1, cudaFuncCachePreferL1)); } void DiffusionCUDAOpt1::RunKernel(int count) { assert(ndim_ == 3); size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_, block_z_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_); CHECK_CUDA(cudaEventRecord(ev1_)); for (int i = 0; i < count; ++i) { cuda_opt::kernel3d_opt1<<<grid_dim, block_dim>>> (f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(cudaEventRecord(ev2_)); FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost)); return; } void DiffusionCUDAOpt2::Setup() { DiffusionCUDA::Setup(); FORCE_CHECK_CUDA(cudaFuncSetCacheConfig(cuda_opt::kernel3d_opt2, cudaFuncCachePreferL1)); } void DiffusionCUDAOpt2::RunKernel(int count) { assert(ndim_ == 3); size_t s = sizeof(REAL) * nx_ * ny_ * nz_; FORCE_CHECK_CUDA(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice)); dim3 block_dim(block_x_, block_y_, block_z_); dim3 grid_dim(nx_ / block_x_, ny_ / block_y_, grid_z_); CHECK_CUDA(cudaEventRecord(ev1_)); for (int i = 0; i < count; ++i) { cuda_opt::kernel3d_opt2<<<grid_dim, block_dim>>> (f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CHECK_CUDA(cudaEventRecord(ev2_)); FORCE_CHECK_CUDA(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost)); return; } }
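/*
 * A minimal sketch of the z-direction register blocking used by kernel3d_opt1
 * above: each thread owns one (i,j) column and marches along z, keeping the
 * three z-neighbours in registers so each f1 element is loaded from global
 * memory only once. Names and the reduced 3-point stencil are illustrative;
 * boundary handling is omitted (the real kernels peel the first and last
 * iterations instead).
 */
__global__ void column_sweep_sketch(const float* __restrict__ f1, float* f2,
                                    int nx, int ny, int nz,
                                    float cb, float cc, float ct) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  int j = blockDim.y * blockIdx.y + threadIdx.y;
  if (i >= nx || j >= ny) return;
  int xy = nx * ny;
  int c = i + j * nx + xy;          // start at k = 1 (interior only)
  float t1 = f1[c - xy];            // value at k-1
  float t2 = f1[c];                 // value at k
  for (int k = 1; k < nz - 1; ++k) {
    float t3 = f1[c + xy];          // k+1: the only new load per step
    f2[c] = cb * t1 + cc * t2 + ct * t3;
    t1 = t2;                        // slide the register window down the column
    t2 = t3;
    c += xy;
  }
}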
184130f258f8417e3200b2c34ae3cc8b657667b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* References: * * Harish, Pawan, and P. J. Narayanan. * "Accelerating large graph algorithms on the GPU using CUDA." * International conference on high-performance computing. * Springer, Berlin, Heidelberg, 2007. * * Hong, Sungpack, et al. * "Accelerating CUDA graph algorithms at maximum warp." * Acm Sigplan Notices 46.8 (2011): 267-276. * * Lifeng Nai, Yinglong Xia, Ilie G. Tanase, Hyesoon Kim, and Ching-Yung Lin. * GraphBIG: Understanding Graph Computing in the Context of Industrial Solutions, * In the proccedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC), * Nov. 2015 * */ #include "helper_emogi.h" #define MYINFINITY 0xFFFFFFFF #define MEM_ALIGN MEM_ALIGN_32 typedef uint32_t EdgeT; __global__ void kernel_baseline(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; if(tid < vertex_count && label[tid] == level) { const uint64_t start = vertexList[tid]; const uint64_t end = vertexList[tid+1]; for(uint64_t i = start; i < end; i++) { const EdgeT next = edgeList[i]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } __global__ void kernel_coalesce(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; const uint64_t warpIdx = tid >> WARP_SHIFT; const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1); if(warpIdx < vertex_count && label[warpIdx] == level) { const uint64_t start = vertexList[warpIdx]; const uint64_t shift_start = start & MEM_ALIGN; const uint64_t end = vertexList[warpIdx+1]; for(uint64_t i = shift_start + laneIdx; i < end; i += WARP_SIZE) { if (i >= start) { const EdgeT next = edgeList[i]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } } __global__ void kernel_coalesce_chunk(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; const uint64_t warpIdx = tid >> WARP_SHIFT; const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1); const uint64_t chunkIdx = warpIdx * CHUNK_SIZE; uint64_t chunk_size = CHUNK_SIZE; if((chunkIdx + CHUNK_SIZE) > vertex_count) { if ( vertex_count > chunkIdx ) chunk_size = vertex_count - chunkIdx; else return; } for(uint32_t i = chunkIdx; i < chunk_size + chunkIdx; i++) { if(label[i] == level) { const uint64_t start = vertexList[i]; const uint64_t shift_start = start & MEM_ALIGN; const uint64_t end = vertexList[i+1]; for(uint64_t j = shift_start + laneIdx; j < end; j += WARP_SIZE) { if (j >= start) { const EdgeT next = edgeList[j]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } } } int main(int argc, char *argv[]) { std::ifstream file; std::string vertex_file, edge_file; std::string filename; bool changed_h, *changed_d, no_src = false; int c, num_run = 1, arg_num = 0, device = 0; impl_type type; mem_type mem; uint32_t *label_d, level, zero, iter; uint64_t *vertexList_h, *vertexList_d; EdgeT *edgeList_h, *edgeList_d; uint64_t *edgeList64_h; uint64_t vertex_count, edge_count, 
vertex_size, edge_size; uint64_t typeT, src; uint64_t numblocks, numthreads; float milliseconds; double avg_milliseconds; hipEvent_t start, end; while ((c = getopt(argc, argv, "f:r:t:i:m:d:h")) != -1) { switch (c) { case 'f': filename = optarg; arg_num++; break; case 'r': if (!no_src) src = atoll(optarg); arg_num++; break; case 't': type = (impl_type)atoi(optarg); arg_num++; break; case 'i': no_src = true; src = 0; num_run = atoi(optarg); arg_num++; break; case 'm': mem = (mem_type)atoi(optarg); arg_num++; break; case 'd': device = atoi(optarg); break; case 'h': printf("4-byte edge BFS\n"); printf("\t-f | input file name (must end with .bel)\n"); printf("\t-r | BFS root (unused when i > 1)\n"); printf("\t-t | type of BFS to run\n"); printf("\t | BASELINE = 0, COALESCE = 1, COALESCE_CHUNK = 2\n"); printf("\t-m | memory allocation\n"); printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n"); printf("\t-i | number of iterations to run\n"); printf("\t-d | GPU device id (default=0)\n"); printf("\t-h | help message\n"); return 0; case '?': break; default: break; } } if (arg_num < 4) { printf("4-byte edge BFS\n"); printf("\t-f | input file name (must end with .bel)\n"); printf("\t-r | BFS root (unused when i > 1)\n"); printf("\t-t | type of BFS to run\n"); printf("\t | BASELINE = 0, COALESCE = 1, COALESCE_CHUNK = 2\n"); printf("\t-m | memory allocation\n"); printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n"); printf("\t-i | number of iterations to run\n"); printf("\t-d | GPU device id (default=0)\n"); printf("\t-h | help message\n"); return 0; } checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&end)); vertex_file = filename + ".col"; edge_file = filename + ".dst"; std::cout << filename << std::endl; // Read files file.open(vertex_file.c_str(), std::ios::in | std::ios::binary); if (!file.is_open()) { fprintf(stderr, "Vertex file open failed\n"); exit(1); } file.read((char*)(&vertex_count), 8); file.read((char*)(&typeT), 8); vertex_count--; printf("Vertex: %lu, ", vertex_count); vertex_size = (vertex_count+1) * sizeof(uint64_t); vertexList_h = (uint64_t*)malloc(vertex_size); file.read((char*)vertexList_h, vertex_size); file.close(); file.open(edge_file.c_str(), std::ios::in | std::ios::binary); if (!file.is_open()) { fprintf(stderr, "Edge file open failed\n"); exit(1); } file.read((char*)(&edge_count), 8); file.read((char*)(&typeT), 8); printf("Edge: %lu\n", edge_count); fflush(stdout); edge_size = edge_count * sizeof(EdgeT); edgeList_h = NULL; edgeList64_h = (uint64_t*)malloc(edge_count * sizeof(uint64_t)); file.read((char*)edgeList64_h, edge_count * sizeof(uint64_t)); // Allocate memory for GPU checkCudaErrors(hipMalloc((void**)&vertexList_d, vertex_size)); checkCudaErrors(hipMalloc((void**)&label_d, vertex_count * sizeof(uint32_t))); checkCudaErrors(hipMalloc((void**)&changed_d, sizeof(bool))); switch (mem) { case GPUMEM: edgeList_h = (EdgeT*)malloc(edge_size); checkCudaErrors(hipMalloc((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_h[i] = (uint32_t)edgeList64_h[i]; break; case UVM_READONLY: checkCudaErrors(hipMallocManaged((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_d[i] = (uint32_t)edgeList64_h[i]; checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetReadMostly, device)); break; case UVM_DIRECT: checkCudaErrors(hipMallocManaged((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_d[i] = (uint32_t)edgeList64_h[i]; 
checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetAccessedBy, device)); break; } free(edgeList64_h); file.close(); printf("Allocation finished\n"); fflush(stdout); // Initialize values checkCudaErrors(hipMemcpy(vertexList_d, vertexList_h, vertex_size, hipMemcpyHostToDevice)); if (mem == GPUMEM) checkCudaErrors(hipMemcpy(edgeList_d, edgeList_h, edge_size, hipMemcpyHostToDevice)); numthreads = BLOCK_SIZE; switch (type) { case BASELINE: numblocks = ((vertex_count + numthreads) / numthreads); break; case COALESCE: numblocks = ((vertex_count * WARP_SIZE + numthreads) / numthreads); break; case COALESCE_CHUNK: numblocks = ((vertex_count * (WARP_SIZE / CHUNK_SIZE) + numthreads) / numthreads); break; default: fprintf(stderr, "Invalid type\n"); exit(1); break; } dim3 blockDim(BLOCK_SIZE, (numblocks+BLOCK_SIZE)/BLOCK_SIZE); avg_milliseconds = 0.0f; printf("Initialization done\n"); fflush(stdout); // Set root for (int i = 0; i < num_run; i++) { zero = 0; checkCudaErrors(hipMemset(label_d, 0xFF, vertex_count * sizeof(uint32_t))); checkCudaErrors(hipMemcpy(&label_d[src], &zero, sizeof(uint32_t), hipMemcpyHostToDevice)); level = 0; iter = 0; checkCudaErrors(hipEventRecord(start, 0)); // Run BFS do { changed_h = false; checkCudaErrors(hipMemcpy(changed_d, &changed_h, sizeof(bool), hipMemcpyHostToDevice)); switch (type) { case BASELINE: hipLaunchKernelGGL(( kernel_baseline), dim3(blockDim), dim3(numthreads), 0, 0, label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; case COALESCE: hipLaunchKernelGGL(( kernel_coalesce), dim3(blockDim), dim3(numthreads), 0, 0, label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; case COALESCE_CHUNK: hipLaunchKernelGGL(( kernel_coalesce_chunk), dim3(blockDim), dim3(numthreads), 0, 0, label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; default: fprintf(stderr, "Invalid type\n"); exit(1); break; } iter++; level++; checkCudaErrors(hipMemcpy(&changed_h, changed_d, sizeof(bool), hipMemcpyDeviceToHost)); } while(changed_h); checkCudaErrors(hipEventRecord(end, 0)); checkCudaErrors(hipEventSynchronize(end)); checkCudaErrors(hipEventElapsedTime(&milliseconds, start, end)); printf("run %*d: ", 3, i); printf("src %*lu, ", 12, src); printf("iteration %*u, ", 3, iter); printf("time %*f ms\n", 12, milliseconds); fflush(stdout); avg_milliseconds += (double)milliseconds; src += vertex_count / num_run; if (i < num_run - 1) { EdgeT *edgeList_temp; // Flush GPU page cache for each iteration by re-allocating UVM switch (mem) { case UVM_READONLY: checkCudaErrors(hipMallocManaged((void**)&edgeList_temp, edge_size)); memcpy(edgeList_temp, edgeList_d, edge_size); checkCudaErrors(hipFree(edgeList_d)); edgeList_d = edgeList_temp; checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetReadMostly, device)); break; default: break; } } } printf("Average run time %f ms\n", avg_milliseconds / num_run); free(vertexList_h); if (edgeList_h) free(edgeList_h); checkCudaErrors(hipFree(vertexList_d)); checkCudaErrors(hipFree(edgeList_d)); checkCudaErrors(hipFree(label_d)); checkCudaErrors(hipFree(changed_d)); return 0; }
184130f258f8417e3200b2c34ae3cc8b657667b6.cu
/* References: * * Harish, Pawan, and P. J. Narayanan. * "Accelerating large graph algorithms on the GPU using CUDA." * International conference on high-performance computing. * Springer, Berlin, Heidelberg, 2007. * * Hong, Sungpack, et al. * "Accelerating CUDA graph algorithms at maximum warp." * Acm Sigplan Notices 46.8 (2011): 267-276. * * Lifeng Nai, Yinglong Xia, Ilie G. Tanase, Hyesoon Kim, and Ching-Yung Lin. * GraphBIG: Understanding Graph Computing in the Context of Industrial Solutions, * In the proccedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC), * Nov. 2015 * */ #include "helper_emogi.h" #define MYINFINITY 0xFFFFFFFF #define MEM_ALIGN MEM_ALIGN_32 typedef uint32_t EdgeT; __global__ void kernel_baseline(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; if(tid < vertex_count && label[tid] == level) { const uint64_t start = vertexList[tid]; const uint64_t end = vertexList[tid+1]; for(uint64_t i = start; i < end; i++) { const EdgeT next = edgeList[i]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } __global__ void kernel_coalesce(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; const uint64_t warpIdx = tid >> WARP_SHIFT; const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1); if(warpIdx < vertex_count && label[warpIdx] == level) { const uint64_t start = vertexList[warpIdx]; const uint64_t shift_start = start & MEM_ALIGN; const uint64_t end = vertexList[warpIdx+1]; for(uint64_t i = shift_start + laneIdx; i < end; i += WARP_SIZE) { if (i >= start) { const EdgeT next = edgeList[i]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } } __global__ void kernel_coalesce_chunk(uint32_t *label, const uint32_t level, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, bool *changed) { const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x; const uint64_t warpIdx = tid >> WARP_SHIFT; const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1); const uint64_t chunkIdx = warpIdx * CHUNK_SIZE; uint64_t chunk_size = CHUNK_SIZE; if((chunkIdx + CHUNK_SIZE) > vertex_count) { if ( vertex_count > chunkIdx ) chunk_size = vertex_count - chunkIdx; else return; } for(uint32_t i = chunkIdx; i < chunk_size + chunkIdx; i++) { if(label[i] == level) { const uint64_t start = vertexList[i]; const uint64_t shift_start = start & MEM_ALIGN; const uint64_t end = vertexList[i+1]; for(uint64_t j = shift_start + laneIdx; j < end; j += WARP_SIZE) { if (j >= start) { const EdgeT next = edgeList[j]; if(label[next] == MYINFINITY) { label[next] = level + 1; *changed = true; } } } } } } int main(int argc, char *argv[]) { std::ifstream file; std::string vertex_file, edge_file; std::string filename; bool changed_h, *changed_d, no_src = false; int c, num_run = 1, arg_num = 0, device = 0; impl_type type; mem_type mem; uint32_t *label_d, level, zero, iter; uint64_t *vertexList_h, *vertexList_d; EdgeT *edgeList_h, *edgeList_d; uint64_t *edgeList64_h; uint64_t vertex_count, edge_count, vertex_size, edge_size; uint64_t typeT, src; uint64_t numblocks, numthreads; float 
milliseconds; double avg_milliseconds; cudaEvent_t start, end; while ((c = getopt(argc, argv, "f:r:t:i:m:d:h")) != -1) { switch (c) { case 'f': filename = optarg; arg_num++; break; case 'r': if (!no_src) src = atoll(optarg); arg_num++; break; case 't': type = (impl_type)atoi(optarg); arg_num++; break; case 'i': no_src = true; src = 0; num_run = atoi(optarg); arg_num++; break; case 'm': mem = (mem_type)atoi(optarg); arg_num++; break; case 'd': device = atoi(optarg); break; case 'h': printf("4-byte edge BFS\n"); printf("\t-f | input file name (must end with .bel)\n"); printf("\t-r | BFS root (unused when i > 1)\n"); printf("\t-t | type of BFS to run\n"); printf("\t | BASELINE = 0, COALESCE = 1, COALESCE_CHUNK = 2\n"); printf("\t-m | memory allocation\n"); printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n"); printf("\t-i | number of iterations to run\n"); printf("\t-d | GPU device id (default=0)\n"); printf("\t-h | help message\n"); return 0; case '?': break; default: break; } } if (arg_num < 4) { printf("4-byte edge BFS\n"); printf("\t-f | input file name (must end with .bel)\n"); printf("\t-r | BFS root (unused when i > 1)\n"); printf("\t-t | type of BFS to run\n"); printf("\t | BASELINE = 0, COALESCE = 1, COALESCE_CHUNK = 2\n"); printf("\t-m | memory allocation\n"); printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n"); printf("\t-i | number of iterations to run\n"); printf("\t-d | GPU device id (default=0)\n"); printf("\t-h | help message\n"); return 0; } checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&end)); vertex_file = filename + ".col"; edge_file = filename + ".dst"; std::cout << filename << std::endl; // Read files file.open(vertex_file.c_str(), std::ios::in | std::ios::binary); if (!file.is_open()) { fprintf(stderr, "Vertex file open failed\n"); exit(1); } file.read((char*)(&vertex_count), 8); file.read((char*)(&typeT), 8); vertex_count--; printf("Vertex: %lu, ", vertex_count); vertex_size = (vertex_count+1) * sizeof(uint64_t); vertexList_h = (uint64_t*)malloc(vertex_size); file.read((char*)vertexList_h, vertex_size); file.close(); file.open(edge_file.c_str(), std::ios::in | std::ios::binary); if (!file.is_open()) { fprintf(stderr, "Edge file open failed\n"); exit(1); } file.read((char*)(&edge_count), 8); file.read((char*)(&typeT), 8); printf("Edge: %lu\n", edge_count); fflush(stdout); edge_size = edge_count * sizeof(EdgeT); edgeList_h = NULL; edgeList64_h = (uint64_t*)malloc(edge_count * sizeof(uint64_t)); file.read((char*)edgeList64_h, edge_count * sizeof(uint64_t)); // Allocate memory for GPU checkCudaErrors(cudaMalloc((void**)&vertexList_d, vertex_size)); checkCudaErrors(cudaMalloc((void**)&label_d, vertex_count * sizeof(uint32_t))); checkCudaErrors(cudaMalloc((void**)&changed_d, sizeof(bool))); switch (mem) { case GPUMEM: edgeList_h = (EdgeT*)malloc(edge_size); checkCudaErrors(cudaMalloc((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_h[i] = (uint32_t)edgeList64_h[i]; break; case UVM_READONLY: checkCudaErrors(cudaMallocManaged((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_d[i] = (uint32_t)edgeList64_h[i]; checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetReadMostly, device)); break; case UVM_DIRECT: checkCudaErrors(cudaMallocManaged((void**)&edgeList_d, edge_size)); for (uint64_t i = 0; i < edge_count; i++) edgeList_d[i] = (uint32_t)edgeList64_h[i]; checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetAccessedBy, device)); 
break; } free(edgeList64_h); file.close(); printf("Allocation finished\n"); fflush(stdout); // Initialize values checkCudaErrors(cudaMemcpy(vertexList_d, vertexList_h, vertex_size, cudaMemcpyHostToDevice)); if (mem == GPUMEM) checkCudaErrors(cudaMemcpy(edgeList_d, edgeList_h, edge_size, cudaMemcpyHostToDevice)); numthreads = BLOCK_SIZE; switch (type) { case BASELINE: numblocks = ((vertex_count + numthreads) / numthreads); break; case COALESCE: numblocks = ((vertex_count * WARP_SIZE + numthreads) / numthreads); break; case COALESCE_CHUNK: numblocks = ((vertex_count * (WARP_SIZE / CHUNK_SIZE) + numthreads) / numthreads); break; default: fprintf(stderr, "Invalid type\n"); exit(1); break; } dim3 blockDim(BLOCK_SIZE, (numblocks+BLOCK_SIZE)/BLOCK_SIZE); avg_milliseconds = 0.0f; printf("Initialization done\n"); fflush(stdout); // Set root for (int i = 0; i < num_run; i++) { zero = 0; checkCudaErrors(cudaMemset(label_d, 0xFF, vertex_count * sizeof(uint32_t))); checkCudaErrors(cudaMemcpy(&label_d[src], &zero, sizeof(uint32_t), cudaMemcpyHostToDevice)); level = 0; iter = 0; checkCudaErrors(cudaEventRecord(start, 0)); // Run BFS do { changed_h = false; checkCudaErrors(cudaMemcpy(changed_d, &changed_h, sizeof(bool), cudaMemcpyHostToDevice)); switch (type) { case BASELINE: kernel_baseline<<<blockDim, numthreads>>>(label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; case COALESCE: kernel_coalesce<<<blockDim, numthreads>>>(label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; case COALESCE_CHUNK: kernel_coalesce_chunk<<<blockDim, numthreads>>>(label_d, level, vertex_count, vertexList_d, edgeList_d, changed_d); break; default: fprintf(stderr, "Invalid type\n"); exit(1); break; } iter++; level++; checkCudaErrors(cudaMemcpy(&changed_h, changed_d, sizeof(bool), cudaMemcpyDeviceToHost)); } while(changed_h); checkCudaErrors(cudaEventRecord(end, 0)); checkCudaErrors(cudaEventSynchronize(end)); checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, end)); printf("run %*d: ", 3, i); printf("src %*lu, ", 12, src); printf("iteration %*u, ", 3, iter); printf("time %*f ms\n", 12, milliseconds); fflush(stdout); avg_milliseconds += (double)milliseconds; src += vertex_count / num_run; if (i < num_run - 1) { EdgeT *edgeList_temp; // Flush GPU page cache for each iteration by re-allocating UVM switch (mem) { case UVM_READONLY: checkCudaErrors(cudaMallocManaged((void**)&edgeList_temp, edge_size)); memcpy(edgeList_temp, edgeList_d, edge_size); checkCudaErrors(cudaFree(edgeList_d)); edgeList_d = edgeList_temp; checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetReadMostly, device)); break; default: break; } } } printf("Average run time %f ms\n", avg_milliseconds / num_run); free(vertexList_h); if (edgeList_h) free(edgeList_h); checkCudaErrors(cudaFree(vertexList_d)); checkCudaErrors(cudaFree(edgeList_d)); checkCudaErrors(cudaFree(label_d)); checkCudaErrors(cudaFree(changed_d)); return 0; }
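/*
 * A minimal sketch of the warp-per-vertex traversal used by kernel_coalesce
 * above: all 32 lanes of a warp walk one vertex's adjacency list together, and
 * the first load is rounded down to an aligned boundary so the warp issues
 * coalesced transactions. The hard-coded 32-lane warp and the low-five-bit
 * mask stand in for the WARP_SIZE/WARP_SHIFT/MEM_ALIGN constants defined in
 * helper_emogi.h, which this sketch assumes behave that way; the label update
 * is reduced to a single marking step.
 */
#include <cstdint>

__global__ void warp_per_vertex_sketch(const uint64_t* vertexList,
                                       const uint32_t* edgeList,
                                       uint32_t* label,
                                       uint64_t vertex_count) {
  const uint64_t tid = blockIdx.x * (uint64_t)blockDim.x + threadIdx.x;
  const uint64_t warpIdx = tid >> 5;            // one warp per vertex
  const uint64_t laneIdx = tid & 31;
  if (warpIdx >= vertex_count) return;
  const uint64_t start = vertexList[warpIdx];
  const uint64_t end   = vertexList[warpIdx + 1];
  const uint64_t shift_start = start & ~31ull;  // round down so the first load is aligned
  for (uint64_t i = shift_start + laneIdx; i < end; i += 32) {
    if (i >= start) {                           // lanes before `start` only pad the first load
      label[edgeList[i]] = 1;                   // e.g. mark the neighbour as reached
    }
  }
}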
2040aac98abe2e0acfe984b85dc0003a067ca6ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * CUDA code for making particles interact. * In this version, the kernel function is one big function */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "pair_array_functions.c" #define CELLSIZE (64) __device__ int get_global_offset(int bIdx_x, int grid_x, int grid_y, int grid_z) { // This gets the offset of the current thread in the global particle array /*return (int)(cell_size * ( (bIdx_y * gDim_x*(bDim_x*bDim_y*bDim_z)) + (bIdx_x * (bDim_x*bDim_y*bDim_z)) + tIdx_z * (bDim_x*bDim_y) + tIdx_y * (bDim_x) + tIdx_x));*/ int z = bIdx_x % grid_z; int y = ((bIdx_x - z) % (grid_z * grid_y)) / grid_z; int x = (bIdx_x - z - (grid_z * y)) / (grid_z * grid_y); return z+grid_z*y+(grid_z*grid_y)*x; } __device__ int current_global_offset(int grid_x, int grid_y, int grid_z) { return get_global_offset((int)blockIdx.x, grid_x, grid_y, grid_z); } __device__ int neighbour_offset(int x_rel, int y_rel, int z_rel, int grid_x, int grid_y, int grid_z) { // Gets the offset of a neighbour at relative position (x,y,z) // NOTE: Implementation-specific, assuming 1D grid of 1D blocks! int z = ((int)blockIdx.x) % grid_z; int y = ((((int)blockIdx.x) - z) % (grid_z * grid_y)) / grid_z; int x = (((int)blockIdx.x) - z - (grid_z * y)) / (grid_z * grid_y); x += x_rel; y += y_rel; z += z_rel; if (x < 0 || y < 0 || z < 0 || x >= grid_x || y >= grid_y || z >= grid_z) { return -1; } else { return z+grid_z*y+(grid_z*grid_y)*x; } } __device__ int get_local_offset() { // This gets the offset of the current thread in the local particle array return (int)(threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x); } __global__ void brute_force(particle* all_particles, int cell_size, int grid_x, int grid_y, int grid_z) { // This begins calculation of the particle interactions // Work out where in the array our particles start int cell_offset = current_global_offset(grid_x, grid_y, grid_z); // Each thread loads its own particle to local memory particle self = all_particles[(cell_size*cell_offset) + get_local_offset()]; // Initialise the interaction values self.x_acc = (float)0.0; particle other; // Now calculate interactions // Loop everything for (int index=0; index < grid_x * grid_y * grid_z * CELLSIZE; index++) { // Try doing a predictable interaction other = all_particles[index]; self.x_acc += (float)1.0; } // Don't overwrite particles in use __syncthreads(); // Now put our particle back into global memory all_particles[(cell_size * cell_offset) + get_local_offset()] = self; } __global__ void do_cell(particle* all_particles, int cell_size, int grid_x, int grid_y, int grid_z) { // This begins calculation of the particle interactions // Each block needs access to its local particles and a neighbour // Note: We can't use 2*cell_size here since it's not constant! 
__shared__ particle local_particles[2*CELLSIZE]; // Work out where in the array our particles start int cell_offset = current_global_offset(grid_x, grid_y, grid_z); // Each thread loads its own particle to local memory local_particles[get_local_offset()] = all_particles[(cell_size*cell_offset) + get_local_offset()]; // Initialise the interaction values local_particles[get_local_offset()].x_acc = (float)0.0; int n_offset; // Now load in our neighbours and calculate interactions // Loop through neighbours for (int x_rel = -1; x_rel < 2; x_rel++) { for (int y_rel = -1; y_rel < 2; y_rel++) { for (int z_rel = -1; z_rel < 2; z_rel++) { // Only act if we've got a valid neighbour if (neighbour_offset(x_rel, y_rel, z_rel, grid_x, grid_y, grid_z) >= 0) { // Load neighbouring cell to shared memory // This is the memory location we need to start allocating // from n_offset = neighbour_offset(x_rel, y_rel, z_rel, grid_x, grid_y, grid_z); // Each thread allocates a particle from the neighbour // Start at cell_size since we're filling the second half local_particles[cell_size + get_local_offset()] = all_particles[n_offset*cell_size + get_local_offset()]; // Ensure all particles have been loaded into shared mem __syncthreads(); // Work out the interactions with these neighbours // Calculate interactions between a particle and everything // in the neighbour // Loop through the size of the neighbour for (unsigned int counter = 0; counter < cell_size; counter++) { // Make our particle interact with everything in the // second half of local memory // Try doing a predictable interaction local_particles[get_local_offset()].x_acc += (float)1.0; } // Ensure everyone's finished before loading the next cell __syncthreads(); } } } } // Now put shared values back into global memory all_particles[(cell_size * cell_offset) + get_local_offset()] = local_particles[get_local_offset()]; // Make sure we're all done __syncthreads(); }
2040aac98abe2e0acfe984b85dc0003a067ca6ff.cu
/* * CUDA code for making particles interact. * In this version, the kernel function is one big function */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "pair_array_functions.c" #define CELLSIZE (64) __device__ int get_global_offset(int bIdx_x, int grid_x, int grid_y, int grid_z) { // This gets the offset of the current thread in the global particle array /*return (int)(cell_size * ( (bIdx_y * gDim_x*(bDim_x*bDim_y*bDim_z)) + (bIdx_x * (bDim_x*bDim_y*bDim_z)) + tIdx_z * (bDim_x*bDim_y) + tIdx_y * (bDim_x) + tIdx_x));*/ int z = bIdx_x % grid_z; int y = ((bIdx_x - z) % (grid_z * grid_y)) / grid_z; int x = (bIdx_x - z - (grid_z * y)) / (grid_z * grid_y); return z+grid_z*y+(grid_z*grid_y)*x; } __device__ int current_global_offset(int grid_x, int grid_y, int grid_z) { return get_global_offset((int)blockIdx.x, grid_x, grid_y, grid_z); } __device__ int neighbour_offset(int x_rel, int y_rel, int z_rel, int grid_x, int grid_y, int grid_z) { // Gets the offset of a neighbour at relative position (x,y,z) // NOTE: Implementation-specific, assuming 1D grid of 1D blocks! int z = ((int)blockIdx.x) % grid_z; int y = ((((int)blockIdx.x) - z) % (grid_z * grid_y)) / grid_z; int x = (((int)blockIdx.x) - z - (grid_z * y)) / (grid_z * grid_y); x += x_rel; y += y_rel; z += z_rel; if (x < 0 || y < 0 || z < 0 || x >= grid_x || y >= grid_y || z >= grid_z) { return -1; } else { return z+grid_z*y+(grid_z*grid_y)*x; } } __device__ int get_local_offset() { // This gets the offset of the current thread in the local particle array return (int)(threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x); } __global__ void brute_force(particle* all_particles, int cell_size, int grid_x, int grid_y, int grid_z) { // This begins calculation of the particle interactions // Work out where in the array our particles start int cell_offset = current_global_offset(grid_x, grid_y, grid_z); // Each thread loads its own particle to local memory particle self = all_particles[(cell_size*cell_offset) + get_local_offset()]; // Initialise the interaction values self.x_acc = (float)0.0; particle other; // Now calculate interactions // Loop everything for (int index=0; index < grid_x * grid_y * grid_z * CELLSIZE; index++) { // Try doing a predictable interaction other = all_particles[index]; self.x_acc += (float)1.0; } // Don't overwrite particles in use __syncthreads(); // Now put our particle back into global memory all_particles[(cell_size * cell_offset) + get_local_offset()] = self; } __global__ void do_cell(particle* all_particles, int cell_size, int grid_x, int grid_y, int grid_z) { // This begins calculation of the particle interactions // Each block needs access to its local particles and a neighbour // Note: We can't use 2*cell_size here since it's not constant! 
__shared__ particle local_particles[2*CELLSIZE]; // Work out where in the array our particles start int cell_offset = current_global_offset(grid_x, grid_y, grid_z); // Each thread loads its own particle to local memory local_particles[get_local_offset()] = all_particles[(cell_size*cell_offset) + get_local_offset()]; // Initialise the interaction values local_particles[get_local_offset()].x_acc = (float)0.0; int n_offset; // Now load in our neighbours and calculate interactions // Loop through neighbours for (int x_rel = -1; x_rel < 2; x_rel++) { for (int y_rel = -1; y_rel < 2; y_rel++) { for (int z_rel = -1; z_rel < 2; z_rel++) { // Only act if we've got a valid neighbour if (neighbour_offset(x_rel, y_rel, z_rel, grid_x, grid_y, grid_z) >= 0) { // Load neighbouring cell to shared memory // This is the memory location we need to start allocating // from n_offset = neighbour_offset(x_rel, y_rel, z_rel, grid_x, grid_y, grid_z); // Each thread allocates a particle from the neighbour // Start at cell_size since we're filling the second half local_particles[cell_size + get_local_offset()] = all_particles[n_offset*cell_size + get_local_offset()]; // Ensure all particles have been loaded into shared mem __syncthreads(); // Work out the interactions with these neighbours // Calculate interactions between a particle and everything // in the neighbour // Loop through the size of the neighbour for (unsigned int counter = 0; counter < cell_size; counter++) { // Make our particle interact with everything in the // second half of local memory // Try doing a predictable interaction local_particles[get_local_offset()].x_acc += (float)1.0; } // Ensure everyone's finished before loading the next cell __syncthreads(); } } } } // Now put shared values back into global memory all_particles[(cell_size * cell_offset) + get_local_offset()] = local_particles[get_local_offset()]; // Make sure we're all done __syncthreads(); }
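/*
 * A minimal sketch of the cell-pairing idea in do_cell above: a block stages
 * its own cell in the first half of shared memory and one neighbour cell in
 * the second half, with a barrier before and after the interaction loop so the
 * staging buffer is never read while it is being overwritten. The particle
 * payload is reduced to a single float here for illustration; names are
 * placeholders.
 */
#define SKETCH_CELL 64

__global__ void pair_cells_sketch(const float* all, float* acc,
                                  int self_cell, int other_cell) {
  __shared__ float tile[2 * SKETCH_CELL];
  int t = threadIdx.x;                                        // one thread per particle
  tile[t] = all[self_cell * SKETCH_CELL + t];                 // first half: our cell
  tile[SKETCH_CELL + t] = all[other_cell * SKETCH_CELL + t];  // second half: neighbour
  __syncthreads();                                            // everything staged
  float a = 0.0f;
  for (int k = 0; k < SKETCH_CELL; ++k)
    a += tile[SKETCH_CELL + k];                               // stand-in for a real force term
  __syncthreads();                                            // done reading the tile
  acc[self_cell * SKETCH_CELL + t] = a;
}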
25ca07dd4ddcfd9c786b121e3e6ea36cd8e3426a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <ctime> #include "EBMP/EasyBMP.h" #include <algorithm> //Russian characters aren't displayed.Comments in English, sorry... // 2D float texture texture<float, hipTextureType2D, hipReadModeElementType> texRef; using namespace std; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void saveImage(float* image, int height, int width, bool method) { BMP Output; Output.SetSize(width, height); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { RGBApixel pixel; pixel.Red = image[i * width + j]; pixel.Green = image[i * width + j]; pixel.Blue = image[i * width + j]; Output.SetPixel(j, i, pixel); } } if (method) Output.WriteToFile("GPUoutAngelina.bmp"); else Output.WriteToFile("CPUoutAngelina.bmp"); } void noiseImg(float* image, int height, int width, int per) { BMP Output; Output.SetSize(width, height); int countOfPixels = int(height * width / 100 * per); while (countOfPixels > 0) { int i = rand() % height; int j = rand() % width; int c = rand() % 2; if (c == 1) image[i * width + j] = 255; else image[i * width + j] = 0; countOfPixels--; } for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { RGBApixel pixel; pixel.Red = image[i * width + j]; pixel.Green = image[i * width + j]; pixel.Blue = image[i * width + j]; Output.SetPixel(j, i, pixel); } } Output.WriteToFile("NoiseAngelina.bmp"); } void medianFilterCPU(float* image, float* resault, int height, int width) { //mask3x3 int m = 3; int n = 3; int mean = m * n / 2; int pad = m / 2; float* expandImageArray = (float*)calloc((height + 2 * pad) * (width + 2 * pad), sizeof(float)); for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { expandImageArray[(j + pad) * (width + 2 * pad) + i + pad] = image[j * width + i]; } } for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { float* window = (float*)calloc(m * n, sizeof(float)); for (int k = 0; k < m; k++) { for (int t = 0; t < n; t++) { window[k * n + t] = expandImageArray[j * (width + 2 * pad) + i + k * (width + 2 * pad) + t]; } } bool swapped = true; int t = 0; int tmp; while (swapped) { swapped = false; t++; for (int i = 0; i < m * n - t; i++) { if (window[i] > window[i + 1]) { tmp = window[i]; window[i] = window[i + 1]; window[i + 1] = tmp; swapped = true; } } } //sort(window, window + m * n); resault[j * width + i] = window[mean]; } } } __global__ void myFilter(float* output, int imageWidth, int imageHeight) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // mask 3x3 float window[9]; int m = 3; int n = 3; int mean = m * n / 2; int pad = m / 2; for (int i = -pad; i <= pad; i++) { for (int j = -pad; j <= pad; j++) { window[(i + pad) * n + j + pad] = tex2D(texRef, col + j, row + i); } } bool swapped = true; int t = 0; int tmp; while (swapped) { swapped = false; t++; for (int i = 0; i < m * n - t; i++) { if (window[i] > window[i + 1]) { tmp = window[i]; window[i] = window[i + 1]; window[i + 1] = tmp; swapped = true; } } } output[row * imageWidth + col] = window[mean]; } int main(void) { int nIter = 100; BMP Image; Image.ReadFromFile("angelina.bmp"); int height = Image.TellHeight(); int width = Image.TellWidth(); float* imageArray = 
(float*)calloc(height * width, sizeof(float)); float* outputCPU = (float*)calloc(height * width, sizeof(float)); float* outputGPU = (float*)calloc(height * width, sizeof(float)); float* outputDevice; for (int j = 0; j < Image.TellHeight(); j++) { for (int i = 0; i < Image.TellWidth(); i++) { imageArray[j * width + i] = Image(i, j)->Red; } } noiseImg(imageArray, height, width, 8); unsigned int start_time = clock(); for (int j = 0; j < nIter; j++) { medianFilterCPU(imageArray, outputCPU, height, width); } unsigned int elapsedTime = clock() - start_time; float msecPerMatrixMulCpu = elapsedTime / nIter; cout << "CPU time: " << msecPerMatrixMulCpu << endl; int device_count = 0; hipGetDeviceCount(&device_count); if (device_count == 0) cout << "Sorry! You dont have CudaDevice" << endl; else { cout << "CudaDevice found! Device count: " << device_count << endl; // Allocate CUDA array in device memory //Returns a channel descriptor with format f and number of bits of each component x, y, z, and w hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray* cu_arr; checkCudaErrors(hipMallocArray(&cu_arr, &channelDesc, width, height)); checkCudaErrors(hipMemcpyToArray(cu_arr, 0, 0, imageArray, height * width * sizeof(float), hipMemcpyHostToDevice)); // set texture parameters texRef.addressMode[0] = hipAddressModeClamp; texRef.addressMode[1] = hipAddressModeClamp; texRef.filterMode = hipFilterModePoint; // Bind the array to the texture hipBindTextureToArray(texRef, cu_arr, channelDesc); checkCudaErrors(hipMalloc(&outputDevice, height * width * sizeof(float))); dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid((width + threadsPerBlock.x - 1) / threadsPerBlock.x, (height + threadsPerBlock.y - 1) / threadsPerBlock.y); hipEvent_t start; hipEvent_t stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // start record checkCudaErrors(hipEventRecord(start, 0)); for (int j = 0; j < nIter; j++) { myFilter << <blocksPerGrid, threadsPerBlock >> > (outputDevice, width, height); } // stop record checkCudaErrors(hipEventRecord(stop, 0)); // wait end of event checkCudaErrors(hipEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); float msecPerMatrixMul = msecTotal / nIter; cout << "GPU time: " << msecPerMatrixMul << endl; hipDeviceSynchronize(); checkCudaErrors(hipMemcpy(outputGPU, outputDevice, height * width * sizeof(float), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); saveImage(outputGPU, height, width, true); saveImage(outputCPU, height, width, false); checkCudaErrors(hipFreeArray(cu_arr)); checkCudaErrors(hipFree(outputDevice)); } return 0; }
25ca07dd4ddcfd9c786b121e3e6ea36cd8e3426a.cu
#include <cuda_runtime.h> #include <iostream> #include <ctime> #include "EBMP/EasyBMP.h" #include <algorithm> //Russian characters aren't displayed.Comments in English, sorry... // 2D float texture texture<float, cudaTextureType2D, cudaReadModeElementType> texRef; using namespace std; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void saveImage(float* image, int height, int width, bool method) { BMP Output; Output.SetSize(width, height); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { RGBApixel pixel; pixel.Red = image[i * width + j]; pixel.Green = image[i * width + j]; pixel.Blue = image[i * width + j]; Output.SetPixel(j, i, pixel); } } if (method) Output.WriteToFile("GPUoutAngelina.bmp"); else Output.WriteToFile("CPUoutAngelina.bmp"); } void noiseImg(float* image, int height, int width, int per) { BMP Output; Output.SetSize(width, height); int countOfPixels = int(height * width / 100 * per); while (countOfPixels > 0) { int i = rand() % height; int j = rand() % width; int c = rand() % 2; if (c == 1) image[i * width + j] = 255; else image[i * width + j] = 0; countOfPixels--; } for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { RGBApixel pixel; pixel.Red = image[i * width + j]; pixel.Green = image[i * width + j]; pixel.Blue = image[i * width + j]; Output.SetPixel(j, i, pixel); } } Output.WriteToFile("NoiseAngelina.bmp"); } void medianFilterCPU(float* image, float* resault, int height, int width) { //mask3x3 int m = 3; int n = 3; int mean = m * n / 2; int pad = m / 2; float* expandImageArray = (float*)calloc((height + 2 * pad) * (width + 2 * pad), sizeof(float)); for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { expandImageArray[(j + pad) * (width + 2 * pad) + i + pad] = image[j * width + i]; } } for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { float* window = (float*)calloc(m * n, sizeof(float)); for (int k = 0; k < m; k++) { for (int t = 0; t < n; t++) { window[k * n + t] = expandImageArray[j * (width + 2 * pad) + i + k * (width + 2 * pad) + t]; } } bool swapped = true; int t = 0; int tmp; while (swapped) { swapped = false; t++; for (int i = 0; i < m * n - t; i++) { if (window[i] > window[i + 1]) { tmp = window[i]; window[i] = window[i + 1]; window[i + 1] = tmp; swapped = true; } } } //sort(window, window + m * n); resault[j * width + i] = window[mean]; } } } __global__ void myFilter(float* output, int imageWidth, int imageHeight) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // mask 3x3 float window[9]; int m = 3; int n = 3; int mean = m * n / 2; int pad = m / 2; for (int i = -pad; i <= pad; i++) { for (int j = -pad; j <= pad; j++) { window[(i + pad) * n + j + pad] = tex2D(texRef, col + j, row + i); } } bool swapped = true; int t = 0; int tmp; while (swapped) { swapped = false; t++; for (int i = 0; i < m * n - t; i++) { if (window[i] > window[i + 1]) { tmp = window[i]; window[i] = window[i + 1]; window[i + 1] = tmp; swapped = true; } } } output[row * imageWidth + col] = window[mean]; } int main(void) { int nIter = 100; BMP Image; Image.ReadFromFile("angelina.bmp"); int height = Image.TellHeight(); int width = Image.TellWidth(); float* imageArray = (float*)calloc(height * width, sizeof(float)); float* outputCPU = 
(float*)calloc(height * width, sizeof(float)); float* outputGPU = (float*)calloc(height * width, sizeof(float)); float* outputDevice; for (int j = 0; j < Image.TellHeight(); j++) { for (int i = 0; i < Image.TellWidth(); i++) { imageArray[j * width + i] = Image(i, j)->Red; } } noiseImg(imageArray, height, width, 8); unsigned int start_time = clock(); for (int j = 0; j < nIter; j++) { medianFilterCPU(imageArray, outputCPU, height, width); } unsigned int elapsedTime = clock() - start_time; float msecPerMatrixMulCpu = elapsedTime / nIter; cout << "CPU time: " << msecPerMatrixMulCpu << endl; int device_count = 0; cudaGetDeviceCount(&device_count); if (device_count == 0) cout << "Sorry! You dont have CudaDevice" << endl; else { cout << "CudaDevice found! Device count: " << device_count << endl; // Allocate CUDA array in device memory //Returns a channel descriptor with format f and number of bits of each component x, y, z, and w cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray* cu_arr; checkCudaErrors(cudaMallocArray(&cu_arr, &channelDesc, width, height)); checkCudaErrors(cudaMemcpyToArray(cu_arr, 0, 0, imageArray, height * width * sizeof(float), cudaMemcpyHostToDevice)); // set texture parameters texRef.addressMode[0] = cudaAddressModeClamp; texRef.addressMode[1] = cudaAddressModeClamp; texRef.filterMode = cudaFilterModePoint; // Bind the array to the texture cudaBindTextureToArray(texRef, cu_arr, channelDesc); checkCudaErrors(cudaMalloc(&outputDevice, height * width * sizeof(float))); dim3 threadsPerBlock(16, 16); dim3 blocksPerGrid((width + threadsPerBlock.x - 1) / threadsPerBlock.x, (height + threadsPerBlock.y - 1) / threadsPerBlock.y); cudaEvent_t start; cudaEvent_t stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // start record checkCudaErrors(cudaEventRecord(start, 0)); for (int j = 0; j < nIter; j++) { myFilter << <blocksPerGrid, threadsPerBlock >> > (outputDevice, width, height); } // stop record checkCudaErrors(cudaEventRecord(stop, 0)); // wait end of event checkCudaErrors(cudaEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); float msecPerMatrixMul = msecTotal / nIter; cout << "GPU time: " << msecPerMatrixMul << endl; cudaDeviceSynchronize(); checkCudaErrors(cudaMemcpy(outputGPU, outputDevice, height * width * sizeof(float), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); saveImage(outputGPU, height, width, true); saveImage(outputCPU, height, width, false); checkCudaErrors(cudaFreeArray(cu_arr)); checkCudaErrors(cudaFree(outputDevice)); } return 0; }
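/*
 * The filter above binds a global `texture<float, ...>` reference, an API that
 * is deprecated in recent CUDA releases and removed in CUDA 12. A minimal
 * sketch of the equivalent binding with the texture-object API is shown below;
 * variable names are illustrative and error checking is omitted.
 */
#include <cuda_runtime.h>

cudaTextureObject_t makeClampedPointTexture(cudaArray_t cuArray) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = cuArray;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;  // clamp at the image border
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;       // no interpolation for a median filter
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
    return texObj;  // pass to the kernel and sample with tex2D<float>(texObj, x, y)
}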
8bc0ebd28ba4478c101b75d71776e6577e2bd899.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/MultiMarginCriterion.hip" #else // TODO: improve error messages void THNN_(MultiMarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, int p, THCTensor *weights, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_assertSameGPU(state, 2, input, target); input = THCTensor_(newContiguous)(state, input); if(weights) weights = THCTensor_(newContiguous)(state, weights); if (THTensor_nDimensionLegacyNoScalars(input) == 1) { int nframe = 1; THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(1); dim3 threads(MULTIMARGIN_THREADS); if (reduction == at::Reduction::None) { THCTensor_(resizeAs)(state, output, target); } else { THCTensor_(resize0d)(state, output); } if (p == 1) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(input, 0), reduction == at::Reduction::Mean, margin ); } else if (p == 2) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(input, 0), reduction == at::Reduction::Mean, margin ); } THCudaCheck(hipGetLastError()); } else if (input->dim() == 2) { int nframe = input->size(0); THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(input->size(0)); dim3 threads(MULTIMARGIN_THREADS); if (reduction == at::Reduction::None) { THCTensor_(resizeAs)(state, output, target); if (p == 1) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), false, margin ); } else if (p == 2) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? 
THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), false, margin ); } THCudaCheck(hipGetLastError()); } else { THCTensor_(resize0d)(state, output); THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer if (p == 1) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), reduction == at::Reduction::Mean, margin ); } else if (p == 2) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, input->size(0), input->size(1), reduction == at::Reduction::Mean, margin ); } THCudaCheck(hipGetLastError()); float sum = THCTensor_(sumall)(state, output_); THCTensor_(set0d)(state, output, ScalarConvert<accreal, scalar_t>::to(sum)); THCTensor_(free)(state, output_); } } else { AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes()); } THCTensor_(free)(state, input); if(weights) THCTensor_(free)(state, weights); } void THNN_(MultiMarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, int p, THCTensor *weights, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_assertSameGPU(state, 3, input, gradInput, target); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); if(weights) weights = THCTensor_(newContiguous)(state, weights); if (THTensor_nDimensionLegacyNoScalars(input) == 1) { dim3 blocks(1); dim3 threads(MULTIMARGIN_THREADS); if (p == 1) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(gradInput, 0), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } else if (p == 2) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? 
THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(gradInput, 0), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } THCudaCheck(hipGetLastError()); } else if (input->dim() == 2) { int nframe = gradInput->size(0); THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(gradInput->size(0)); dim3 threads(MULTIMARGIN_THREADS); if (p == 1) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, gradInput->size(1), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } else if (p == 2) { hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, gradInput->size(1), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } THCudaCheck(hipGetLastError()); } else { AT_ERROR("non-empty vector or matrix expected, got ", input->sizes()); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); if(weights) THCTensor_(free)(state, weights); } #endif
8bc0ebd28ba4478c101b75d71776e6577e2bd899.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/MultiMarginCriterion.cu" #else // TODO: improve error messages void THNN_(MultiMarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, int p, THCTensor *weights, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_assertSameGPU(state, 2, input, target); input = THCTensor_(newContiguous)(state, input); if(weights) weights = THCTensor_(newContiguous)(state, weights); if (THTensor_nDimensionLegacyNoScalars(input) == 1) { int nframe = 1; THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(1); dim3 threads(MULTIMARGIN_THREADS); if (reduction == at::Reduction::None) { THCTensor_(resizeAs)(state, output, target); } else { THCTensor_(resize0d)(state, output); } if (p == 1) { cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(input, 0), reduction == at::Reduction::Mean, margin ); } else if (p == 2) { cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(input, 0), reduction == at::Reduction::Mean, margin ); } THCudaCheck(cudaGetLastError()); } else if (input->dim() == 2) { int nframe = input->size(0); THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(input->size(0)); dim3 threads(MULTIMARGIN_THREADS); if (reduction == at::Reduction::None) { THCTensor_(resizeAs)(state, output, target); if (p == 1) { cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), false, margin ); } else if (p == 2) { cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), false, margin ); } THCudaCheck(cudaGetLastError()); } else { THCTensor_(resize0d)(state, output); THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer if (p == 1) { cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? 
THCTensor_(data)(state, weights) : NULL, nframe, input->size(1), reduction == at::Reduction::Mean, margin ); } else if (p == 2) { cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, output_), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, input->size(0), input->size(1), reduction == at::Reduction::Mean, margin ); } THCudaCheck(cudaGetLastError()); float sum = THCTensor_(sumall)(state, output_); THCTensor_(set0d)(state, output, ScalarConvert<accreal, scalar_t>::to(sum)); THCTensor_(free)(state, output_); } } else { AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes()); } THCTensor_(free)(state, input); if(weights) THCTensor_(free)(state, weights); } void THNN_(MultiMarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, int p, THCTensor *weights, accreal margin_) { scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_); THCUNN_assertSameGPU(state, 3, input, gradInput, target); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); if(weights) weights = THCTensor_(newContiguous)(state, weights); if (THTensor_nDimensionLegacyNoScalars(input) == 1) { dim3 blocks(1); dim3 threads(MULTIMARGIN_THREADS); if (p == 1) { cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(gradInput, 0), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } else if (p == 2) { cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, 1, THTensor_sizeLegacyNoScalars(gradInput, 0), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } THCudaCheck(cudaGetLastError()); } else if (input->dim() == 2) { int nframe = gradInput->size(0); THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3, "inconsistent target size"); dim3 blocks(gradInput->size(0)); dim3 threads(MULTIMARGIN_THREADS); if (p == 1) { cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? THCTensor_(data)(state, weights) : NULL, nframe, gradInput->size(1), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } else if (p == 2) { cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, c10::cuda::getCurrentCUDAStream()>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), weights ? 
THCTensor_(data)(state, weights) : NULL, nframe, gradInput->size(1), reduction == at::Reduction::Mean, margin, reduction != at::Reduction::None ); } THCudaCheck(cudaGetLastError()); } else { AT_ERROR("non-empty vector or matrix expected, got ", input->sizes()); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); if(weights) THCTensor_(free)(state, weights); } #endif
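For orientation, the `cunn_MultiMarginCriterion_updateOutput_kernel` launches above accumulate the standard multi-margin loss per sample. The following is a hedged CPU-only reference sketch, not taken from the THC/THCUNN sources; the function name and signature are illustrative only.

// Hedged sketch: per-sample multi-margin loss, with the optional class weight
// applied to each hinge term and the sum normalized by the number of classes.
//   loss(x, y) = (1/dim) * sum_{i != y} w[y] * max(0, margin - x[y] + x[i])^p
#include <cstddef>

static float multi_margin_forward(const float* x, std::size_t dim, std::size_t target,
                                  int p, float margin, const float* weights /* may be null */) {
    float sum = 0.f;
    for (std::size_t i = 0; i < dim; ++i) {
        if (i == target) continue;
        float z = margin - x[target] + x[i];
        if (z <= 0.f) continue;                 // hinge: only positive margins contribute
        float h = (p == 1) ? z : z * z;
        if (weights) h *= weights[target];
        sum += h;
    }
    return sum / static_cast<float>(dim);
}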
e51c655df903262d7676111a00742ec744a5034c.hip
// !!! This is a file automatically generated by hipify!!! // // Created by jiashuai on 18-1-17. // #include <omp.h> #include <thundergbm/util/cub_wrapper.h> #include "thundergbm/dataset.h" #include "hipsparse.h" #include "thrust/sort.h" #include "thrust/system/hip/detail/par.h" #include "thundergbm/util/device_lambda.cuh" void DataSet::load_from_file(string file_name) { LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); features.clear(); line_num.clear(); //instances_.clear(); n_features_ = 123; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto find_last_line = [](char *ptr, const char *begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; string first_line; getline(ifs, first_line); std::stringstream first_ss(first_line); int n_f_first = 0; string tuple; while(first_ss >> tuple) n_f_first++; ifs.clear(); ifs.seekg (0, std::ios::beg); int n_sum_line = 0; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); //vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); //vector<vector<vector<int>>> index_thread(nthread); vector<vector<vector<float_type>>> feature_thread(nthread); vector<vector<vector<int>>> line_thread(nthread); for(int i = 0; i < nthread; i++){ feature_thread[i].resize(n_f_first * 2); line_thread[i].resize(n_f_first * 2); } vector<int> n_line(nthread); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = ::min(tid * nstep, size - 1); size_t send = ::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; int lid = 0; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); string tuple; //int fid = 0; while(ss >> tuple){ int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; //index_thread[tid].back().emplace_back(i); if(i > local_feature[tid]){ local_feature[tid] = i; } if(i > feature_thread[tid].size()){ feature_thread[tid].resize(i); line_thread[tid].resize(i); // min_fea.resize(i); // max_fea.resize(i); // min_fea[i - 1] = INFINITY; // max_fea[i - 1] = -INFINITY; } feature_thread[tid][i-1].emplace_back(v); line_thread[tid][i-1].emplace_back(lid); //fid++; } lid++; //read next instance lbegin = lend; } n_line[tid] = lid; } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } // this->features.resize(n_features_); // this->line_num.resize(n_features_); this->features.resize(max_dimension); this->line_num.resize(max_dimension); for(int i = 0; i < nthread; i++) { for(int j = 0; j < local_feature[i]; j++) { this->features[j].insert(this->features[j].end(), feature_thread[i][j].begin(), feature_thread[i][j].end()); for (int k = 
0; k < line_thread[i][j].size(); k++) { line_thread[i][j][k] += n_sum_line; } this->line_num[j].insert(this->line_num[j].end(), line_thread[i][j].begin(), line_thread[i][j].end()); } n_sum_line += n_line[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); } } n_features_ = max_dimension; min_fea.resize(max_dimension); max_fea.resize(max_dimension); for(int i = 0; i < max_dimension; i++){ if(features[i].size() == 0){ min_fea[i] = INFINITY; max_fea[i] = -INFINITY; } else{ min_fea[i] = *std::min_element(features[i].begin(), features[i].end()); max_fea[i] = *std::max_element(features[i].begin(), features[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } void DataSet::load_from_file_csr(string file_name) { LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); instances_.clear(); n_features_ = 0; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto find_last_line = [](char *ptr, const char *begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = ::min(tid * nstep, size - 1); size_t send = ::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); //read features of an instance instances_thread[tid].emplace_back(); string tuple; while (ss >> tuple) { int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; instances_thread[tid].back().emplace_back(i, v); if (i > local_feature[tid]) local_feature[tid] = i; }; //read next instance lbegin = lend; } } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); this->instances_.insert(instances_.end(), instances_thread[i].begin(), instances_thread[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } void DataSet::load_from_file_two_dimension(string file_name){ LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); features.clear(); line_num.clear(); //instances_.clear(); n_features_ = 0; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto 
find_last_line = [](char *ptr, const char *begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; string first_line; getline(ifs, first_line); std::stringstream first_ss(first_line); int n_f_first = 0; string tuple; while(first_ss >> tuple) n_f_first++; ifs.clear(); ifs.seekg (0, std::ios::beg); int n_sum_line = 0; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); //vector<vector<vector<int>>> index_thread(nthread); vector<vector<vector<float_type>>> feature_thread(nthread); vector<vector<vector<int>>> line_thread(nthread); for(int i = 0; i < nthread; i++){ feature_thread[i].resize(n_f_first * 2); line_thread[i].resize(n_f_first * 2); } vector<int> n_line(nthread); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = ::min(tid * nstep, size - 1); size_t send = ::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; int lid = 0; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); //read features of an instance instances_thread[tid].emplace_back(); string tuple; //int fid = 0; while(ss >> tuple){ int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; instances_thread[tid].back().emplace_back(i, v); if(i > local_feature[tid]){ local_feature[tid] = i; } if(i > feature_thread[tid].size()){ feature_thread[tid].resize(i); line_thread[tid].resize(i); } feature_thread[tid][i-1].emplace_back(v); line_thread[tid][i-1].emplace_back(lid); //fid++; } lid++; //read next instance lbegin = lend; } n_line[tid] = lid; } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } this->features.resize(n_features_); this->line_num.resize(n_features_); for(int i = 0; i < nthread; i++) { for(int j = 0; j < local_feature[i]; j++) { this->features[j].insert(this->features[j].end(), feature_thread[i][j].begin(), feature_thread[i][j].end()); for (int k = 0; k < line_thread[i][j].size(); k++) { line_thread[i][j][k] += n_sum_line; } this->line_num[j].insert(this->line_num[j].end(), line_thread[i][j].begin(), line_thread[i][j].end()); } n_sum_line += n_line[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); this->instances_.insert(instances_.end(), instances_thread[i].begin(), instances_thread[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } const DataSet::node2d &DataSet::instances() const { return this->instances_; } size_t DataSet::n_features() const { return n_features_; } size_t DataSet::n_instances() const { //return this->instances_.size(); return this->y_.size(); } const vector<float_type> &DataSet::y() const { return this->y_; } void DataSet::compression() { }
e51c655df903262d7676111a00742ec744a5034c.cu
// // Created by jiashuai on 18-1-17. // #include <omp.h> #include <thundergbm/util/cub_wrapper.h> #include "thundergbm/dataset.h" #include "cusparse.h" #include "thrust/sort.h" #include "thrust/system/cuda/detail/par.h" #include "thundergbm/util/device_lambda.cuh" void DataSet::load_from_file(string file_name) { LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); features.clear(); line_num.clear(); //instances_.clear(); n_features_ = 123; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto find_last_line = [](char *ptr, const char *begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; string first_line; getline(ifs, first_line); std::stringstream first_ss(first_line); int n_f_first = 0; string tuple; while(first_ss >> tuple) n_f_first++; ifs.clear(); ifs.seekg (0, std::ios::beg); int n_sum_line = 0; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); //vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); //vector<vector<vector<int>>> index_thread(nthread); vector<vector<vector<float_type>>> feature_thread(nthread); vector<vector<vector<int>>> line_thread(nthread); for(int i = 0; i < nthread; i++){ feature_thread[i].resize(n_f_first * 2); line_thread[i].resize(n_f_first * 2); } vector<int> n_line(nthread); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = std::min(tid * nstep, size - 1); size_t send = std::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; int lid = 0; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); string tuple; //int fid = 0; while(ss >> tuple){ int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; //index_thread[tid].back().emplace_back(i); if(i > local_feature[tid]){ local_feature[tid] = i; } if(i > feature_thread[tid].size()){ feature_thread[tid].resize(i); line_thread[tid].resize(i); // min_fea.resize(i); // max_fea.resize(i); // min_fea[i - 1] = INFINITY; // max_fea[i - 1] = -INFINITY; } feature_thread[tid][i-1].emplace_back(v); line_thread[tid][i-1].emplace_back(lid); //fid++; } lid++; //read next instance lbegin = lend; } n_line[tid] = lid; } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } // this->features.resize(n_features_); // this->line_num.resize(n_features_); this->features.resize(max_dimension); this->line_num.resize(max_dimension); for(int i = 0; i < nthread; i++) { for(int j = 0; j < local_feature[i]; j++) { this->features[j].insert(this->features[j].end(), feature_thread[i][j].begin(), feature_thread[i][j].end()); for (int k = 0; k < line_thread[i][j].size(); k++) { 
line_thread[i][j][k] += n_sum_line; } this->line_num[j].insert(this->line_num[j].end(), line_thread[i][j].begin(), line_thread[i][j].end()); } n_sum_line += n_line[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); } } n_features_ = max_dimension; min_fea.resize(max_dimension); max_fea.resize(max_dimension); for(int i = 0; i < max_dimension; i++){ if(features[i].size() == 0){ min_fea[i] = INFINITY; max_fea[i] = -INFINITY; } else{ min_fea[i] = *std::min_element(features[i].begin(), features[i].end()); max_fea[i] = *std::max_element(features[i].begin(), features[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } void DataSet::load_from_file_csr(string file_name) { LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); instances_.clear(); n_features_ = 0; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto find_last_line = [](char *ptr, const char *begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = std::min(tid * nstep, size - 1); size_t send = std::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); //read features of an instance instances_thread[tid].emplace_back(); string tuple; while (ss >> tuple) { int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; instances_thread[tid].back().emplace_back(i, v); if (i > local_feature[tid]) local_feature[tid] = i; }; //read next instance lbegin = lend; } } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); this->instances_.insert(instances_.end(), instances_thread[i].begin(), instances_thread[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } void DataSet::load_from_file_two_dimension(string file_name){ LOG(INFO) << "loading LIBSVM dataset from file \"" << file_name << "\""; y_.clear(); features.clear(); line_num.clear(); //instances_.clear(); n_features_ = 0; std::ifstream ifs(file_name, std::ifstream::binary); CHECK(ifs.is_open()) << "file " << file_name << " not found"; std::array<char, 2 << 20> buffer{}; //16M const int nthread = omp_get_max_threads(); auto find_last_line = [](char *ptr, const char 
*begin) { while (ptr != begin && *ptr != '\n' && *ptr != '\r') --ptr; return ptr; }; string first_line; getline(ifs, first_line); std::stringstream first_ss(first_line); int n_f_first = 0; string tuple; while(first_ss >> tuple) n_f_first++; ifs.clear(); ifs.seekg (0, std::ios::beg); int n_sum_line = 0; while (ifs) { ifs.read(buffer.data(), buffer.size()); char *head = buffer.data(); size_t size = ifs.gcount(); vector<vector<float_type>> y_thread(nthread); vector<node2d> instances_thread(nthread); vector<size_t> local_feature(nthread, 0); //vector<vector<vector<int>>> index_thread(nthread); vector<vector<vector<float_type>>> feature_thread(nthread); vector<vector<vector<int>>> line_thread(nthread); for(int i = 0; i < nthread; i++){ feature_thread[i].resize(n_f_first * 2); line_thread[i].resize(n_f_first * 2); } vector<int> n_line(nthread); #pragma omp parallel num_threads(nthread) { //get working area of this thread int tid = omp_get_thread_num(); size_t nstep = (size + nthread - 1) / nthread; size_t sbegin = std::min(tid * nstep, size - 1); size_t send = std::min((tid + 1) * nstep, size - 1); char *pbegin = find_last_line(head + sbegin, head); char *pend = find_last_line(head + send, head); //move stream start position to the end of last line if (tid == nthread - 1) ifs.seekg(pend - head - send, std::ios_base::cur); //read instances line by line char *lbegin = pbegin; char *lend = lbegin; int lid = 0; while (lend != pend) { //get one line lend = lbegin + 1; while (lend != pend && *lend != '\n' && *lend != '\r') { ++lend; } string line(lbegin, lend); std::stringstream ss(line); //read label of an instance y_thread[tid].emplace_back(); ss >> y_thread[tid].back(); //read features of an instance instances_thread[tid].emplace_back(); string tuple; //int fid = 0; while(ss >> tuple){ int i; float v; CHECK_EQ(sscanf(tuple.c_str(), "%d:%f", &i, &v), 2) << "read error, using [index]:[value] format"; instances_thread[tid].back().emplace_back(i, v); if(i > local_feature[tid]){ local_feature[tid] = i; } if(i > feature_thread[tid].size()){ feature_thread[tid].resize(i); line_thread[tid].resize(i); } feature_thread[tid][i-1].emplace_back(v); line_thread[tid][i-1].emplace_back(lid); //fid++; } lid++; //read next instance lbegin = lend; } n_line[tid] = lid; } for (int i = 0; i < nthread; i++) { if (local_feature[i] > n_features_) n_features_ = local_feature[i]; } this->features.resize(n_features_); this->line_num.resize(n_features_); for(int i = 0; i < nthread; i++) { for(int j = 0; j < local_feature[i]; j++) { this->features[j].insert(this->features[j].end(), feature_thread[i][j].begin(), feature_thread[i][j].end()); for (int k = 0; k < line_thread[i][j].size(); k++) { line_thread[i][j][k] += n_sum_line; } this->line_num[j].insert(this->line_num[j].end(), line_thread[i][j].begin(), line_thread[i][j].end()); } n_sum_line += n_line[i]; } for (int i = 0; i < nthread; i++) { this->y_.insert(y_.end(), y_thread[i].begin(), y_thread[i].end()); this->instances_.insert(instances_.end(), instances_thread[i].begin(), instances_thread[i].end()); } } LOG(INFO) << "#instances = " << this->n_instances() << ", #features = " << this->n_features(); } const DataSet::node2d &DataSet::instances() const { return this->instances_; } size_t DataSet::n_features() const { return n_features_; } size_t DataSet::n_instances() const { //return this->instances_.size(); return this->y_.size(); } const vector<float_type> &DataSet::y() const { return this->y_; } void DataSet::compression() { }
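All three loaders above share the same inner step: split a LIBSVM line into a label followed by `index:value` tuples parsed with `sscanf`. A hedged standalone sketch of just that step is shown below; the type and function names are illustrative, not part of ThunderGBM.

// Hedged sketch: parse one LIBSVM line "label idx:val idx:val ...".
#include <cstdio>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

struct SparseRow { float label; std::vector<std::pair<int, float>> feats; };

static SparseRow parse_libsvm_line(const std::string& line) {
    SparseRow row{};
    std::stringstream ss(line);
    ss >> row.label;                           // first token is the label
    std::string tuple;
    while (ss >> tuple) {                      // remaining tokens are "index:value"
        int i; float v;
        if (std::sscanf(tuple.c_str(), "%d:%f", &i, &v) == 2)
            row.feats.emplace_back(i, v);      // LIBSVM indices are 1-based
    }
    return row;
}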
d0cb67efa84a7e2747cf665d825f1a1a8d494c93.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Element-wise square of a row-major num_rows x num_cols matrix; ldf and lds are
// the leading dimensions of the input and output. Grid-stride loops in both
// dimensions let any launch configuration cover the whole matrix.
__global__ void square_matrix_kernel(int32_t num_rows, int32_t num_cols,
                                     const float* feats, int32_t ldf,
                                     float* feats_sq, int32_t lds) {
    for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < num_rows; i += blockDim.y * gridDim.y) {
        for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < num_cols; j += blockDim.x * gridDim.x) {
            float f = feats[i * ldf + j];
            feats_sq[i * lds + j] = f * f;
        }
    }
}
d0cb67efa84a7e2747cf665d825f1a1a8d494c93.cu
#include "includes.h" __global__ void square_matrix_kernel(int32_t num_rows, int32_t num_cols, const float* feats, int32_t ldf, float* feats_sq, int32_t lds) { for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < num_rows; i += blockDim.y * gridDim.y) { for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < num_cols; j += blockDim.x * gridDim.x) { float f = feats[i * ldf + j]; feats_sq[i * lds + j] = f * f; } } }
e1b18c0ab37547c4ec31bfeaba9dfd76f240ad79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MP 1 #include <wb.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> __global__ void vecAdd(float * in1, float * in2, float * out, int len) { //@@ Insert code to implement vector addition here int i; i = blockIdx.x * blockDim.x + threadIdx.x; if(i<len) { out[i] = in1[i] + in2[i]; } } int main(int argc, char ** argv) { wbArg_t args; int inputLength; float * hostInput1; float * hostInput2; float * hostOutput; float * deviceInput1; float * deviceInput2; float * deviceOutput; args = wbArg_read(argc, argv); hipError_t err; int size; wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength); hostOutput = (float *) malloc(inputLength * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); size = inputLength * sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here err = hipMalloc((void**)&deviceInput1, size); if(err != hipSuccess) { printf("\nError...! Can't allocate memory for input array1 in device\n"); } err = hipMalloc((void**)&deviceInput2, size); if(err != hipSuccess) { printf("\nError...! Can't allocate memory for input array2 in device\n"); } err = hipMalloc((void**)&deviceOutput, size); if(err != hipSuccess) { printf("\nError...! Can't allocate memory for output array in device\n"); } wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here err = hipMemcpy(deviceInput1, hostInput1, size, hipMemcpyHostToDevice); if(err != hipSuccess) printf("\nError...! Couldn't copy input array1 from host to device\n"); err = hipMemcpy(deviceInput2, hostInput2, size, hipMemcpyHostToDevice); if(err != hipSuccess) printf("\nError...! Couldn't copy input array2 from host to device\n"); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 Grid(((inputLength-1)/256)+1,1,1); dim3 Block(256,1,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( vecAdd), dim3(Grid), dim3(Block), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here err = hipMemcpy(hostOutput, deviceOutput, size, hipMemcpyDeviceToHost); if(err != hipSuccess) printf("\nError...! Couldn't copy output array from device to host\n"); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceInput1); hipFree(deviceInput2); hipFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, inputLength); free(hostInput1); free(hostInput2); free(hostOutput); return 0; }
e1b18c0ab37547c4ec31bfeaba9dfd76f240ad79.cu
// MP 1 #include <wb.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> __global__ void vecAdd(float * in1, float * in2, float * out, int len) { //@@ Insert code to implement vector addition here int i; i = blockIdx.x * blockDim.x + threadIdx.x; if(i<len) { out[i] = in1[i] + in2[i]; } } int main(int argc, char ** argv) { wbArg_t args; int inputLength; float * hostInput1; float * hostInput2; float * hostOutput; float * deviceInput1; float * deviceInput2; float * deviceOutput; args = wbArg_read(argc, argv); cudaError_t err; int size; wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength); hostOutput = (float *) malloc(inputLength * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); size = inputLength * sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here err = cudaMalloc((void**)&deviceInput1, size); if(err != cudaSuccess) { printf("\nError...! Can't allocate memory for input array1 in device\n"); } err = cudaMalloc((void**)&deviceInput2, size); if(err != cudaSuccess) { printf("\nError...! Can't allocate memory for input array2 in device\n"); } err = cudaMalloc((void**)&deviceOutput, size); if(err != cudaSuccess) { printf("\nError...! Can't allocate memory for output array in device\n"); } wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here err = cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice); if(err != cudaSuccess) printf("\nError...! Couldn't copy input array1 from host to device\n"); err = cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice); if(err != cudaSuccess) printf("\nError...! Couldn't copy input array2 from host to device\n"); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 Grid(((inputLength-1)/256)+1,1,1); dim3 Block(256,1,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here vecAdd<<<Grid, Block>>>(deviceInput1, deviceInput2, deviceOutput, inputLength); cudaThreadSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here err = cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost); if(err != cudaSuccess) printf("\nError...! Couldn't copy output array from device to host\n"); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceInput1); cudaFree(deviceInput2); cudaFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, inputLength); free(hostInput1); free(hostInput2); free(hostOutput); return 0; }
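One gap in the MP1 solution above is that the kernel launch itself is never checked; `err` is only inspected after allocations and copies. A hedged sketch (not in the original) of how the launch step could be checked, reusing the existing `err` variable:

// Sketch: check both launch-time and execution-time errors around the launch above.
vecAdd<<<Grid, Block>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
err = cudaGetLastError();                  // reports invalid launch configuration, etc.
if (err != cudaSuccess)
    printf("\nError...! Kernel launch failed: %s\n", cudaGetErrorString(err));
err = cudaDeviceSynchronize();             // reports errors raised during execution
if (err != cudaSuccess)
    printf("\nError...! Kernel execution failed: %s\n", cudaGetErrorString(err));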
b2658ce108b94aec392c8009b9a348d574545011.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <vector> #include <map> #include <type_traits> #include <memory> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/gather.h> #include "thrust_rmm_allocator.h" #include "gtest/gtest.h" #include "gmock/gmock.h" #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include "gdf_test_utils.cuh" #include "gdf_test_fixtures.h" #include "../../gdf_table.cuh" #include "../../hashmap/hash_functions.cuh" #include "../../int_fastdiv.h" // Vector set to use rmmAlloc and rmmFree. template <typename T> using Vector = thrust::device_vector<T, rmm_allocator<T>>; template <template <typename> class hash_function, typename size_type> struct row_partition_mapper { __device__ row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions) : the_table{table_to_hash}, num_partitions{_num_partitions} {} __device__ hash_value_type operator()(size_type row_index) const { return the_table.template hash_row<hash_function>(row_index) % num_partitions; } gdf_table<size_type> const & the_table; // Using int_fastdiv can return results different from using the normal modulus // operation, therefore we need to use it in result verfication as well size_type num_partitions; }; // Put all repeated setup and validation stuff here template <class test_parameters> struct HashPartitionTest : public GdfTest { constexpr static gdf_hash_func gdf_hash_function = test_parameters::gdf_hash_function; const int num_cols_to_hash = test_parameters::num_cols_to_hash; std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash; // multi_column_t is a tuple of vectors. 
The number of vectors in the tuple // determines the number of columns, and the value_type of each // vector determines the data type of the column using multi_column_t = typename test_parameters::multi_column_t; multi_column_t input_columns; multi_column_t output_columns; // Containers for unique_ptrs to gdf_columns // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_columns; std::vector<gdf_col_pointer> gdf_output_columns; // Containers for the raw pointers to the gdf_columns std::vector<gdf_column*> raw_gdf_input_columns; std::vector<gdf_column*> raw_gdf_output_columns; HashPartitionTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~HashPartitionTest() { } void create_input( size_t num_rows, size_t max_value, bool print = false) { initialize_tuple(input_columns, num_rows, max_value); initialize_tuple(output_columns, num_rows, max_value); gdf_input_columns = initialize_gdf_columns(input_columns); gdf_output_columns = initialize_gdf_columns(output_columns); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_columns){ this->raw_gdf_input_columns.push_back(c.get()); } for(auto const& c : gdf_output_columns){ this->raw_gdf_output_columns.push_back(c.get()); } if(print) { std::cout << "Input column(s) created. Size: " << std::get<0>(input_columns).size() << std::endl; print_tuple(input_columns); } } std::vector<int> compute_gdf_result(const int num_partitions, bool print = false) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error result_error{GDF_SUCCESS}; gdf_column ** gdf_input_columns = raw_gdf_input_columns.data(); gdf_column ** gdf_output_columns = raw_gdf_output_columns.data(); std::vector<int> partition_offsets(num_partitions,0); result_error = gdf_hash_partition(num_columns, gdf_input_columns, this->cols_to_hash.data(), this->num_cols_to_hash, num_partitions, gdf_output_columns, partition_offsets.data(), gdf_hash_function); EXPECT_EQ(GDF_SUCCESS, result_error); if(print) { std::cout << "Partition offsets: "; for(int i = 0; i < num_partitions; ++i) { std::cout << partition_offsets[i] << " "; } std::cout << std::endl; } return partition_offsets; } void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false) { std::vector<gdf_column*> gdf_cols_to_hash; for(int i = 0; i < num_cols_to_hash; ++i) { gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]); } // Create a table from the gdf output of only the columns that were hashed std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())}; Vector<int> row_partition_numbers(table_to_hash->get_column_length()); // Compute the partition number for every row in the result switch(gdf_hash_function) { case GDF_HASH_MURMUR3: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions)); break; } case GDF_HASH_IDENTITY: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions)); break; } default: std::cerr << "Invalid GDF hash function.\n"; } std::vector<int> host_row_partition_numbers(table_to_hash->get_column_length()); hipMemcpy(host_row_partition_numbers.data(), 
row_partition_numbers.data().get(), table_to_hash->get_column_length() * sizeof(int), hipMemcpyDeviceToHost); if(print) { std::cout << "Row partition numbers:\n"; std::copy(host_row_partition_numbers.begin(), host_row_partition_numbers.end(), std::ostream_iterator<int>(std::cout, ", ")); std::cout << std::endl; } // Check that the partition number for every row is correct for(int partition_number = 0; partition_number < num_partitions; ++partition_number) { const int partition_start = partition_offsets[partition_number]; int partition_stop{0}; if(partition_number < (num_partitions - 1)) { partition_stop = partition_offsets[partition_number + 1]; } // The end of the last partition is the end of the table else { partition_stop = table_to_hash->get_column_length(); } // Everything in the current partition should have the same partition // number for(int i = partition_start; i < partition_stop; ++i) { EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!"; } } } }; template< typename tuple_of_vectors, gdf_hash_func hash, int... cols> struct TestParameters { static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)), "The number of columns to hash must be less than or equal to the total number of columns."); // The tuple of vectors that determines the number and types of the columns using multi_column_t = tuple_of_vectors; // The hash function to use constexpr static const gdf_hash_func gdf_hash_function = hash; // The number of columns to hash constexpr static const int num_cols_to_hash{sizeof...(cols)}; // The indices of the columns that will be hashed to determine the partitions constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}}; }; // Using Google Tests "Type Parameterized Tests" // Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of // TestParameters defined below // The number and types of columns determined by the number and types of vectors // in the VTuple<...> // The hash function to be used is determined by the gdf_hash_func enum // The columns to be hashed to determine the partition assignment are the last N integer template // arguments, where N <= the number of columns specified in the VTuple typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >, TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>, TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0> >Implementations; TYPED_TEST_CASE(HashPartitionTest, Implementations); TYPED_TEST(HashPartitionTest, ExampleTest) { const int num_partitions = 5; this->create_input(100, 100); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, OnePartition) { const int num_partitions = 1; this->create_input(100000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); 
this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, TenPartitions) { const int num_partitions = 10; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, EightPartitions) { const int num_partitions = 8; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, 257Partitions) { const int num_partitions = 257; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); }
b2658ce108b94aec392c8009b9a348d574545011.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <vector> #include <map> #include <type_traits> #include <memory> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/gather.h> #include "thrust_rmm_allocator.h" #include "gtest/gtest.h" #include "gmock/gmock.h" #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include "gdf_test_utils.cuh" #include "gdf_test_fixtures.h" #include "../../gdf_table.cuh" #include "../../hashmap/hash_functions.cuh" #include "../../int_fastdiv.h" // Vector set to use rmmAlloc and rmmFree. template <typename T> using Vector = thrust::device_vector<T, rmm_allocator<T>>; template <template <typename> class hash_function, typename size_type> struct row_partition_mapper { __device__ row_partition_mapper(gdf_table<size_type> const & table_to_hash, const size_type _num_partitions) : the_table{table_to_hash}, num_partitions{_num_partitions} {} __device__ hash_value_type operator()(size_type row_index) const { return the_table.template hash_row<hash_function>(row_index) % num_partitions; } gdf_table<size_type> const & the_table; // Using int_fastdiv can return results different from using the normal modulus // operation, therefore we need to use it in result verfication as well size_type num_partitions; }; // Put all repeated setup and validation stuff here template <class test_parameters> struct HashPartitionTest : public GdfTest { constexpr static gdf_hash_func gdf_hash_function = test_parameters::gdf_hash_function; const int num_cols_to_hash = test_parameters::num_cols_to_hash; std::array<int, test_parameters::num_cols_to_hash> cols_to_hash = test_parameters::cols_to_hash; // multi_column_t is a tuple of vectors. 
The number of vectors in the tuple // determines the number of columns, and the value_type of each // vector determines the data type of the column using multi_column_t = typename test_parameters::multi_column_t; multi_column_t input_columns; multi_column_t output_columns; // Containers for unique_ptrs to gdf_columns // unique_ptrs are used to automate freeing device memory std::vector<gdf_col_pointer> gdf_input_columns; std::vector<gdf_col_pointer> gdf_output_columns; // Containers for the raw pointers to the gdf_columns std::vector<gdf_column*> raw_gdf_input_columns; std::vector<gdf_column*> raw_gdf_output_columns; HashPartitionTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~HashPartitionTest() { } void create_input( size_t num_rows, size_t max_value, bool print = false) { initialize_tuple(input_columns, num_rows, max_value); initialize_tuple(output_columns, num_rows, max_value); gdf_input_columns = initialize_gdf_columns(input_columns); gdf_output_columns = initialize_gdf_columns(output_columns); // Fill vector of raw pointers to gdf_columns for(auto const& c : gdf_input_columns){ this->raw_gdf_input_columns.push_back(c.get()); } for(auto const& c : gdf_output_columns){ this->raw_gdf_output_columns.push_back(c.get()); } if(print) { std::cout << "Input column(s) created. Size: " << std::get<0>(input_columns).size() << std::endl; print_tuple(input_columns); } } std::vector<int> compute_gdf_result(const int num_partitions, bool print = false) { const int num_columns = std::tuple_size<multi_column_t>::value; gdf_error result_error{GDF_SUCCESS}; gdf_column ** gdf_input_columns = raw_gdf_input_columns.data(); gdf_column ** gdf_output_columns = raw_gdf_output_columns.data(); std::vector<int> partition_offsets(num_partitions,0); result_error = gdf_hash_partition(num_columns, gdf_input_columns, this->cols_to_hash.data(), this->num_cols_to_hash, num_partitions, gdf_output_columns, partition_offsets.data(), gdf_hash_function); EXPECT_EQ(GDF_SUCCESS, result_error); if(print) { std::cout << "Partition offsets: "; for(int i = 0; i < num_partitions; ++i) { std::cout << partition_offsets[i] << " "; } std::cout << std::endl; } return partition_offsets; } void verify_gdf_result(int num_partitions, std::vector<int> partition_offsets, bool print = false) { std::vector<gdf_column*> gdf_cols_to_hash; for(int i = 0; i < num_cols_to_hash; ++i) { gdf_cols_to_hash.push_back(raw_gdf_output_columns[cols_to_hash[i]]); } // Create a table from the gdf output of only the columns that were hashed std::unique_ptr< gdf_table<int> > table_to_hash{new gdf_table<int>(num_cols_to_hash, gdf_cols_to_hash.data())}; Vector<int> row_partition_numbers(table_to_hash->get_column_length()); // Compute the partition number for every row in the result switch(gdf_hash_function) { case GDF_HASH_MURMUR3: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<MurmurHash3_32,int>(*table_to_hash,num_partitions)); break; } case GDF_HASH_IDENTITY: { thrust::tabulate(thrust::device, row_partition_numbers.begin(), row_partition_numbers.end(), row_partition_mapper<IdentityHash,int>(*table_to_hash,num_partitions)); break; } default: std::cerr << "Invalid GDF hash function.\n"; } std::vector<int> host_row_partition_numbers(table_to_hash->get_column_length()); cudaMemcpy(host_row_partition_numbers.data(), 
row_partition_numbers.data().get(), table_to_hash->get_column_length() * sizeof(int), cudaMemcpyDeviceToHost); if(print) { std::cout << "Row partition numbers:\n"; std::copy(host_row_partition_numbers.begin(), host_row_partition_numbers.end(), std::ostream_iterator<int>(std::cout, ", ")); std::cout << std::endl; } // Check that the partition number for every row is correct for(int partition_number = 0; partition_number < num_partitions; ++partition_number) { const int partition_start = partition_offsets[partition_number]; int partition_stop{0}; if(partition_number < (num_partitions - 1)) { partition_stop = partition_offsets[partition_number + 1]; } // The end of the last partition is the end of the table else { partition_stop = table_to_hash->get_column_length(); } // Everything in the current partition should have the same partition // number for(int i = partition_start; i < partition_stop; ++i) { EXPECT_EQ(partition_number, host_row_partition_numbers[i]) << "Partition number for row: " << i << " doesn't match!"; } } } }; template< typename tuple_of_vectors, gdf_hash_func hash, int... cols> struct TestParameters { static_assert((std::tuple_size<tuple_of_vectors>::value >= sizeof...(cols)), "The number of columns to hash must be less than or equal to the total number of columns."); // The tuple of vectors that determines the number and types of the columns using multi_column_t = tuple_of_vectors; // The hash function to use constexpr static const gdf_hash_func gdf_hash_function = hash; // The number of columns to hash constexpr static const int num_cols_to_hash{sizeof...(cols)}; // The indices of the columns that will be hashed to determine the partitions constexpr static const std::array<int, sizeof...(cols)> cols_to_hash{{cols...}}; }; // Using Google Tests "Type Parameterized Tests" // Every test defined as TYPED_TEST(HashPartitionTest, *) will be run once for every instance of // TestParameters defined below // The number and types of columns determined by the number and types of vectors // in the VTuple<...> // The hash function to be used is determined by the gdf_hash_func enum // The columns to be hashed to determine the partition assignment are the last N integer template // arguments, where N <= the number of columns specified in the VTuple typedef ::testing::Types< TestParameters< VTuple<int32_t>, GDF_HASH_IDENTITY, 0 >, TestParameters< VTuple<int32_t, int32_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, double>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int32_t>, GDF_HASH_MURMUR3, 1>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_IDENTITY, 2, 3>, TestParameters< VTuple<uint32_t, double, int32_t, double>, GDF_HASH_MURMUR3, 0, 2, 3>, TestParameters< VTuple<int64_t, int64_t, float, double>, GDF_HASH_MURMUR3, 1, 3>, TestParameters< VTuple<int64_t, int64_t>, GDF_HASH_MURMUR3, 0, 1>, TestParameters< VTuple<float, int32_t>, GDF_HASH_MURMUR3, 0> >Implementations; TYPED_TEST_CASE(HashPartitionTest, Implementations); TYPED_TEST(HashPartitionTest, ExampleTest) { const int num_partitions = 5; this->create_input(100, 100); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, OnePartition) { const int num_partitions = 1; this->create_input(100000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); 
this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, TenPartitions) { const int num_partitions = 10; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, EightPartitions) { const int num_partitions = 8; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); } TYPED_TEST(HashPartitionTest, 257Partitions) { const int num_partitions = 257; this->create_input(1000000, 1000); std::vector<int> partition_offsets = this->compute_gdf_result(num_partitions); this->verify_gdf_result(num_partitions, partition_offsets); }
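The tests above rely on verify_gdf_result to confirm that every row placed between partition_offsets[p] and the next offset really hashes to partition p. A minimal host-side sketch of that invariant check, assuming the row partition numbers and offsets have already been copied into std::vectors; the function name is illustrative and not part of the test fixture.

#include <vector>

// Hedged sketch: rows inside partition p must all carry partition number p.
static bool partitions_are_contiguous(const std::vector<int>& row_partition_numbers,
                                      const std::vector<int>& partition_offsets,
                                      int num_partitions) {
  for (int p = 0; p < num_partitions; ++p) {
    const int start = partition_offsets[p];
    const int stop  = (p < num_partitions - 1)
                        ? partition_offsets[p + 1]
                        : static_cast<int>(row_partition_numbers.size()); // last partition runs to the end
    for (int i = start; i < stop; ++i) {
      if (row_partition_numbers[i] != p) { return false; } // row landed in the wrong bucket
    }
  }
  return true;
}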
385a03b91eae9ad098bdb3481f0dca473df2875f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> __global__ void gpu_matrixmult(float *a, float *b, float *c, int n, int m, int p) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int indexb = col; int index = row * m + col; if(col < m && row < n) { c[index] = 0.; for (int indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m){ c[index] += a[indexa]*b[indexb]; } } } void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int p) { int index, indexa, indexb; float cvalue; for(int col=0;col < m; col++) for(int row=0;row < n; row++) { indexb = col; index = row * m + col; cvalue = 0.; for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m) cvalue += a[indexa]*b[indexb]; c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int Grid_Dim_x = 1; //Grid dimension, x int Grid_Dim_y = 1; //Grid dimension, y int Block_Dim_x = 1; //Block dimension, x int Block_Dim_y = 1; //Block dimension, y int n,m,p; // matrix dimension float *a,*b,*c; float *dev_a, *dev_b, *dev_c; int size_a, size_b, size_c; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //else printf("Device count = %d\n",gpucount); if (sizeof(argv)<8) { printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); Block_Dim_x = atoi(argv[4]); // non-Square block, x dimension size (# of cols) Block_Dim_y = atoi(argv[5]); // non-Square block, y dimension size (# of rows) if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_x = atoi(argv[6]); // non-Square grid, x diemnsion size (# of cols) Grid_Dim_y = atoi(argv[7]); // non-Square grid, y dimension size (# of rows) if (Grid_Dim_x*Block_Dim_x < m ) { printf("Error, number of threads in x dimensions less than number of array elements\n"); exit (-1); } if (Grid_Dim_y*Block_Dim_y < n) { printf("Error, number of threads in y dimensions less than number of array elements\n"); exit (-1); } /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); printf("Block_x = %d Block_y = %d, Grid_x = %d Grid_y = %d\n",Block_Dim_x, Block_Dim_y,Grid_Dim_x, Grid_Dim_y); */ dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure size_a = n * p * sizeof(float); // number of bytes in total in arrays size_b = p * m * sizeof(float); // number of bytes in total in arrays size_c = n * m * sizeof(float); // number of bytes in total in arrays a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host b = (float*) malloc(size_b); c = (float*) malloc(size_c); // results from GPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (float) rand() / (float) RAND_MAX; //a[i * p + 
j] = (float) (i+j); //printf("%.2f ", a[i * p + j]); } //printf("\n"); } //printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (float) rand() / (float) RAND_MAX; //b[i * m + j] = (float) (i+j); //printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE ON GPU ---------------------------- hipMalloc((void**)&dev_a, size_a); // allocate memory on device hipMalloc((void**)&dev_b, size_b); hipMalloc((void**)&dev_c, size_c); hipMemcpy(dev_a, a , size_a ,hipMemcpyHostToDevice); hipMemcpy(dev_b, b , size_b ,hipMemcpyHostToDevice); hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); // hipEventSynchronize(start); // not needed hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid),dim3(Block), 0, 0, dev_a,dev_b,dev_c,n,m,p); hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); hipMemcpy(c,dev_c, size_c ,hipMemcpyDeviceToHost); //printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) /* hipEventRecord(start, 0); // use same timing // hipEventSynchronize(start); // not needed cpu_matrixmult(a,b,c, n, m, p); // do calculation on host (NOTE: This computes the diff with GPU result.) hipEventRecord(stop, 0); // instrument code to measue end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); */ // -------------- clean up --------------------------------------- free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
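The launch shape above is taken entirely from the command line and then validated against n and m; note that the argument-count guard compares sizeof(argv), which is only the size of a char** pointer, so argc is the value that actually needs checking. A small sketch of deriving a covering grid from a chosen block by ceiling division; blocks_needed and the usage comments are illustrative, not part of the program.

// Hedged sketch: ceil-division grid sizing plus an argc guard.
// sizeof(argv) measures the char** pointer, not the number of arguments.
static int blocks_needed(int elements, int threads_per_block) {
  return (elements + threads_per_block - 1) / threads_per_block; // ceiling division
}

// usage sketch inside main():
//   if (argc < 8) { printf("Usage: ...\n"); exit(-1); }   // check argc, not sizeof(argv)
//   Grid_Dim_x = blocks_needed(m, Block_Dim_x);           // enough block columns to cover m
//   Grid_Dim_y = blocks_needed(n, Block_Dim_y);           // enough block rows to cover n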
385a03b91eae9ad098bdb3481f0dca473df2875f.cu
#include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <math.h> __global__ void gpu_matrixmult(float *a, float *b, float *c, int n, int m, int p) { int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int indexb = col; int index = row * m + col; if(col < m && row < n) { c[index] = 0.; for (int indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m){ c[index] += a[indexa]*b[indexb]; } } } void cpu_matrixmult(float *a,float *b, float *c, int n, int m, int p) { int index, indexa, indexb; float cvalue; for(int col=0;col < m; col++) for(int row=0;row < n; row++) { indexb = col; index = row * m + col; cvalue = 0.; for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m) cvalue += a[indexa]*b[indexb]; c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations. } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int Grid_Dim_x = 1; //Grid dimension, x int Grid_Dim_y = 1; //Grid dimension, y int Block_Dim_x = 1; //Block dimension, x int Block_Dim_y = 1; //Block dimension, y int n,m,p; // matrix dimension float *a,*b,*c; float *dev_a, *dev_b, *dev_c; int size_a, size_b, size_c; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //else printf("Device count = %d\n",gpucount); if (sizeof(argv)<8) { printf("Usage: Task1GPUsp <n> <m> <p> <block dim x> <block dim y> <grid dim x> <grid dim y>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); Block_Dim_x = atoi(argv[4]); // non-Square block, x dimension size (# of cols) Block_Dim_y = atoi(argv[5]); // non-Square block, y dimension size (# of rows) if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_x = atoi(argv[6]); // non-Square grid, x diemnsion size (# of cols) Grid_Dim_y = atoi(argv[7]); // non-Square grid, y dimension size (# of rows) if (Grid_Dim_x*Block_Dim_x < m ) { printf("Error, number of threads in x dimensions less than number of array elements\n"); exit (-1); } if (Grid_Dim_y*Block_Dim_y < n) { printf("Error, number of threads in y dimensions less than number of array elements\n"); exit (-1); } /* printf("A Matrix Dimension = %dx%d\n",n,p); printf("B Matrix Dimension = %dx%d\n",p,m); printf("C Matrix Dimension = %dx%d\n",n,m); printf("Block_x = %d Block_y = %d, Grid_x = %d Grid_y = %d\n",Block_Dim_x, Block_Dim_y,Grid_Dim_x, Grid_Dim_y); */ dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure size_a = n * p * sizeof(float); // number of bytes in total in arrays size_b = p * m * sizeof(float); // number of bytes in total in arrays size_c = n * m * sizeof(float); // number of bytes in total in arrays a = (float*) malloc(size_a); // dynamically allocated memory for arrays on host b = (float*) malloc(size_b); c = (float*) malloc(size_c); // results from GPU srand(12345); //int p = n; //Used here only to illustrate proper initialization for non-square case //printf ("a\n"); for(i=0;i < n;i++){ for(j=0;j < p;j++) { a[i * p + j] = (float) rand() / (float) RAND_MAX; //a[i * p + j] = (float) (i+j); //printf("%.2f ", a[i * p + j]); } 
//printf("\n"); } //printf("b\n"); for(i=0;i < p;i++){ for(j=0;j < m;j++) { b[i * m + j] = (float) rand() / (float) RAND_MAX; //b[i * m + j] = (float) (i+j); //printf("%.2f ", b[i * m + j]); } //printf("\n"); } // ------------- COMPUTATION DONE ON GPU ---------------------------- cudaMalloc((void**)&dev_a, size_a); // allocate memory on device cudaMalloc((void**)&dev_b, size_b); cudaMalloc((void**)&dev_c, size_c); cudaMemcpy(dev_a, a , size_a ,cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , size_b ,cudaMemcpyHostToDevice); cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); // cudaEventSynchronize(start); // not needed gpu_matrixmult<<<Grid,Block>>>(dev_a,dev_b,dev_c,n,m,p); cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); cudaMemcpy(c,dev_c, size_c ,cudaMemcpyDeviceToHost); //printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time //printf("c\n"); for(i=0;i < n;i++){ for(j=0;j < m;j++) { printf("%.2f ", c[i * m + j]); } printf("\n"); } // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) /* cudaEventRecord(start, 0); // use same timing // cudaEventSynchronize(start); // not needed cpu_matrixmult(a,b,c, n, m, p); // do calculation on host (NOTE: This computes the diff with GPU result.) cudaEventRecord(stop, 0); // instrument code to measue end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); */ // -------------- clean up --------------------------------------- free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
df9c907b21c746bba4414159aed5ce6cb257781b.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<sys/time.h> #define SIZE atoi(argv[1]) void safe_call(hipError_t ret, int line) { if(ret!=hipSuccess) { printf("Error at line %d : %s\n",line,hipGetErrorString(ret)); exit(-1); } } void fill_mat(double *arr, int len) { int i; for(i=0;i<len;i++) arr[i] = drand48(); } int main(int argc, char **argv) { if(argc!=2) { printf("Syntax : exec <size>\n"); exit(-1); } double *h_A, *h_B; double *d_A, *d_B; hipEvent_t start, stop; double time, bandwidth; float diff; double time_start, time_end; struct timeval tv; struct timezone tz; safe_call(hipEventCreate(&start),__LINE__); safe_call(hipEventCreate(&stop),__LINE__); h_A = (double *) malloc(SIZE*sizeof(double)); h_B = (double *) malloc(SIZE*sizeof(double)); if(h_A==NULL || h_B==NULL) { printf("Error : host memory allocation\n"); exit(-1); } safe_call(hipMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__); safe_call(hipMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__); fill_mat(h_A,SIZE); gettimeofday(&tv, &tz); time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double)); gettimeofday(&tv, &tz); time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ; printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth); safe_call(hipEventRecord(start, 0),__LINE__); safe_call(hipMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(double), hipMemcpyHostToDevice),__LINE__); safe_call(hipEventRecord(stop, 0),__LINE__); safe_call(hipEventSynchronize(stop),__LINE__); safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth); safe_call(hipEventRecord(start, 0),__LINE__); safe_call(hipMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(double), hipMemcpyDeviceToDevice),__LINE__); safe_call(hipEventRecord(stop, 0),__LINE__); safe_call(hipEventSynchronize(stop),__LINE__); safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth); safe_call(hipEventRecord(start, 0),__LINE__); safe_call(hipMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(double), hipMemcpyDeviceToHost),__LINE__); safe_call(hipEventRecord(stop, 0),__LINE__); safe_call(hipEventSynchronize(stop),__LINE__); safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth); safe_call(hipEventDestroy(start),__LINE__); safe_call(hipEventDestroy(stop),__LINE__); safe_call(hipFree(d_A),__LINE__); safe_call(hipFree(d_B),__LINE__); free(h_A); free(h_B); return 0; }
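Each bandwidth figure above is bytes moved divided by 2^30 times the elapsed seconds, with the byte count written as SIZE * sizeof(double) * 2.0 because the program charges each element once for the read and once for the write. A small sketch with the byte count made explicit; bandwidth_gbps is an illustrative helper, not part of the program.

#include <stddef.h>

// Hedged sketch: GiB/s from an explicit byte count and an elapsed time in seconds.
static double bandwidth_gbps(size_t bytes_moved, double seconds) {
  const double GiB = 1024.0 * 1024.0 * 1024.0;
  return (double)bytes_moved / (GiB * seconds);
}

// usage sketch, matching the H2H measurement above (read + write of SIZE doubles):
//   double bw = bandwidth_gbps(2 * SIZE * sizeof(double), time_end - time_start);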
df9c907b21c746bba4414159aed5ce6cb257781b.cu
#include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<sys/time.h> #define SIZE atoi(argv[1]) void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } void fill_mat(double *arr, int len) { int i; for(i=0;i<len;i++) arr[i] = drand48(); } int main(int argc, char **argv) { if(argc!=2) { printf("Syntax : exec <size>\n"); exit(-1); } double *h_A, *h_B; double *d_A, *d_B; cudaEvent_t start, stop; double time, bandwidth; float diff; double time_start, time_end; struct timeval tv; struct timezone tz; safe_call(cudaEventCreate(&start),__LINE__); safe_call(cudaEventCreate(&stop),__LINE__); h_A = (double *) malloc(SIZE*sizeof(double)); h_B = (double *) malloc(SIZE*sizeof(double)); if(h_A==NULL || h_B==NULL) { printf("Error : host memory allocation\n"); exit(-1); } safe_call(cudaMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__); safe_call(cudaMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__); fill_mat(h_A,SIZE); gettimeofday(&tv, &tz); time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double)); gettimeofday(&tv, &tz); time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ; printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth); safe_call(cudaEventRecord(start, 0),__LINE__); safe_call(cudaMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(double), cudaMemcpyHostToDevice),__LINE__); safe_call(cudaEventRecord(stop, 0),__LINE__); safe_call(cudaEventSynchronize(stop),__LINE__); safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth); safe_call(cudaEventRecord(start, 0),__LINE__); safe_call(cudaMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(double), cudaMemcpyDeviceToDevice),__LINE__); safe_call(cudaEventRecord(stop, 0),__LINE__); safe_call(cudaEventSynchronize(stop),__LINE__); safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth); safe_call(cudaEventRecord(start, 0),__LINE__); safe_call(cudaMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(double), cudaMemcpyDeviceToHost),__LINE__); safe_call(cudaEventRecord(stop, 0),__LINE__); safe_call(cudaEventSynchronize(stop),__LINE__); safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__); time = diff*1.0e-3; bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ; printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth); safe_call(cudaEventDestroy(start),__LINE__); safe_call(cudaEventDestroy(stop),__LINE__); safe_call(cudaFree(d_A),__LINE__); safe_call(cudaFree(d_B),__LINE__); free(h_A); free(h_B); return 0; }
a61518172013814681bd5b6fc4c7690ae80f0b09.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 512 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 8.0f #define rule2Distance 4.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_pos_coherent; glm::vec3 *dev_vel_coherent; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount* sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_pos_coherent, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos_coherent failed!"); hipMalloc((void**)&dev_vel_coherent, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel_coherent failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernCopyPositionsToVBO) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_pos, vbodptr_positions, scene_scale); hipLaunchKernelGGL(( kernCopyVelocitiesToVBO) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1, Cohesion: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2, Avoidance: boids try to stay a distance d away from each other // Rule 3, Matching: boids try to match the speed of surrounding boids // sum each contribution separately glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[iSelf]; for (int i = 0; i < N; i++) { if (i == iSelf) continue; glm::vec3 otherPos = pos[i]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel[i]; } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ? (matchVel / numMatch) : glm::vec3(0); return vel[iSelf] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; glm::vec3 vel = computeVelocityChange(N, idx, pos, vel1); float speed = glm::length(vel); if (speed > maxSpeed) { vel = glm::normalize(vel) * maxSpeed; } vel2[idx] = vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? // z y x __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= N) { return; } int dataIdx = idx;// indices[idx]; indices[idx] = idx; glm::vec3 curPos = pos[dataIdx]; curPos -= gridMin; curPos *= inverseCellWidth; curPos = floor(curPos); // now in grid-index-space int selfGridX = int(curPos.x); int selfGridY = int(curPos.x); int selfGridZ = int(curPos.z); int gridIdx = gridIndex3Dto1D(selfGridX, selfGridY, selfGridZ, gridResolution); gridIndices[idx] = gridIdx; } __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > N - 1) { return; } else if (idx == N - 1) { int c = particleGridIndices[idx]; gridCellEndIndices[c] = idx; } else { int c1 = particleGridIndices[idx]; int c2 = particleGridIndices[idx + 1]; if (c1 != c2) { gridCellEndIndices[c1] = idx; gridCellStartIndices[c2] = idx + 1; } else if (idx == 0) { gridCellStartIndices[c1] = 0; } } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; idx = particleArrayIndices[idx]; glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[idx]; glm::vec3 normPos = selfPos - gridMin; normPos *= inverseCellWidth; normPos = floor(normPos); // now in grid-index-space int selfGridX = int(normPos.x); int selfGridY = int(normPos.x); int selfGridZ = int(normPos.z); normPos += glm::vec3(0.5f); normPos *= cellWidth; // world space center of the grid, shifted to positive glm::vec3 shiftPos = selfPos - gridMin; int octx = ((shiftPos.x - normPos.x) > 0 ? 1 : 0); int octy = ((shiftPos.y - normPos.y) > 0 ? 1 : 0); int octz = ((shiftPos.z - normPos.z) > 0 ? 1 : 0); // outer 3 loops: for each cell in each axis for (int cz = octz - 1 + selfGridZ; cz <= selfGridZ + octz; cz++) { if (cz < 0 || cz > gridResolution) continue; for (int cy = octy - 1 + selfGridY; cy <= selfGridY + octy; cy++) { if (cy < 0 || cy > gridResolution) continue; for (int cx = octx - 1 + selfGridX; cx <= selfGridX + octx; cx++) { if (cx < 0 || cx > gridResolution) continue; int currGridIdx = gridIndex3Dto1D(cx, cy, cz, gridResolution); int gridStart = gridCellStartIndices[currGridIdx]; if (gridStart < 0) continue; // -1 indicates nothing in this cell int gridEnd = gridCellEndIndices[currGridIdx]; // iterate through all boids in this cell for (int gridCurr = gridStart; gridCurr <= gridEnd; gridCurr++) { int currBoidIdx = particleArrayIndices[gridCurr]; if (currBoidIdx == idx) continue; // same boid glm::vec3 otherPos = pos[currBoidIdx]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel1[currBoidIdx]; } } } } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ? 
(matchVel / numMatch) : glm::vec3(0); glm::vec3 finalVel = vel1[idx] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; float speed = glm::length(finalVel); if (speed > maxSpeed) { finalVel = glm::normalize(finalVel) * maxSpeed; } vel2[idx] = finalVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[idx]; glm::vec3 normPos = selfPos - gridMin; normPos *= inverseCellWidth; normPos = floor(normPos); // now in grid-index-space int selfGridX = int(normPos.x); int selfGridY = int(normPos.x); int selfGridZ = int(normPos.z); normPos += glm::vec3(0.5f); normPos *= cellWidth; // world space center of the grid, shifted to positive glm::vec3 shiftPos = selfPos - gridMin; int octx = ((shiftPos.x - normPos.x) > 0 ? 1 : 0); int octy = ((shiftPos.y - normPos.y) > 0 ? 1 : 0); int octz = ((shiftPos.z - normPos.z) > 0 ? 1 : 0); // outer 3 loops: for each cell in each axis for (int cz = octz - 1 + selfGridZ; cz <= selfGridZ + octz; cz++) { if (cz < 0 || cz > gridResolution) continue; for (int cy = octy - 1 + selfGridY; cy <= selfGridY + octy; cy++) { if (cy < 0 || cy > gridResolution) continue; for (int cx = octx - 1 + selfGridX; cx <= selfGridX + octx; cx++) { if (cx < 0 || cx > gridResolution) continue; int currGridIdx = gridIndex3Dto1D(cx, cy, cz, gridResolution); int gridStart = gridCellStartIndices[currGridIdx]; if (gridStart < 0) continue; // -1 indicates nothing in this cell int gridEnd = gridCellEndIndices[currGridIdx]; // iterate through all boids in this cell for (int gridCurr = gridStart; gridCurr <= gridEnd; gridCurr++) { int currBoidIdx = gridCurr; if (currBoidIdx == idx) continue; // same boid glm::vec3 otherPos = pos[currBoidIdx]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel1[currBoidIdx]; } } } } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ? (matchVel / numMatch) : glm::vec3(0); glm::vec3 finalVel = vel1[idx] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; float speed = glm::length(finalVel); if (speed > maxSpeed) { finalVel = glm::normalize(finalVel) * maxSpeed; } vel2[idx] = finalVel; } __global__ void kernSortBoidData(int N, int *sortedIndices, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *pos1, glm::vec3 *vel1) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (idx >= N) return; int boidIdx = sortedIndices[idx]; pos1[idx] = pos[boidIdx]; vel1[idx] = vel[boidIdx]; } /** * Step the entire N-body simulation by `dt` seconds. 
*/ void Boids::stepSimulationNaive(float dt) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos , fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2); // ping-pong the velocity buffers glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed dim3 cellCountSize((gridCellCount + blockSize - 1) / blockSize); dim3 boidCountSize((numObjects + blockSize - 1) / blockSize); // reset grid structure pointers kernResetIntBuffer , cellCountSize, blockSize , 0, 0, 0, gridCellCount, dev_gridCellStartIndices, -1); hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(cellCountSize), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellEndIndices, -1); // compute grid indices based on current boid positions hipLaunchKernelGGL(( kernComputeIndices) , dim3(boidCountSize), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // sort the boids based on grid indices thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); // initialize grid to boid pointers hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(boidCountSize), dim3(blockSize) , 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // run the simulation hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(boidCountSize), dim3(blockSize) , 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); hipLaunchKernelGGL(( kernUpdatePos) , dim3(boidCountSize), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. 
dim3 cellCountSize((gridCellCount + blockSize - 1) / blockSize); dim3 boidCountSize((numObjects + blockSize - 1) / blockSize); // reset grid structure pointers kernResetIntBuffer << < cellCountSize, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << < cellCountSize, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); // compute grid indices based on current boid positions kernComputeIndices << < boidCountSize, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // sort the boids based on grid indices thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); // rearrange boids to be memory coherent, swap buffers kernSortBoidData << < boidCountSize, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_pos_coherent, dev_vel_coherent); glm::vec3 *temp = dev_pos; dev_pos = dev_pos_coherent; dev_pos_coherent = temp; temp = dev_vel1; dev_vel1 = dev_vel_coherent; dev_vel_coherent = temp; // initialize grid to boid pointers kernIdentifyCellStartEnd << < boidCountSize, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // run the simulation kernUpdateVelNeighborSearchCoherent << < boidCountSize, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << < boidCountSize, blockSize >> >(numObjects, dt, dev_pos, dev_vel2); temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); hipFree(dev_pos_coherent); hipFree(dev_vel_coherent); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
a61518172013814681bd5b6fc4c7690ae80f0b09.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 512 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 8.0f #define rule2Distance 4.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. glm::vec3 *dev_pos_coherent; glm::vec3 *dev_vel_coherent; // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridSideCount; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridSideCount = 2 * halfSideCount; gridCellCount = gridSideCount * gridSideCount * gridSideCount; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount* sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_pos_coherent, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos_coherent failed!"); cudaMalloc((void**)&dev_vel_coherent, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel_coherent failed!"); cudaThreadSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO <<<fullBlocksPerGrid, blockSize >>>(numObjects, dev_pos, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO <<<fullBlocksPerGrid, blockSize >>>(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { // Rule 1, Cohesion: boids fly towards their local perceived center of mass, which excludes themselves // Rule 2, Avoidance: boids try to stay a distance d away from each other // Rule 3, Matching: boids try to match the speed of surrounding boids // sum each contribution separately glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[iSelf]; for (int i = 0; i < N; i++) { if (i == iSelf) continue; glm::vec3 otherPos = pos[i]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel[i]; } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ? (matchVel / numMatch) : glm::vec3(0); return vel[iSelf] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; glm::vec3 vel = computeVelocityChange(N, idx, pos, vel1); float speed = glm::length(vel); if (speed > maxSpeed) { vel = glm::normalize(vel) * maxSpeed; } vel2[idx] = vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. 
*/ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(x) // for(y) // for(z)? Or some other order? // z y x __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { // TODO-2.1 // - Label each boid with the index of its grid cell. // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx >= N) { return; } int dataIdx = idx;// indices[idx]; indices[idx] = idx; glm::vec3 curPos = pos[dataIdx]; curPos -= gridMin; curPos *= inverseCellWidth; curPos = floor(curPos); // now in grid-index-space int selfGridX = int(curPos.x); int selfGridY = int(curPos.y); int selfGridZ = int(curPos.z); int gridIdx = gridIndex3Dto1D(selfGridX, selfGridY, selfGridZ, gridResolution); gridIndices[idx] = gridIdx; } __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!"
int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx > N - 1) { return; } else if (idx == N - 1) { int c = particleGridIndices[idx]; gridCellEndIndices[c] = idx; } else { int c1 = particleGridIndices[idx]; int c2 = particleGridIndices[idx + 1]; if (idx == 0) { gridCellStartIndices[c1] = 0; } if (c1 != c2) { gridCellEndIndices[c1] = idx; gridCellStartIndices[c2] = idx + 1; } } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= N) { return; } idx = particleArrayIndices[idx]; glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[idx]; glm::vec3 normPos = selfPos - gridMin; normPos *= inverseCellWidth; normPos = floor(normPos); // now in grid-index-space int selfGridX = int(normPos.x); int selfGridY = int(normPos.y); int selfGridZ = int(normPos.z); normPos += glm::vec3(0.5f); normPos *= cellWidth; // world space center of the grid, shifted to positive glm::vec3 shiftPos = selfPos - gridMin; int octx = ((shiftPos.x - normPos.x) > 0 ? 1 : 0); int octy = ((shiftPos.y - normPos.y) > 0 ? 1 : 0); int octz = ((shiftPos.z - normPos.z) > 0 ? 1 : 0); // outer 3 loops: for each cell in each axis for (int cz = octz - 1 + selfGridZ; cz <= selfGridZ + octz; cz++) { if (cz < 0 || cz >= gridResolution) continue; for (int cy = octy - 1 + selfGridY; cy <= selfGridY + octy; cy++) { if (cy < 0 || cy >= gridResolution) continue; for (int cx = octx - 1 + selfGridX; cx <= selfGridX + octx; cx++) { if (cx < 0 || cx >= gridResolution) continue; int currGridIdx = gridIndex3Dto1D(cx, cy, cz, gridResolution); int gridStart = gridCellStartIndices[currGridIdx]; if (gridStart < 0) continue; // -1 indicates nothing in this cell int gridEnd = gridCellEndIndices[currGridIdx]; // iterate through all boids in this cell for (int gridCurr = gridStart; gridCurr <= gridEnd; gridCurr++) { int currBoidIdx = particleArrayIndices[gridCurr]; if (currBoidIdx == idx) continue; // same boid glm::vec3 otherPos = pos[currBoidIdx]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel1[currBoidIdx]; } } } } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ?
(matchVel / numMatch) : glm::vec3(0); glm::vec3 finalVel = vel1[idx] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; float speed = glm::length(finalVel); if (speed > maxSpeed) { finalVel = glm::normalize(finalVel) * maxSpeed; } vel2[idx] = finalVel; } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= N) { return; } glm::vec3 cohesCenter = glm::vec3(0); glm::vec3 avoidVel = glm::vec3(0); glm::vec3 matchVel = glm::vec3(0); // different number of neighbors depending on rule settings float numCohes = 0.0f; float numAvoid = 0.0f; float numMatch = 0.0f; glm::vec3 selfPos = pos[idx]; glm::vec3 normPos = selfPos - gridMin; normPos *= inverseCellWidth; normPos = floor(normPos); // now in grid-index-space int selfGridX = int(normPos.x); int selfGridY = int(normPos.y); int selfGridZ = int(normPos.z); normPos += glm::vec3(0.5f); normPos *= cellWidth; // world space center of the grid, shifted to positive glm::vec3 shiftPos = selfPos - gridMin; int octx = ((shiftPos.x - normPos.x) > 0 ? 1 : 0); int octy = ((shiftPos.y - normPos.y) > 0 ? 1 : 0); int octz = ((shiftPos.z - normPos.z) > 0 ? 1 : 0); // outer 3 loops: for each cell in each axis for (int cz = octz - 1 + selfGridZ; cz <= selfGridZ + octz; cz++) { if (cz < 0 || cz >= gridResolution) continue; for (int cy = octy - 1 + selfGridY; cy <= selfGridY + octy; cy++) { if (cy < 0 || cy >= gridResolution) continue; for (int cx = octx - 1 + selfGridX; cx <= selfGridX + octx; cx++) { if (cx < 0 || cx >= gridResolution) continue; int currGridIdx = gridIndex3Dto1D(cx, cy, cz, gridResolution); int gridStart = gridCellStartIndices[currGridIdx]; if (gridStart < 0) continue; // -1 indicates nothing in this cell int gridEnd = gridCellEndIndices[currGridIdx]; // iterate through all boids in this cell for (int gridCurr = gridStart; gridCurr <= gridEnd; gridCurr++) { int currBoidIdx = gridCurr; if (currBoidIdx == idx) continue; // same boid glm::vec3 otherPos = pos[currBoidIdx]; float dist = glm::distance(otherPos, selfPos); if (dist < rule1Distance) { numCohes++; cohesCenter += otherPos; } if (dist < rule2Distance) { numAvoid++; avoidVel -= otherPos - selfPos; } if (dist < rule3Distance) { numMatch++; matchVel += vel1[currBoidIdx]; } } } } } cohesCenter = numCohes > 0.0f ? (cohesCenter / numCohes) : selfPos; avoidVel = numAvoid > 0.0f ? (avoidVel / numAvoid) : glm::vec3(0); matchVel = numMatch > 0.0f ? (matchVel / numMatch) : glm::vec3(0); glm::vec3 finalVel = vel1[idx] + (cohesCenter - selfPos) * rule1Scale + avoidVel * rule2Scale + matchVel * rule3Scale; float speed = glm::length(finalVel); if (speed > maxSpeed) { finalVel = glm::normalize(finalVel) * maxSpeed; } vel2[idx] = finalVel; } __global__ void kernSortBoidData(int N, int *sortedIndices, glm::vec3 *pos, glm::vec3 *vel, glm::vec3 *pos1, glm::vec3 *vel1) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (idx >= N) return; int boidIdx = sortedIndices[idx]; pos1[idx] = pos[boidIdx]; vel1[idx] = vel[boidIdx]; } /** * Step the entire N-body simulation by `dt` seconds.
*/ void Boids::stepSimulationNaive(float dt) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernUpdateVelocityBruteForce <<<fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2); kernUpdatePos <<<fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel2); // ping-pong the velocity buffers glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed dim3 cellCountSize((gridCellCount + blockSize - 1) / blockSize); dim3 boidCountSize((numObjects + blockSize - 1) / blockSize); // reset grid structure pointers kernResetIntBuffer <<< cellCountSize, blockSize >>>(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer <<< cellCountSize, blockSize >>>(gridCellCount, dev_gridCellEndIndices, -1); // compute grid indices based on current boid positions kernComputeIndices <<< boidCountSize, blockSize >>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // sort the boids based on grid indices thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); // initialize grid to boid pointers kernIdentifyCellStartEnd <<< boidCountSize, blockSize >>>(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // run the simulation kernUpdateVelNeighborSearchScattered <<< boidCountSize, blockSize >>> (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2); kernUpdatePos <<< boidCountSize, blockSize >>>(numObjects, dt, dev_pos, dev_vel2); glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. // In Parallel: // - Label each particle with its array index as well as its grid index. // Use 2x width grids // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices // - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all // the particle data in the simulation array. // CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED // - Perform velocity updates using neighbor search // - Update positions // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. 
dim3 cellCountSize((gridCellCount + blockSize - 1) / blockSize); dim3 boidCountSize((numObjects + blockSize - 1) / blockSize); // reset grid structure pointers kernResetIntBuffer << < cellCountSize, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << < cellCountSize, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); // compute grid indices based on current boid positions kernComputeIndices << < boidCountSize, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices); // sort the boids based on grid indices thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values); // rearrange boids to be memory coherent, swap buffers kernSortBoidData << < boidCountSize, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_vel1, dev_pos_coherent, dev_vel_coherent); glm::vec3 *temp = dev_pos; dev_pos = dev_pos_coherent; dev_pos_coherent = temp; temp = dev_vel1; dev_vel1 = dev_vel_coherent; dev_vel_coherent = temp; // initialize grid to boid pointers kernIdentifyCellStartEnd << < boidCountSize, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); // run the simulation kernUpdateVelNeighborSearchCoherent << < boidCountSize, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2); kernUpdatePos << < boidCountSize, blockSize >> >(numObjects, dt, dev_pos, dev_vel2); temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp; } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); cudaFree(dev_pos_coherent); cudaFree(dev_vel_coherent); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
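The scattered-grid path above labels each boid with a cell key, sorts the boid indices by that key with thrust::sort_by_key, and then recovers each cell's start/end by comparing neighboring keys (the "key differs from the one before it, so a new cell starts here" rule in kernIdentifyCellStartEnd). The following standalone host-side sketch is not part of the original file; it uses an illustrative 1D grid and a hypothetical cellOf() helper purely to make the resulting data layout easy to inspect.

#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <cstdio>

// Illustrative 1D "grid": the cell key is just position / cellWidth.
static int cellOf(float x, float cellWidth) { return (int)(x / cellWidth); }

int main() {
    const float cellWidth = 1.0f;
    const int N = 8;
    float pos[N] = {0.2f, 3.7f, 1.1f, 0.9f, 3.1f, 2.5f, 0.4f, 2.9f};

    thrust::host_vector<int> cellKeys(N), boidIdx(N);
    for (int i = 0; i < N; ++i) { cellKeys[i] = cellOf(pos[i], cellWidth); boidIdx[i] = i; }

    // Same call the simulation makes on device_ptr wrappers, here on host data.
    thrust::sort_by_key(cellKeys.begin(), cellKeys.end(), boidIdx.begin());

    // Serial version of kernIdentifyCellStartEnd: a cell's start is where its
    // key first appears, its (inclusive) end is where the key last appears.
    const int numCells = 4;
    int start[numCells], end[numCells];
    for (int c = 0; c < numCells; ++c) { start[c] = -1; end[c] = -1; }
    for (int i = 0; i < N; ++i) {
        int c = cellKeys[i];
        if (i == 0 || cellKeys[i - 1] != c) start[c] = i;
        if (i == N - 1 || cellKeys[i + 1] != c) end[c] = i;
    }
    for (int c = 0; c < numCells; ++c)
        printf("cell %d: start %d end %d\n", c, start[c], end[c]);
    return 0;
}

The inclusive end index matches the `gridCurr <= gridEnd` loops in the neighbor-search kernels above; cells that never appear keep the -1 sentinel written by kernResetIntBuffer.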
efd6efd9e65165dd6ff05a0a80428f64ded60fab.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah */ #include "magma_internal.h" #define PRECISION_d #include "gemm_template_kernel_vbatched_hip.cuh" #include "gemm_config/dgemm_param_nn.h" #include "gemm_config/dgemm_param_nt.h" #include "gemm_config/dgemm_param_tn.h" #include "gemm_config/dgemm_param_tt.h" #define version(s,v) s ## _V_ ## v /******************************************************************************/ extern "C" void magmablas_dgemm_vbatched_core( magma_trans_t transA, magma_trans_t transB, magma_int_t* m, magma_int_t* n, magma_int_t* k, double alpha, double const * const * dA_array, magma_int_t* ldda, double const * const * dB_array, magma_int_t* lddb, double beta, double **dC_array, magma_int_t* lddc, magma_int_t max_m, magma_int_t max_n, magma_int_t max_k, magma_int_t roffA, magma_int_t coffA, magma_int_t roffB, magma_int_t coffB, magma_int_t roffC, magma_int_t coffC, magma_int_t spec_m, magma_int_t spec_n, magma_int_t spec_k, magma_int_t batchCount, magma_queue_t queue ) { if (max_m <= 0 || max_n <= 0 || max_k <= 0) return; magma_int_t shape = 0; if (transA == MagmaNoTrans && transB == MagmaNoTrans) { shape = 0; } // nn else if (transA == MagmaNoTrans && transB == MagmaTrans) { shape = 1; } // nt else if (transA == MagmaNoTrans && transB == MagmaConjTrans) { shape = 2; } // nc else if (transA == MagmaTrans && transB == MagmaNoTrans) { shape = 3; } // tn else if (transA == MagmaTrans && transB == MagmaTrans) { shape = 4; } // tt else if (transA == MagmaTrans && transB == MagmaConjTrans) { shape = 5; } // tc else if (transA == MagmaConjTrans && transB == MagmaNoTrans) { shape = 6; } // cn else if (transA == MagmaConjTrans && transB == MagmaTrans) { shape = 7; } // ct else if (transA == MagmaConjTrans && transB == MagmaConjTrans) { shape = 8; } // cc switch(shape) { case 0: // nn { if(max_k < 32) { if(max_k==8 && max_n==24) gemm_template_vbatched_nn<double, version(NN,32), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); else if (max_n<32) gemm_template_vbatched_nn<double, version(NN,49), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); else gemm_template_vbatched_nn<double, version(NN,111), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 80) { gemm_template_vbatched_nn<double, version(NN,93), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nn<double, version(NN,111), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 1: // nt { if(max_k < 128) { gemm_template_vbatched_nt<double, version(NT,160), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, 
spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_nt<double, version(NT,160), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nt<double, version(NT,190), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 2: // nc { if(max_k < 128) { gemm_template_vbatched_nt<double, version(NT,160), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_nt<double, version(NT,160), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nt<double, version(NT,190), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 3: // tn { if(max_k < 64) { gemm_template_vbatched_tn<double, version(TN,207), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tn<double, version(TN,207), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tn<double, version(TN,209), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 6: // cn { if(max_k < 64) { gemm_template_vbatched_tn<double, version(TN,207), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tn<double, version(TN,207), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tn<double, version(TN,209), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 4: // tt { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, 
spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 5: // tc { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 7: // ct { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 8: // cc { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; default:; // propose something } }
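The dispatch above selects a tuning configuration with the `version(s,v)` macro, which is plain preprocessor token pasting: `version(NN,32)` expands to the identifier `NN_V_32`, presumably one of the configurations declared in the dgemm_param_*.h headers. The snippet below is a minimal, self-contained illustration of that mechanism only; the NN_V_32 struct and its blocking values are stand-ins, not MAGMA's real definitions.

#include <cstdio>

#define version(s, v) s ## _V_ ## v

// Stand-in configuration; the real NN_V_* / NT_V_* / TN_V_* / TT_V_* names
// come from MAGMA's gemm_config headers.
struct NN_V_32 { enum { BLK_M = 16, BLK_N = 24, BLK_K = 8 }; };

template <typename CONF>
void report() {
    printf("BLK_M=%d BLK_N=%d BLK_K=%d\n",
           (int)CONF::BLK_M, (int)CONF::BLK_N, (int)CONF::BLK_K);
}

int main() {
    report< version(NN, 32) >();   // identical to report<NN_V_32>()
    return 0;
}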
efd6efd9e65165dd6ff05a0a80428f64ded60fab.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah */ #include "magma_internal.h" #define PRECISION_d #include "gemm_template_kernel_vbatched.cuh" #include "gemm_config/dgemm_param_nn.h" #include "gemm_config/dgemm_param_nt.h" #include "gemm_config/dgemm_param_tn.h" #include "gemm_config/dgemm_param_tt.h" #define version(s,v) s ## _V_ ## v /******************************************************************************/ extern "C" void magmablas_dgemm_vbatched_core( magma_trans_t transA, magma_trans_t transB, magma_int_t* m, magma_int_t* n, magma_int_t* k, double alpha, double const * const * dA_array, magma_int_t* ldda, double const * const * dB_array, magma_int_t* lddb, double beta, double **dC_array, magma_int_t* lddc, magma_int_t max_m, magma_int_t max_n, magma_int_t max_k, magma_int_t roffA, magma_int_t coffA, magma_int_t roffB, magma_int_t coffB, magma_int_t roffC, magma_int_t coffC, magma_int_t spec_m, magma_int_t spec_n, magma_int_t spec_k, magma_int_t batchCount, magma_queue_t queue ) { if (max_m <= 0 || max_n <= 0 || max_k <= 0) return; magma_int_t shape = 0; if (transA == MagmaNoTrans && transB == MagmaNoTrans) { shape = 0; } // nn else if (transA == MagmaNoTrans && transB == MagmaTrans) { shape = 1; } // nt else if (transA == MagmaNoTrans && transB == MagmaConjTrans) { shape = 2; } // nc else if (transA == MagmaTrans && transB == MagmaNoTrans) { shape = 3; } // tn else if (transA == MagmaTrans && transB == MagmaTrans) { shape = 4; } // tt else if (transA == MagmaTrans && transB == MagmaConjTrans) { shape = 5; } // tc else if (transA == MagmaConjTrans && transB == MagmaNoTrans) { shape = 6; } // cn else if (transA == MagmaConjTrans && transB == MagmaTrans) { shape = 7; } // ct else if (transA == MagmaConjTrans && transB == MagmaConjTrans) { shape = 8; } // cc switch(shape) { case 0: // nn { if(max_k < 32) { if(max_k==8 && max_n==24) gemm_template_vbatched_nn<double, version(NN,32), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); else if (max_n<32) gemm_template_vbatched_nn<double, version(NN,49), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); else gemm_template_vbatched_nn<double, version(NN,111), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 80) { gemm_template_vbatched_nn<double, version(NN,93), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nn<double, version(NN,111), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 1: // nt { if(max_k < 128) { gemm_template_vbatched_nt<double, version(NT,160), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m 
< 256) { gemm_template_vbatched_nt<double, version(NT,160), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nt<double, version(NT,190), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 2: // nc { if(max_k < 128) { gemm_template_vbatched_nt<double, version(NT,160), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_nt<double, version(NT,160), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_nt<double, version(NT,190), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 3: // tn { if(max_k < 64) { gemm_template_vbatched_tn<double, version(TN,207), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tn<double, version(TN,207), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tn<double, version(TN,209), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 6: // cn { if(max_k < 64) { gemm_template_vbatched_tn<double, version(TN,207), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tn<double, version(TN,207), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tn<double, version(TN,209), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 4: // tt { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 0, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 
5: // tc { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 0, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 7: // ct { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 1, 0> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; case 8: // cc { if(max_k < 128) { gemm_template_vbatched_tt<double, version(TT,81), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { if(max_m < 256) { gemm_template_vbatched_tt<double, version(TT,81), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } else { gemm_template_vbatched_tt<double, version(TT,85), 1, 1> (m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue); } } } break; default:; // propose something } }
2d656f03bf468ae6ae8311dfdda5f9e3da077f1e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> void cudasafe(int error, char* message, char* file, int line) { if (error != hipSuccess) { fprintf(stderr, "CUDA Error: %s : %i. In %s line %d\n", message, error, file, line); exit(-1); } } int main(int argc, char ** argv) { int deviceCount; cudasafe(hipGetDeviceCount(&deviceCount), "GetDeviceCount", __FILE__, __LINE__); printf("Number of CUDA devices %d.\n", deviceCount); for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; cudasafe(hipGetDeviceProperties(&deviceProp, dev), "Get Device Properties", __FILE__, __LINE__); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("No CUDA GPU has been detected\n"); return -1; } else if (deviceCount == 1) { printf("There is 1 device supporting CUDA\n"); } else { printf("There are %d devices supporting CUDA\n", deviceCount); } } printf("For device #%d\n", dev); printf("Device name: %s\n", deviceProp.name); printf("Major revision number: %d\n", deviceProp.major); printf("Minor revision Number: %d\n", deviceProp.minor); printf("Total Global Memory: %d\n", deviceProp.totalGlobalMem); printf("Total shared mem per block: %d\n", deviceProp.sharedMemPerBlock); printf("Total const mem size: %d\n", deviceProp.totalConstMem); printf("Warp size: %d\n", deviceProp.warpSize); printf("Maximum block dimensions: %d x %d x %d\n", deviceProp.maxThreadsDim[0], \ deviceProp.maxThreadsDim[1], \ deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %d x %d x %d\n", deviceProp.maxGridSize[0], \ deviceProp.maxGridSize[1], \ deviceProp.maxGridSize[2]); printf("Clock Rate: %d\n", deviceProp.clockRate); printf("Number of muliprocessors: %d\n", deviceProp.multiProcessorCount); } return 0; }
2d656f03bf468ae6ae8311dfdda5f9e3da077f1e.cu
#include <cuda.h> #include <stdio.h> void cudasafe(int error, char* message, char* file, int line) { if (error != cudaSuccess) { fprintf(stderr, "CUDA Error: %s : %i. In %s line %d\n", message, error, file, line); exit(-1); } } int main(int argc, char ** argv) { int deviceCount; cudasafe(cudaGetDeviceCount(&deviceCount), "GetDeviceCount", __FILE__, __LINE__); printf("Number of CUDA devices %d.\n", deviceCount); for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudasafe(cudaGetDeviceProperties(&deviceProp, dev), "Get Device Properties", __FILE__, __LINE__); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { printf("No CUDA GPU has been detected\n"); return -1; } else if (deviceCount == 1) { printf("There is 1 device supporting CUDA\n"); } else { printf("There are %d devices supporting CUDA\n", deviceCount); } } printf("For device #%d\n", dev); printf("Device name: %s\n", deviceProp.name); printf("Major revision number: %d\n", deviceProp.major); printf("Minor revision Number: %d\n", deviceProp.minor); printf("Total Global Memory: %d\n", deviceProp.totalGlobalMem); printf("Total shared mem per block: %d\n", deviceProp.sharedMemPerBlock); printf("Total const mem size: %d\n", deviceProp.totalConstMem); printf("Warp size: %d\n", deviceProp.warpSize); printf("Maximum block dimensions: %d x %d x %d\n", deviceProp.maxThreadsDim[0], \ deviceProp.maxThreadsDim[1], \ deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %d x %d x %d\n", deviceProp.maxGridSize[0], \ deviceProp.maxGridSize[1], \ deviceProp.maxGridSize[2]); printf("Clock Rate: %d\n", deviceProp.clockRate); printf("Number of muliprocessors: %d\n", deviceProp.multiProcessorCount); } return 0; }
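One portability note on the device-query pair above: totalGlobalMem, sharedMemPerBlock and totalConstMem are size_t fields, so %zu is the matching printf specifier; %d happens to print something plausible on some platforms but is not guaranteed. The same properties can also be read one at a time through the attribute API, as in the hypothetical sketch below (error checks omitted for brevity; the file's cudasafe() wrapper would fit around each call).

#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int dev = 0, smCount = 0, warpSz = 0;
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);
    cudaDeviceGetAttribute(&smCount, cudaDevAttrMultiProcessorCount, dev);
    cudaDeviceGetAttribute(&warpSz, cudaDevAttrWarpSize, dev);
    printf("%s: %d SMs, warp size %d, %zu bytes of global memory\n",
           prop.name, smCount, warpSz, (size_t)prop.totalGlobalMem);
    return 0;
}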
5bf1d08c8dd6bb3cf2912d3a27de5d579b990910.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #define SOFTENING 1e-9f typedef struct { float x, y, z, vx, vy, vz; } Body; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } __global__ void bodyForce(Body *p, float dt, int n) { // int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < n; i += stride) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt*Fx; p[i].vy += dt*Fy; p[i].vz += dt*Fz; } } int main(const int argc, const char** argv) { int nBodies = 2<<11; int salt = 0; if (argc > 1) nBodies = 2<<atoi(argv[1]); if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations //GPU int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); int bytes = nBodies * sizeof(Body); float *buf; hipMallocManaged(&buf, bytes); Body *p = (Body*)buf; hipMemPrefetchAsync(buf, bytes, hipCpuDeviceId); randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTime = 0.0; // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ hipMemPrefetchAsync(buf, bytes, deviceId); hipLaunchKernelGGL(( bodyForce), dim3(32*numberOfSMs),dim3(64), 0, 0, p, dt, nBodies); // compute interbody forces hipDeviceSynchronize(); // hipMemPrefetchAsync(buf, bytes, hipCpuDeviceId); for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } // /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ hipFree(buf); // free(buf); }
5bf1d08c8dd6bb3cf2912d3a27de5d579b990910.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #define SOFTENING 1e-9f typedef struct { float x, y, z, vx, vy, vz; } Body; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } __global__ void bodyForce(Body *p, float dt, int n) { // loop over every body int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < n; i += stride) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt*Fx; p[i].vy += dt*Fy; p[i].vz += dt*Fz; } } int main(const int argc, const char** argv) { int nBodies = 2<<11; int salt = 0; if (argc > 1) nBodies = 2<<atoi(argv[1]); if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations // query GPU info int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); int bytes = nBodies * sizeof(Body); float *buf; cudaMallocManaged(&buf, bytes); Body *p = (Body*)buf; cudaMemPrefetchAsync(buf, bytes, cudaCpuDeviceId); randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTime = 0.0; // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ cudaMemPrefetchAsync(buf, bytes, deviceId); bodyForce<<<32*numberOfSMs,64>>>(p, dt, nBodies); // compute interbody forces cudaDeviceSynchronize(); // update the positions cudaMemPrefetchAsync(buf, bytes, cudaCpuDeviceId); for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } // the code below computes the average time /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ cudaFree(buf); // free(buf); }
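The in-file comments above suggest refactoring both bodyForce and the host-side position integration. One possible shape for the second part is sketched below; it is entirely hypothetical (not the author's code), the <<<32, 64>>> launch is arbitrary, and the Body typedef is repeated only to keep the sketch self-contained. Moving the update into a kernel means the managed buffer no longer has to migrate back to the host on every iteration, only when the results are checked.

#include <cstdio>
#include <cuda_runtime.h>

typedef struct { float x, y, z, vx, vy, vz; } Body;   // same layout as the file's Body

// Hypothetical kernel: position integration done on the device,
// using a grid-stride loop exactly like bodyForce.
__global__ void integratePositions(Body *p, float dt, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (; i < n; i += stride) {
        p[i].x += p[i].vx * dt;
        p[i].y += p[i].vy * dt;
        p[i].z += p[i].vz * dt;
    }
}

int main() {
    const int n = 1024;
    Body *p;
    cudaMallocManaged(&p, n * sizeof(Body));
    for (int i = 0; i < n; i++) { p[i] = Body(); p[i].vx = 1.0f; }
    integratePositions<<<32, 64>>>(p, 0.01f, n);
    cudaDeviceSynchronize();
    printf("x[0] after one step: %f\n", p[0].x);   // expect 0.010000
    cudaFree(p);
    return 0;
}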
e90f06b382df309177bfbec0e531333ff4317c78.hip
// !!! This is a file automatically generated by hipify!!! /* iris.cu Classification of the iris dataset from Fisher using neural networks implemented in CUDA. Andrei de A. Formiga, 2012-05-21 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mlpnnets.h" #define SEED 631814ULL #define MAX_ABS 1.2f #define EPOCHS 7000 #define LEARNING_RATE 0.003f #define MAX(a, b) (a >= b? a : b) // neurons per layer (4 inputs, 8 hidden, 3 outputs) int neuronsPerLayer[] = { 4, 8, 3 }; typedef enum tagClass { iris_setosa, iris_versicolor, iris_virginica } Class; DataSet* read_dataset(char *filename) { FILE *f; int i, j; float slen, swid, plen, pwid; char buffer[140]; DataSet *dset; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", filename); return NULL; } // count lines in file to allocate dataset arrays i = 0; while (fgets(buffer, 140, f) != NULL) ++i; if (!feof(f) || ferror(f)) { fprintf(stderr, "IO error while reading from file\n"); fclose(f); return NULL; } fseek(f, 0, SEEK_SET); dset = CreateDataSet(i, 4, 3); if (dset == NULL) { fprintf(stderr, "Error creating dataset\n"); return NULL; } int iix = 0, oix = 0; bool done = false; while (!done) { j = fscanf(f, "%f,%f,%f,%f,%s\n", &slen, &swid, &plen, &pwid, buffer); if (j != 5) done = true; else { //printf("%f, %f, %f, %f\n", slen, swid, plen, pwid); dset->inputs[iix++] = slen; dset->inputs[iix++] = swid; dset->inputs[iix++] = plen; dset->inputs[iix++] = pwid; if (strstr(buffer, "setosa")) { dset->outputs[oix++] = 0.9f; dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.1f; } else if (strstr(buffer, "versicolor")) { dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.9f; dset->outputs[oix++] = 0.1f; } else { // assume class "virginica" dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.9f; } } } fclose(f); return dset; } void print_dataset(DataSet *dset) { int i, j; printf("Number of cases: %d\n", dset->nCases); for (i = 0; i < dset->nCases; ++i) { for (j = 0; j < dset->inputSize; ++j) printf("%3.2f ", dset->inputs[i*dset->inputSize+j]); printf(" | "); for (j = 0; j < dset->outputSize; ++j) printf("%3.2f ", dset->outputs[i*dset->outputSize+j]); printf("\n"); } } Class output_to_class(float *output) { double max; max = MAX(output[0], MAX(output[1], output[2])); if (output[0] == max) return iris_setosa; else if (output[1] == max) return iris_versicolor; return iris_virginica; } char *setosa = "setosa"; char *versicolor = "versicolor"; char *virginica = "virginica"; char *class_to_string(Class c) { char *res; switch(c) { case iris_setosa: res = setosa; break; case iris_versicolor: res = versicolor; break; default: res = virginica; } return res; } void print_network_data(MLPNetwork *net) { printf("nLayers = %d, d_weights = %lu, nWeights = %d, nCases = %d\n", net->nLayers, (unsigned long) net->d_weights, net->nWeights, net->nCases); printf("output ptr for first layer: %lu\n", (unsigned long) net->layers[0]->d_outs); printf("output ptr for last layer: %lu\n", (unsigned long) net->layers[net->nLayers-1]->d_outs); } int main(int argc, char **argv) { int i; int errors; DataSet *train_set; DataSet *test_set; float e; double acc; Class predicted, desired; MLPNetwork *irisnn; // training train_set = read_dataset("iris.train"); if (train_set == NULL) { fprintf(stderr, "Error reading training set\n"); exit(1); } irisnn = CreateNetwork(3, neuronsPerLayer); RandomWeights(irisnn, MAX_ABS, SEED); printf("Training network with %d epochs...\n", EPOCHS); e = BatchTrainBackprop(irisnn, 
train_set, EPOCHS, LEARNING_RATE, 1, 0); printf("Training finished, approximate final SSE: %f\n", e); printf("Weights after training:\n"); PrintWeights(irisnn); printf("-----------------------------------------\n"); // free the training dataset hipDeviceSynchronize(); DestroyDataSet(train_set); // testing test_set = read_dataset("iris.test"); if (test_set == NULL) { fprintf(stderr, "Error reading test set\n"); return -1; } errors = 0; if (!PrepareForTesting(irisnn, test_set->nCases)) { fprintf(stderr, "Error preparing network for testing\n"); return -1; } printf("Testing with %d cases...\n", test_set->nCases); PresentInputsFromDataSet(irisnn, test_set, ACTF_SIGMOID); hipDeviceSynchronize(); printf("Weights again:\n"); PrintWeights(irisnn); float *output = (float*) malloc(sizeof(float) * test_set->nCases * test_set->outputSize); if (output == NULL) { fprintf(stderr, "Could not allocate memory for copying output to host\n"); return -1; } if (!CopyNetworkOutputs(irisnn, output)) { fprintf(stderr, "Could not get device outputs\n"); return -1; } for (i = 0; i < test_set->nCases; ++i) { predicted = output_to_class(output + (i * test_set->outputSize)); desired = output_to_class(test_set->outputs + (i * test_set->outputSize)); if (predicted != desired) ++errors; printf("Case %d | predicted: %s, desired: %s, outputs: %4.3f %4.3f %4.3f\n", i, class_to_string(predicted), class_to_string(desired), output[i*test_set->outputSize], output[i*test_set->outputSize+1], output[i*test_set->outputSize+2]); } free(output); acc = 100.0 - (100.0 * errors / test_set->nCases); printf("Testing accuracy: %f\n", acc); printf("Total classificarion errors: %d\n", errors); DestroyNetwork(irisnn); DestroyDataSet(test_set); return 0; }
e90f06b382df309177bfbec0e531333ff4317c78.cu
/* iris.cu Classification of the iris dataset from Fisher using neural networks implemented in CUDA. Andrei de A. Formiga, 2012-05-21 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mlpnnets.h" #define SEED 631814ULL #define MAX_ABS 1.2f #define EPOCHS 7000 #define LEARNING_RATE 0.003f #define MAX(a, b) (a >= b? a : b) // neurons per layer (4 inputs, 8 hidden, 3 outputs) int neuronsPerLayer[] = { 4, 8, 3 }; typedef enum tagClass { iris_setosa, iris_versicolor, iris_virginica } Class; DataSet* read_dataset(char *filename) { FILE *f; int i, j; float slen, swid, plen, pwid; char buffer[140]; DataSet *dset; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "File not found: %s\n", filename); return NULL; } // count lines in file to allocate dataset arrays i = 0; while (fgets(buffer, 140, f) != NULL) ++i; if (!feof(f) || ferror(f)) { fprintf(stderr, "IO error while reading from file\n"); fclose(f); return NULL; } fseek(f, 0, SEEK_SET); dset = CreateDataSet(i, 4, 3); if (dset == NULL) { fprintf(stderr, "Error creating dataset\n"); return NULL; } int iix = 0, oix = 0; bool done = false; while (!done) { j = fscanf(f, "%f,%f,%f,%f,%s\n", &slen, &swid, &plen, &pwid, buffer); if (j != 5) done = true; else { //printf("%f, %f, %f, %f\n", slen, swid, plen, pwid); dset->inputs[iix++] = slen; dset->inputs[iix++] = swid; dset->inputs[iix++] = plen; dset->inputs[iix++] = pwid; if (strstr(buffer, "setosa")) { dset->outputs[oix++] = 0.9f; dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.1f; } else if (strstr(buffer, "versicolor")) { dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.9f; dset->outputs[oix++] = 0.1f; } else { // assume class "virginica" dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.1f; dset->outputs[oix++] = 0.9f; } } } fclose(f); return dset; } void print_dataset(DataSet *dset) { int i, j; printf("Number of cases: %d\n", dset->nCases); for (i = 0; i < dset->nCases; ++i) { for (j = 0; j < dset->inputSize; ++j) printf("%3.2f ", dset->inputs[i*dset->inputSize+j]); printf(" | "); for (j = 0; j < dset->outputSize; ++j) printf("%3.2f ", dset->outputs[i*dset->outputSize+j]); printf("\n"); } } Class output_to_class(float *output) { double max; max = MAX(output[0], MAX(output[1], output[2])); if (output[0] == max) return iris_setosa; else if (output[1] == max) return iris_versicolor; return iris_virginica; } char *setosa = "setosa"; char *versicolor = "versicolor"; char *virginica = "virginica"; char *class_to_string(Class c) { char *res; switch(c) { case iris_setosa: res = setosa; break; case iris_versicolor: res = versicolor; break; default: res = virginica; } return res; } void print_network_data(MLPNetwork *net) { printf("nLayers = %d, d_weights = %lu, nWeights = %d, nCases = %d\n", net->nLayers, (unsigned long) net->d_weights, net->nWeights, net->nCases); printf("output ptr for first layer: %lu\n", (unsigned long) net->layers[0]->d_outs); printf("output ptr for last layer: %lu\n", (unsigned long) net->layers[net->nLayers-1]->d_outs); } int main(int argc, char **argv) { int i; int errors; DataSet *train_set; DataSet *test_set; float e; double acc; Class predicted, desired; MLPNetwork *irisnn; // training train_set = read_dataset("iris.train"); if (train_set == NULL) { fprintf(stderr, "Error reading training set\n"); exit(1); } irisnn = CreateNetwork(3, neuronsPerLayer); RandomWeights(irisnn, MAX_ABS, SEED); printf("Training network with %d epochs...\n", EPOCHS); e = BatchTrainBackprop(irisnn, train_set, EPOCHS, LEARNING_RATE, 1, 0); printf("Training 
finished, approximate final SSE: %f\n", e); printf("Weights after training:\n"); PrintWeights(irisnn); printf("-----------------------------------------\n"); // free the training dataset cudaThreadSynchronize(); DestroyDataSet(train_set); // testing test_set = read_dataset("iris.test"); if (test_set == NULL) { fprintf(stderr, "Error reading test set\n"); return -1; } errors = 0; if (!PrepareForTesting(irisnn, test_set->nCases)) { fprintf(stderr, "Error preparing network for testing\n"); return -1; } printf("Testing with %d cases...\n", test_set->nCases); PresentInputsFromDataSet(irisnn, test_set, ACTF_SIGMOID); cudaThreadSynchronize(); printf("Weights again:\n"); PrintWeights(irisnn); float *output = (float*) malloc(sizeof(float) * test_set->nCases * test_set->outputSize); if (output == NULL) { fprintf(stderr, "Could not allocate memory for copying output to host\n"); return -1; } if (!CopyNetworkOutputs(irisnn, output)) { fprintf(stderr, "Could not get device outputs\n"); return -1; } for (i = 0; i < test_set->nCases; ++i) { predicted = output_to_class(output + (i * test_set->outputSize)); desired = output_to_class(test_set->outputs + (i * test_set->outputSize)); if (predicted != desired) ++errors; printf("Case %d | predicted: %s, desired: %s, outputs: %4.3f %4.3f %4.3f\n", i, class_to_string(predicted), class_to_string(desired), output[i*test_set->outputSize], output[i*test_set->outputSize+1], output[i*test_set->outputSize+2]); } free(output); acc = 100.0 - (100.0 * errors / test_set->nCases); printf("Testing accuracy: %f\n", acc); printf("Total classificarion errors: %d\n", errors); DestroyNetwork(irisnn); DestroyDataSet(test_set); return 0; }
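In the iris example above, output_to_class is a three-way argmax written with nested MAX calls: the targets are encoded as 0.9 for the true class and 0.1 elsewhere, so the predicted class is simply the index of the largest network output. A generic form of that rule (illustrative only, not part of the original code) is:

#include <cstdio>

// Index of the largest output wins; ties resolve to the lowest index,
// matching the setosa -> versicolor -> virginica preference of output_to_class.
static int argmax(const float *output, int n) {
    int best = 0;
    for (int i = 1; i < n; ++i)
        if (output[i] > output[best]) best = i;
    return best;
}

int main() {
    float outputs[3] = { 0.12f, 0.87f, 0.09f };
    printf("predicted class index: %d\n", argmax(outputs, 3));   // 1 -> versicolor
    return 0;
}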
cf160f3bc509ae3cd31d6418386ac40cf96de520.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void print_thread_variables() { printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, BlockDim{%d,%d,%d}, GridDim{%d,%d,%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z ); }
cf160f3bc509ae3cd31d6418386ac40cf96de520.cu
#include "includes.h" __global__ void print_thread_variables() { printf("Thread{%d,%d,%d}, Block{%d,%d,%d}, BlockDim{%d,%d,%d}, GridDim{%d,%d,%d}\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z ); }
a8456658cd2d34aebf6dccdfd2172c60f9f360ba.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/Lerp.h> #include <ATen/native/hip/ForeachFunctors.cuh> #include <ATen/native/hip/MultiTensorApply.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_foreach_lerp_native.h> #include <ATen/ops/empty_like_native.h> #endif namespace at::native { template <typename T> struct LerpFunctor { inline C10_DEVICE T operator()(const T self, const T end, const T weight) { return lerp(self, end, weight); } }; std::vector<at::Tensor> foreach_tensor_lerp_ternary_cuda( TensorList tensors1, TensorList tensors2, TensorList tensors3) { check_foreach_api_restrictions(tensors1, tensors2, tensors3); if (!can_use_fast_route({tensors1, tensors2, tensors3})) { return foreach_tensor_ternary_lerp_slow(tensors1, tensors2, tensors3); } std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t : tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), tensors3.vec(), vec_res}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_ternary_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<4>( tensor_lists, TernaryOpListFunctor< scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), LerpFunctor<opmath_t>()); }); return tensor_lists[3]; } void foreach_tensor_lerp_ternary_cuda_( TensorList tensors1, TensorList tensors2, TensorList tensors3) { check_foreach_api_restrictions(tensors1, tensors2, tensors3); if (!can_use_fast_route({tensors1, tensors2, tensors3})) { return foreach_tensor_ternary_lerp_slow_(tensors1, tensors2, tensors3); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), tensors3.vec()}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_ternary_cuda_", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<3>( tensor_lists, TernaryOpListFunctor< scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), LerpFunctor<opmath_t>()); }); increment_version(tensors1); } std::vector<at::Tensor> foreach_tensor_lerp_list_cuda( TensorList tensors1, TensorList tensors2, const Scalar& weight) { check_foreach_api_restrictions(tensors1, tensors2); if (!can_use_fast_route({tensors1, tensors2})) { return foreach_tensor_lerp_list_kernel_slow(tensors1, tensors2, weight); } std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t : tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), vec_res}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_scalar_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<3>( tensor_lists, TernaryOpScalarFunctor< scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), LerpFunctor<opmath_t>(), weight.to<opmath_t>()); }); return tensor_lists[2]; } void foreach_tensor_lerp_list_cuda_( TensorList tensors1, TensorList tensors2, const Scalar& weight) { check_foreach_api_restrictions(tensors1, tensors2); if 
(!can_use_fast_route({tensors1, tensors2})) { return foreach_tensor_lerp_list_kernel_slow_(tensors1, tensors2, weight); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec()}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_scalar_cuda_", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<2>( tensor_lists, TernaryOpScalarFunctor< scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), LerpFunctor<opmath_t>(), weight.to<opmath_t>()); }); } } // namespace at::native
a8456658cd2d34aebf6dccdfd2172c60f9f360ba.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/Lerp.h> #include <ATen/native/cuda/ForeachFunctors.cuh> #include <ATen/native/cuda/MultiTensorApply.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_foreach_lerp_native.h> #include <ATen/ops/empty_like_native.h> #endif namespace at::native { template <typename T> struct LerpFunctor { inline C10_DEVICE T operator()(const T self, const T end, const T weight) { return lerp(self, end, weight); } }; std::vector<at::Tensor> foreach_tensor_lerp_ternary_cuda( TensorList tensors1, TensorList tensors2, TensorList tensors3) { check_foreach_api_restrictions(tensors1, tensors2, tensors3); if (!can_use_fast_route({tensors1, tensors2, tensors3})) { return foreach_tensor_ternary_lerp_slow(tensors1, tensors2, tensors3); } std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t : tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), tensors3.vec(), vec_res}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_ternary_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<4>( tensor_lists, TernaryOpListFunctor< scalar_t, /* depth */ 4, /* r_args_depth */ 3, /* res_arg_index */ 3>(), LerpFunctor<opmath_t>()); }); return tensor_lists[3]; } void foreach_tensor_lerp_ternary_cuda_( TensorList tensors1, TensorList tensors2, TensorList tensors3) { check_foreach_api_restrictions(tensors1, tensors2, tensors3); if (!can_use_fast_route({tensors1, tensors2, tensors3})) { return foreach_tensor_ternary_lerp_slow_(tensors1, tensors2, tensors3); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), tensors3.vec()}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_ternary_cuda_", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<3>( tensor_lists, TernaryOpListFunctor< scalar_t, /* depth */ 3, /* r_args_depth */ 3, /* res_arg_index */ 0>(), LerpFunctor<opmath_t>()); }); increment_version(tensors1); } std::vector<at::Tensor> foreach_tensor_lerp_list_cuda( TensorList tensors1, TensorList tensors2, const Scalar& weight) { check_foreach_api_restrictions(tensors1, tensors2); if (!can_use_fast_route({tensors1, tensors2})) { return foreach_tensor_lerp_list_kernel_slow(tensors1, tensors2, weight); } std::vector<at::Tensor> vec_res; vec_res.reserve(tensors1.size()); for (const auto& t : tensors1) { vec_res.emplace_back(at::native::empty_like(t)); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec(), vec_res}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_scalar_cuda", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<3>( tensor_lists, TernaryOpScalarFunctor< scalar_t, /* depth */ 3, /* r_args_depth */ 2, /* res_arg_index */ 2>(), LerpFunctor<opmath_t>(), weight.to<opmath_t>()); }); return tensor_lists[2]; } void foreach_tensor_lerp_list_cuda_( TensorList tensors1, TensorList tensors2, const Scalar& weight) { check_foreach_api_restrictions(tensors1, tensors2); if (!can_use_fast_route({tensors1, tensors2})) { return 
foreach_tensor_lerp_list_kernel_slow_(tensors1, tensors2, weight); } std::vector<std::vector<at::Tensor>> tensor_lists{ tensors1.vec(), tensors2.vec()}; AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, tensors1[0].scalar_type(), "foreach_tensor_lerp_scalar_cuda_", [&]() { using opmath_t = typename at::opmath_type<scalar_t>; multi_tensor_apply<2>( tensor_lists, TernaryOpScalarFunctor< scalar_t, /* depth */ 2, /* r_args_depth */ 2, /* res_arg_index */ 0>(), LerpFunctor<opmath_t>(), weight.to<opmath_t>()); }); } } // namespace at::native
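// ---------------------------------------------------------------------------
// Illustrative sketch: the foreach lerp kernels above fuse many tensors into a
// single launch through multi_tensor_apply, but the per-element math reduces
// to an ordinary linear interpolation. The minimal single-buffer CUDA kernel
// below shows that per-element formula in isolation. It is a hedged sketch,
// assuming out = a + weight * (b - a); it is not the ATen implementation, and
// the names here (lerp_kernel, etc.) are hypothetical.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void lerp_kernel(const float* a, const float* b, float* out,
                            float weight, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Linear interpolation between a[i] and b[i] with the given weight.
    out[i] = a[i] + weight * (b[i] - a[i]);
  }
}

int main() {
  const int n = 1 << 10;
  float *a, *b, *out;
  cudaMallocManaged(&a, n * sizeof(float));
  cudaMallocManaged(&b, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) { a[i] = 0.f; b[i] = 2.f; }
  lerp_kernel<<<(n + 255) / 256, 256>>>(a, b, out, 0.5f, n);
  cudaDeviceSynchronize();
  printf("out[0] = %f (expect 1.0)\n", out[0]);
  cudaFree(a); cudaFree(b); cudaFree(out);
  return 0;
}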
e1a1eda69ee658d748b89666ba2c52c8be7d5703.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // TF-specific helpers. #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { hipError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == hipSuccess, errors::Internal("Cuda error: ", hipGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0) #define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0) // Cuda kernels and CPP all together. What an absolute compilation unit. #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ #include "../common/framework.h" #include "../common/glutil.cpp" #include "../common/common.h" #include "../common/common.cpp" #include "../common/rasterize.h" #include "../common/rasterize.cpp" #include "../common/rasterize.cu" #include "tf_rasterize.cu" #include "../common/interpolate.cu" #include "tf_interpolate.cu" #include "../common/texture.cpp" #include "../common/texture.cu" #include "tf_texture.cu" #include "../common/antialias.cu" #include "tf_antialias.cu"
e1a1eda69ee658d748b89666ba2c52c8be7d5703.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. // TF-specific helpers. #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal("Cuda error: ", cudaGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0) #define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0) // Cuda kernels and CPP all together. What an absolute compilation unit. #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ #include "../common/framework.h" #include "../common/glutil.cpp" #include "../common/common.h" #include "../common/common.cpp" #include "../common/rasterize.h" #include "../common/rasterize.cpp" #include "../common/rasterize.cu" #include "tf_rasterize.cu" #include "../common/interpolate.cu" #include "tf_interpolate.cu" #include "../common/texture.cpp" #include "../common/texture.cu" #include "tf_texture.cu" #include "../common/antialias.cu" #include "tf_antialias.cu"
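// ---------------------------------------------------------------------------
// Illustrative sketch: OP_CHECK_CUDA_ERROR above wraps every CUDA call in a
// do { ... } while (0) block so the macro behaves like a single statement and
// can report the failing expression via the stringized #CUDA_CALL argument.
// A minimal standalone variant of the same pattern (using fprintf/exit instead
// of TensorFlow's OP_REQUIRES) is sketched below under those assumptions;
// CHECK_CUDA is a hypothetical name, not part of the files above.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(CALL)                                              \
  do {                                                                \
    cudaError_t err_ = (CALL);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d [%s]\n",                \
              cudaGetErrorName(err_), __FILE__, __LINE__, #CALL);     \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

int main() {
  float* d = nullptr;
  CHECK_CUDA(cudaMalloc(&d, 256 * sizeof(float)));  // checked allocation
  CHECK_CUDA(cudaFree(d));                          // checked release
  return 0;
}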
002f1385a41449eab25f5e18f2fcf223117bb489.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "MSD_GPU_kernels_shared.cu" //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //------------------- Kernels //----------------------------------------------------------------------- //---------------> Computes partials for mean and standard deviation template<typename input_type> __global__ void MSD_GPU_calculate_partials_1d(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int x_steps, size_t nTimesamples, int offset) { __shared__ float s_par_MSD[2*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, ftemp; int j; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Initiate( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; for (int xf = 1; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Add_one( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[MSD_NTHREADS + threadIdx.x] = S; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM( &M, &S, &j, s_par_MSD, s_par_nElements ); Reduce_WARP( &M, &S, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<100) printf("result: b:[%d;%d;%d] M=%f; S=%f; j=%e\n", blockIdx.x, blockIdx.y, blockIdx.z, M, S, (double) j); } } template<typename input_type> __global__ void MSD_GPU_calculate_partials_1d_and_minmax(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int x_steps, size_t nTimesamples, int offset) { __shared__ float s_par_MSD[MSD_PARTIAL_SIZE*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, max, min, ftemp; int j; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Initiate( &M, &S, &j, ftemp); max = ftemp; min = ftemp; spos = spos + MSD_NTHREADS; for (int xf = 1; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; max = (fmaxf(max,ftemp)); min = (fminf(min,ftemp)); Add_one( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[blockDim.x + threadIdx.x] = S; s_par_MSD[2*blockDim.x + threadIdx.x] = max; s_par_MSD[3*blockDim.x + threadIdx.x] = min; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM_max( &M, &S, &max, &min, &j, s_par_MSD, s_par_nElements ); Reduce_WARP_max( &M, &S, &max, &min, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 2] = max; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 3] 
= min; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<5 && blockIdx.y<5) printf("M=%f; S=%f; max=%f; min=%f; j=%e\n", M, S, max, min, (double) j); } } template<typename input_type> __global__ void MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, float *d_MSD, int x_steps, size_t nTimesamples, int offset, float bln_sigma_constant) { __shared__ float s_par_MSD[MSD_PARTIAL_SIZE*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, ftemp, max, min; int j; float limit_down = d_MSD[MSD_RESULTS_SIZE*blockIdx.z] - bln_sigma_constant*d_MSD[MSD_RESULTS_SIZE*blockIdx.z + 1]; float limit_up = d_MSD[MSD_RESULTS_SIZE*blockIdx.z] + bln_sigma_constant*d_MSD[MSD_RESULTS_SIZE*blockIdx.z + 1]; size_t temp_gpos = blockIdx.z*gridDim.x + blockIdx.x; max = d_output_partial_MSD[MSD_PARTIAL_SIZE*temp_gpos + 2]; min = d_output_partial_MSD[MSD_PARTIAL_SIZE*temp_gpos + 3]; if( (min>limit_down) && (max < limit_up) ) return; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; max=0; min=0; if( spos<(nTimesamples-offset) ){ for (int xf = 0; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; if( (ftemp>limit_down) && (ftemp < limit_up) ){ if(j==0){ Initiate( &M, &S, &j, ftemp); max = ftemp; min = ftemp; } else{ Add_one( &M, &S, &j, ftemp); max = fmaxf(max, ftemp); min = fminf(min, ftemp); } } spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[blockDim.x + threadIdx.x] = S; s_par_MSD[2*blockDim.x + threadIdx.x] = max; s_par_MSD[3*blockDim.x + threadIdx.x] = min; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM_max( &M, &S, &max, &min, &j, s_par_MSD, s_par_nElements ); Reduce_WARP_max( &M, &S, &max, &min, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 2] = max; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 3] = min; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<5 && blockIdx.y<5) printf("b:[%d;%d;%d] M=%f; S=%f; max=%f; min=%f; j=%e\n", blockIdx.x, blockIdx.y, blockIdx.z, M, S, max, min, (double) j); } } //------------------- Kernels with functions //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- template<typename input_type> void call_MSD_GPU_calculate_partials_1d(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, hipStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int y_steps, size_t nTimesamples, int offset){ hipLaunchKernelGGL(( MSD_GPU_calculate_partials_1d), dim3(grid_size), dim3(block_size), shared_memory_bytes, streams, d_input, d_output_partial_MSD, d_output_partial_nElements, y_steps, nTimesamples, offset); } template<typename input_type> void call_MSD_GPU_calculate_partials_1d_and_minmax(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, hipStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int y_steps, size_t nTimesamples, 
int offset) { hipLaunchKernelGGL(( MSD_GPU_calculate_partials_1d_and_minmax), dim3(grid_size), dim3(block_size), shared_memory_bytes, streams, d_input, d_output_partial_MSD, d_output_partial_nElements, y_steps, nTimesamples, offset); } template<typename input_type> void call_MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, hipStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, float *d_MSD, int y_steps, size_t nTimesamples, int offset, float bln_sigma_constant){ hipLaunchKernelGGL(( MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection), dim3(grid_size), dim3(block_size), shared_memory_bytes, streams, d_input, d_output_partial_MSD, d_output_partial_nElements, d_MSD, y_steps, nTimesamples, offset, bln_sigma_constant); } //---------------------------------------------------------------------------<
002f1385a41449eab25f5e18f2fcf223117bb489.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "MSD_GPU_kernels_shared.cu" //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //------------------- Kernels //----------------------------------------------------------------------- //---------------> Computes partials for mean and standard deviation template<typename input_type> __global__ void MSD_GPU_calculate_partials_1d(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int x_steps, size_t nTimesamples, int offset) { __shared__ float s_par_MSD[2*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, ftemp; int j; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Initiate( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; for (int xf = 1; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Add_one( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[MSD_NTHREADS + threadIdx.x] = S; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM( &M, &S, &j, s_par_MSD, s_par_nElements ); Reduce_WARP( &M, &S, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<100) printf("result: b:[%d;%d;%d] M=%f; S=%f; j=%e\n", blockIdx.x, blockIdx.y, blockIdx.z, M, S, (double) j); } } template<typename input_type> __global__ void MSD_GPU_calculate_partials_1d_and_minmax(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int x_steps, size_t nTimesamples, int offset) { __shared__ float s_par_MSD[MSD_PARTIAL_SIZE*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, max, min, ftemp; int j; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; Initiate( &M, &S, &j, ftemp); max = ftemp; min = ftemp; spos = spos + MSD_NTHREADS; for (int xf = 1; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; max = (fmaxf(max,ftemp)); min = (fminf(min,ftemp)); Add_one( &M, &S, &j, ftemp); spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[blockDim.x + threadIdx.x] = S; s_par_MSD[2*blockDim.x + threadIdx.x] = max; s_par_MSD[3*blockDim.x + threadIdx.x] = min; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM_max( &M, &S, &max, &min, &j, s_par_MSD, s_par_nElements ); Reduce_WARP_max( &M, &S, &max, &min, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 2] = max; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 3] = min; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<5 && 
blockIdx.y<5) printf("M=%f; S=%f; max=%f; min=%f; j=%e\n", M, S, max, min, (double) j); } } template<typename input_type> __global__ void MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection(input_type const* __restrict__ d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, float *d_MSD, int x_steps, size_t nTimesamples, int offset, float bln_sigma_constant) { __shared__ float s_par_MSD[MSD_PARTIAL_SIZE*MSD_NTHREADS]; __shared__ int s_par_nElements[MSD_NTHREADS]; float M, S, ftemp, max, min; int j; float limit_down = d_MSD[MSD_RESULTS_SIZE*blockIdx.z] - bln_sigma_constant*d_MSD[MSD_RESULTS_SIZE*blockIdx.z + 1]; float limit_up = d_MSD[MSD_RESULTS_SIZE*blockIdx.z] + bln_sigma_constant*d_MSD[MSD_RESULTS_SIZE*blockIdx.z + 1]; size_t temp_gpos = blockIdx.z*gridDim.x + blockIdx.x; max = d_output_partial_MSD[MSD_PARTIAL_SIZE*temp_gpos + 2]; min = d_output_partial_MSD[MSD_PARTIAL_SIZE*temp_gpos + 3]; if( (min>limit_down) && (max < limit_up) ) return; size_t gpos = blockIdx.z*nTimesamples; size_t spos = blockIdx.x*x_steps*MSD_NTHREADS + threadIdx.x; M=0; S=0; j=0; ftemp=0; max=0; min=0; if( spos<(nTimesamples-offset) ){ for (int xf = 0; xf < x_steps; xf++) { if( spos<(nTimesamples-offset) ){ ftemp = (float) d_input[gpos + spos]; if( (ftemp>limit_down) && (ftemp < limit_up) ){ if(j==0){ Initiate( &M, &S, &j, ftemp); max = ftemp; min = ftemp; } else{ Add_one( &M, &S, &j, ftemp); max = fmaxf(max, ftemp); min = fminf(min, ftemp); } } spos = spos + MSD_NTHREADS; } } } s_par_MSD[threadIdx.x] = M; s_par_MSD[blockDim.x + threadIdx.x] = S; s_par_MSD[2*blockDim.x + threadIdx.x] = max; s_par_MSD[3*blockDim.x + threadIdx.x] = min; s_par_nElements[threadIdx.x] = j; __syncthreads(); Reduce_SM_max( &M, &S, &max, &min, &j, s_par_MSD, s_par_nElements ); Reduce_WARP_max( &M, &S, &max, &min, &j); //---------------------------------------------- //---- Writing data if (threadIdx.x == 0) { gpos = blockIdx.z*gridDim.x + blockIdx.x; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos] = M; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 1] = S; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 2] = max; d_output_partial_MSD[MSD_PARTIAL_SIZE*gpos + 3] = min; d_output_partial_nElements[gpos] = j; //if(blockIdx.x<5 && blockIdx.y<5) printf("b:[%d;%d;%d] M=%f; S=%f; max=%f; min=%f; j=%e\n", blockIdx.x, blockIdx.y, blockIdx.z, M, S, max, min, (double) j); } } //------------------- Kernels with functions //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- //---------------------------------------------------------------------------------------- template<typename input_type> void call_MSD_GPU_calculate_partials_1d(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, cudaStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int y_steps, size_t nTimesamples, int offset){ MSD_GPU_calculate_partials_1d<<< grid_size, block_size, shared_memory_bytes, streams>>>(d_input, d_output_partial_MSD, d_output_partial_nElements, y_steps, nTimesamples, offset); } template<typename input_type> void call_MSD_GPU_calculate_partials_1d_and_minmax(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, cudaStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, int y_steps, size_t nTimesamples, int offset) { MSD_GPU_calculate_partials_1d_and_minmax<<< grid_size, block_size, 
shared_memory_bytes, streams>>>(d_input, d_output_partial_MSD, d_output_partial_nElements, y_steps, nTimesamples, offset); } template<typename input_type> void call_MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection(const dim3 &grid_size, const dim3 &block_size, int shared_memory_bytes, cudaStream_t streams, input_type *d_input, float *d_output_partial_MSD, int *d_output_partial_nElements, float *d_MSD, int y_steps, size_t nTimesamples, int offset, float bln_sigma_constant){ MSD_BLN_calculate_partials_1d_and_minmax_with_outlier_rejection<<< grid_size, block_size, shared_memory_bytes, streams>>>(d_input, d_output_partial_MSD, d_output_partial_nElements, d_MSD, y_steps, nTimesamples, offset, bln_sigma_constant); } //---------------------------------------------------------------------------<
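// ---------------------------------------------------------------------------
// Illustrative sketch: the MSD kernels above accumulate per-thread partials
// (M, S, j) with Initiate()/Add_one() (defined in MSD_GPU_kernels_shared.cu,
// which is not shown here) and then reduce them in shared memory and across
// the warp. One common way to form such streaming mean/variance partials is
// Welford's update, sketched below; this is an assumption about the general
// technique, not the exact formulas used by those helpers.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

struct RunningStats {
  double mean = 0.0;  // running mean (plays the role of M)
  double m2   = 0.0;  // running sum of squared deviations (plays the role of S)
  long   n    = 0;    // number of samples accumulated (plays the role of j)

  void add(double x) {
    ++n;
    double delta = x - mean;
    mean += delta / n;
    m2 += delta * (x - mean);  // uses the updated mean
  }
  double stddev() const { return n > 1 ? std::sqrt(m2 / n) : 0.0; }
};

int main() {
  RunningStats s;
  for (int i = 1; i <= 5; ++i) s.add(static_cast<double>(i));  // samples 1..5
  printf("mean=%f stddev=%f\n", s.mean, s.stddev());  // mean=3, stddev~1.414
  return 0;
}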
08cd8399f93ed837bf6970ed83523b7bc9edf71c.hip
// !!! This is a file automatically generated by hipify!!! /* The MIT License (MIT) Copyright (c) 2014 Leonardo Kewitz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "cuda_snippets.h" #define BSIZE 32 __global__ void kernel_iter(int w, int h, double alpha, double *X, const double *bound) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; double vt, vb, vl, vr; vt = y == 0 ? bound[0] : X[(y-1)*w + x]; vb = y == h-1 ? bound[1] : X[(y+1)*w + x]; vl = x == 0 ? bound[2] : X[y*w + x-1]; vr = x == w-1 ? bound[3] : X[y*w + x+1]; X[y*w + x] = ((vt+vb+vl+vr)/4.0); return; } extern "C" void run(int w, int h, int ks, double alpha, const double *bound, double *X) { int k; hipDeviceProp_t prop; CudaSafeCall(hipGetDeviceProperties(&prop, 0) ); printf("[!] %s compiled in %s %s\n", __FILE__, __DATE__, __TIME__); printf("[!] Device Name: %s\n", prop.name); double *d_X, *d_B; hipMalloc(&d_B, sizeof(double)*4); hipMalloc(&d_X, sizeof(double)*w*h); hipMemcpy(d_B, bound, sizeof(double)*4, hipMemcpyHostToDevice); hipMemcpy(d_X, X, sizeof(double)*w*h, hipMemcpyHostToDevice); const dim3 threads(BSIZE, BSIZE); const dim3 blocks(1 + w/BSIZE, 1 + h/BSIZE); for (k = 0; k < ks; k++) { hipLaunchKernelGGL(( kernel_iter), dim3(blocks), dim3(threads), 0, 0, w, h, alpha, d_X, d_B); hipDeviceSynchronize(); } hipMemcpy(X, d_X, sizeof(double)*w*h, hipMemcpyDeviceToHost); hipFree(d_B); hipFree(d_X); return; } extern "C" void runCPU(int w, int h, int ks, double alpha, double *bound, double *X) { int k, x, y; double vt, vb, vl, vr; for (k = 0; k < ks; k++) { for (x = 0; x < w; x++) { for (y = 0; y < h; y++) { vt = y == 0 ? bound[0] : X[(y-1)*w + x]; vb = y == h-1 ? bound[1] : X[(y+1)*w + x]; vl = x == 0 ? bound[2] : X[y*w + x-1]; vr = x == w-1 ? bound[3] : X[y*w + x+1]; X[y*w + x] = ((vt+vb+vl+vr)/double(4.0)); } } } return; }
08cd8399f93ed837bf6970ed83523b7bc9edf71c.cu
/* The MIT License (MIT) Copyright (c) 2014 Leonardo Kewitz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> #include "cuda_snippets.h" #define BSIZE 32 __global__ void kernel_iter(int w, int h, double alpha, double *X, const double *bound) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= w || y >= h) return; double vt, vb, vl, vr; vt = y == 0 ? bound[0] : X[(y-1)*w + x]; vb = y == h-1 ? bound[1] : X[(y+1)*w + x]; vl = x == 0 ? bound[2] : X[y*w + x-1]; vr = x == w-1 ? bound[3] : X[y*w + x+1]; X[y*w + x] = ((vt+vb+vl+vr)/4.0); return; } extern "C" void run(int w, int h, int ks, double alpha, const double *bound, double *X) { int k; cudaDeviceProp prop; CudaSafeCall(cudaGetDeviceProperties(&prop, 0) ); printf("[!] %s compiled in %s %s\n", __FILE__, __DATE__, __TIME__); printf("[!] Device Name: %s\n", prop.name); double *d_X, *d_B; cudaMalloc(&d_B, sizeof(double)*4); cudaMalloc(&d_X, sizeof(double)*w*h); cudaMemcpy(d_B, bound, sizeof(double)*4, cudaMemcpyHostToDevice); cudaMemcpy(d_X, X, sizeof(double)*w*h, cudaMemcpyHostToDevice); const dim3 threads(BSIZE, BSIZE); const dim3 blocks(1 + w/BSIZE, 1 + h/BSIZE); for (k = 0; k < ks; k++) { kernel_iter<<<blocks, threads>>>(w, h, alpha, d_X, d_B); cudaDeviceSynchronize(); } cudaMemcpy(X, d_X, sizeof(double)*w*h, cudaMemcpyDeviceToHost); cudaFree(d_B); cudaFree(d_X); return; } extern "C" void runCPU(int w, int h, int ks, double alpha, double *bound, double *X) { int k, x, y; double vt, vb, vl, vr; for (k = 0; k < ks; k++) { for (x = 0; x < w; x++) { for (y = 0; y < h; y++) { vt = y == 0 ? bound[0] : X[(y-1)*w + x]; vb = y == h-1 ? bound[1] : X[(y+1)*w + x]; vl = x == 0 ? bound[2] : X[y*w + x-1]; vr = x == w-1 ? bound[3] : X[y*w + x+1]; X[y*w + x] = ((vt+vb+vl+vr)/double(4.0)); } } } return; }
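// ---------------------------------------------------------------------------
// Illustrative sketch: the host code above sizes the launch grid with
// "1 + w/BSIZE", which always launches enough blocks (the in-kernel bounds
// check discards the excess threads) but adds one fully idle block whenever w
// is an exact multiple of BSIZE. The usual ceiling-division idiom below avoids
// that extra block; this is a generic sketch, not a change to the file above.
// ---------------------------------------------------------------------------
#include <cstdio>

// Smallest number of blocks of size `block` that covers `n` elements.
inline unsigned int blocks_for(unsigned int n, unsigned int block) {
  return (n + block - 1) / block;
}

int main() {
  printf("%u\n", blocks_for(64, 32));  // 2 (versus 1 + 64/32 = 3)
  printf("%u\n", blocks_for(65, 32));  // 3
  return 0;
}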
8c87a773ecbb870e6d0c239fa71572e843903a85.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief * array_ops * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array_ops.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0, max_dim = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); if (dim > max_dim) max_dim = dim; sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branchs should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); const int32_t *row_splits_data = row_splits.Data(); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); int32_t avg_input_size = ans_size / num_arrays; if (max_dim < 2 * avg_input_size + 512) { // here, 2 is a heuristic factor. We're saying, "if the max length of any // of the source arrays is not too much larger than the average length of // the source arrays." The `+ 512` is an additional heuristic factor, as // we care less about launching too many GPU threads if the number of // elements being processed is small. 
What we're saying is that the // arrays' sizes are fairly balanced, so we launch with a simple // rectangular kernel. auto lambda_set_data = [=] __host__ __device__(int32_t i, int32_t j) -> void { int32_t row_start = row_splits_data[i], row_end = row_splits_data[i + 1]; const int32_t *src_ptr = src_ptrs_data[i]; // not we have dropped the last element of src[i] in row_splits_data, // so here it will not be copied. if (j < row_end - row_start) { ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i]; } }; Eval2(c, num_arrays, max_dim, lambda_set_data); } else { int32_t block_dim = 256; while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2; // `index_map` will map from 'new index' to 'old index', with 0 <= // old_index < num_arrays... we handle each source array with multiple // blocks. // The elements of `index_map` will be of the form: // old_index + (block_of_this_array << 32). // where `old_index` is an index into `src` and `block_of_this_array` // tells us which block it is, as in 0, 1, 2, 3... // there won't be very many blocks, so it's not a problem to enumerate // them on CPU. std::vector<uint64_t> index_map; index_map.reserve((2 * ans_size) / block_dim); for (int32_t i = 0; i < num_arrays; i++) { int32_t this_array_size = src[i]->Dim(); int32_t this_num_blocks = NumBlocks(this_array_size, block_dim); for (int32_t j = 0; j < this_num_blocks; j++) { index_map.push_back((static_cast<uint64_t>(j) << 32) + static_cast<uint64_t>(i)); } } Array1<uint64_t> index_map_gpu(c, index_map); const uint64_t *index_map_data = index_map_gpu.Data(); auto lambda_set_data_blocks = [=] __host__ __device__(int32_t i, int32_t j) { uint64_t index = index_map_data[i]; uint32_t orig_i = static_cast<uint32_t>(index), block_index = static_cast<uint32_t>(index >> 32); int32_t row_start = row_splits_data[orig_i], row_end = row_splits_data[orig_i + 1], orig_j = (block_index * block_dim) + j; const int32_t *src_ptr = src_ptrs_data[orig_i]; if (orig_j < row_end - row_start) { ans_data[row_start + orig_j] = src_ptr[orig_j] + data_offsets_data[orig_i]; } }; Eval2(c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks); } } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }; // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. 
Eval(ctx, dim - 1, lambda_check_row_ids); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); auto lambda_check_row_splits = [=] __host__ __device__(int32_t i) -> void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }; // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. Eval(ctx, dim - 1, lambda_check_row_splits); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || row_ids[0] < 0) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }; Eval(ctx, ::max(num_elems, num_rows), lambda_check_row_ids); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { K2_CHECK_GE(n, 0); ContextPtr &c = src.Context(); int32_t dim = src.Dim(); const int32_t *src_data = src.Data(); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim, c->GetCudaStream())); } return ans; } } // namespace k2
8c87a773ecbb870e6d0c239fa71572e843903a85.cu
/** * @brief * array_ops * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array_ops.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0, max_dim = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); if (dim > max_dim) max_dim = dim; sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branchs should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); const int32_t *row_splits_data = row_splits.Data(); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); int32_t avg_input_size = ans_size / num_arrays; if (max_dim < 2 * avg_input_size + 512) { // here, 2 is a heuristic factor. We're saying, "if the max length of any // of the source arrays is not too much larger than the average length of // the source arrays." The `+ 512` is an additional heuristic factor, as // we care less about launching too many GPU threads if the number of // elements being processed is small. 
What we're saying is that the // arrays' sizes are fairly balanced, so we launch with a simple // rectangular kernel. auto lambda_set_data = [=] __host__ __device__(int32_t i, int32_t j) -> void { int32_t row_start = row_splits_data[i], row_end = row_splits_data[i + 1]; const int32_t *src_ptr = src_ptrs_data[i]; // not we have dropped the last element of src[i] in row_splits_data, // so here it will not be copied. if (j < row_end - row_start) { ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i]; } }; Eval2(c, num_arrays, max_dim, lambda_set_data); } else { int32_t block_dim = 256; while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2; // `index_map` will map from 'new index' to 'old index', with 0 <= // old_index < num_arrays... we handle each source array with multiple // blocks. // The elements of `index_map` will be of the form: // old_index + (block_of_this_array << 32). // where `old_index` is an index into `src` and `block_of_this_array` // tells us which block it is, as in 0, 1, 2, 3... // there won't be very many blocks, so it's not a problem to enumerate // them on CPU. std::vector<uint64_t> index_map; index_map.reserve((2 * ans_size) / block_dim); for (int32_t i = 0; i < num_arrays; i++) { int32_t this_array_size = src[i]->Dim(); int32_t this_num_blocks = NumBlocks(this_array_size, block_dim); for (int32_t j = 0; j < this_num_blocks; j++) { index_map.push_back((static_cast<uint64_t>(j) << 32) + static_cast<uint64_t>(i)); } } Array1<uint64_t> index_map_gpu(c, index_map); const uint64_t *index_map_data = index_map_gpu.Data(); auto lambda_set_data_blocks = [=] __host__ __device__(int32_t i, int32_t j) { uint64_t index = index_map_data[i]; uint32_t orig_i = static_cast<uint32_t>(index), block_index = static_cast<uint32_t>(index >> 32); int32_t row_start = row_splits_data[orig_i], row_end = row_splits_data[orig_i + 1], orig_j = (block_index * block_dim) + j; const int32_t *src_ptr = src_ptrs_data[orig_i]; if (orig_j < row_end - row_start) { ans_data[row_start + orig_j] = src_ptr[orig_j] + data_offsets_data[orig_i]; } }; Eval2(c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks); } } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }; // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. 
Eval(ctx, dim - 1, lambda_check_row_ids); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); auto lambda_check_row_splits = [=] __host__ __device__(int32_t i) -> void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }; // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. Eval(ctx, dim - 1, lambda_check_row_splits); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || row_ids[0] < 0) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }; Eval(ctx, std::max(num_elems, num_rows), lambda_check_row_ids); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { K2_CHECK_GE(n, 0); ContextPtr &c = src.Context(); int32_t dim = src.Dim(); const int32_t *src_data = src.Data(); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim, c->GetCudaStream())); } return ans; } } // namespace k2
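// ---------------------------------------------------------------------------
// Illustrative sketch: several routines above (RowSplitsToRowIds,
// RowIdsToRowSplits, the validators) revolve around the ragged-array duality
// between row_splits and row_ids: row_splits = [0, 2, 5] describes rows of
// sizes 2 and 3, and the equivalent row_ids is [0, 0, 1, 1, 1]. The host-side
// conversions below are a hedged sketch of that relationship only; the k2
// device implementations live elsewhere and are not reproduced here.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

// row_splits (size num_rows + 1, starting at 0) -> row_ids (size num_elems).
std::vector<int> row_splits_to_row_ids(const std::vector<int>& row_splits) {
  std::vector<int> row_ids;
  for (int row = 0; row + 1 < static_cast<int>(row_splits.size()); ++row)
    for (int e = row_splits[row]; e < row_splits[row + 1]; ++e)
      row_ids.push_back(row);
  return row_ids;
}

// row_ids (non-decreasing) -> row_splits for `num_rows` rows.
std::vector<int> row_ids_to_row_splits(const std::vector<int>& row_ids,
                                       int num_rows) {
  std::vector<int> row_splits(num_rows + 1, 0);
  for (int id : row_ids) ++row_splits[id + 1];  // count elements per row
  for (int r = 0; r < num_rows; ++r) row_splits[r + 1] += row_splits[r];  // exclusive sum
  return row_splits;
}

int main() {
  std::vector<int> splits = {0, 2, 5};
  for (int id : row_splits_to_row_ids(splits)) printf("%d ", id);          // 0 0 1 1 1
  printf("\n");
  for (int s : row_ids_to_row_splits({0, 0, 1, 1, 1}, 2)) printf("%d ", s); // 0 2 5
  printf("\n");
  return 0;
}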
3ced6e1a44066429789270c5983ff9d351e29dba.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/CUDAGeneratorImpl.h> #include <c10/hip/HIPException.h> #include <c10/hip/HIPFunctions.h> #include <ATen/core/PhiloxRNGEngine.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <assert.h> #include <thread> using namespace at; /* * Philox Engine Tests */ __global__ void testEngineReproducibility(){ int idx = blockIdx.x * blockDim.x + threadIdx.x; at::Philox4_32_10 engine1(0, idx, 4); at::Philox4_32_10 engine2(0, idx, 4); assert(engine1() == engine2()); } void test_engine_reproducibility(){ hipLaunchKernelGGL(( testEngineReproducibility), dim3(1), dim3(1), 0, 0, ); C10_HIP_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineReproducibility) { // Test Description: // Tests if same inputs give same results. // launch one thread and create two engines. // Given same seed, idx and offset, assert that the engines // should be aligned and have the same sequence. if (!at::cuda::is_available()) return; test_engine_reproducibility(); hipError_t err = hipDeviceSynchronize(); bool isEQ = err == hipSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset1(){ at::Philox4_32_10 engine1(123, 1, 0); // Note: offset is a multiple of 4. // So if you want to skip 8 values, offset would // be 2, since 2*4=8. at::Philox4_32_10 engine2(123, 1, 2); for(int i = 0; i < 8; i++){ // Note: instead of using the engine() call 8 times // we could have achieved the same functionality by // calling the incr() function twice. engine1(); } assert(engine1() == engine2()); } void test_engine_offset1(){ hipLaunchKernelGGL(( testEngineOffset1), dim3(1), dim3(1), 0, 0, ); C10_HIP_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset1) { // Test Description: // Tests offsetting in same thread. // launch one thread and create two engines. // make one engine skip the first 8 values and // make another engine increment to until the // first 8 values. Assert that the first call // of engine2 and the 9th call of engine1 are equal. if (!at::cuda::is_available()) return; test_engine_offset1(); hipError_t err = hipDeviceSynchronize(); bool isEQ = err == hipSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset2(){ unsigned long long increment_val = ::ldexp(1.0, 64); at::Philox4_32_10 engine1(123, 0, increment_val); at::Philox4_32_10 engine2(123, increment_val, increment_val); engine2.incr_n(increment_val); engine2.incr(); assert(engine1() == engine2()); } void test_engine_offset2(){ hipLaunchKernelGGL(( testEngineOffset2), dim3(1), dim3(1), 0, 0, ); C10_HIP_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset2) { // Test Description: // Tests edge case at the end of the 2^190th value of the generator. // launch one thread and create two engines // make engine1 skip to the 2^64th 128 bit while being at thread 0 // make engine2 skip to the 2^64th 128 bit while being at 2^64th thread // Assert that engine2 should be increment_val+1 steps behind engine1. 
if (!at::cuda::is_available()) return; test_engine_offset2(); hipDeviceSynchronize(); bool isEQ = hipGetLastError() == hipSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset3(){ unsigned long long increment_val = ::ldexp(1.0, 64); at::Philox4_32_10 engine1(123, 0, increment_val); at::Philox4_32_10 engine2(123, 1, 0); engine1.incr(); assert(engine1() == engine2()); } void test_engine_offset3(){ hipLaunchKernelGGL(( testEngineOffset2), dim3(1), dim3(1), 0, 0, ); C10_HIP_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset3) { // Test Description: // Tests edge case in between threads. // launch one thread and create two engines // make engine1 skip to the 2^64th 128 bit while being at thread 0 // start engine2 at thread 1, with offset 0 // Assert that engine1 is 1 step behind engine2. if (!at::cuda::is_available()) return; test_engine_offset3(); hipDeviceSynchronize(); bool isEQ = hipGetLastError() == hipSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineThreadIndex(){ at::Philox4_32_10 engine1(123456, 0, 4); at::Philox4_32_10 engine2(123456, 1, 4); assert(engine1() != engine2()); } void test_engine_thread_index(){ hipLaunchKernelGGL(( testEngineThreadIndex), dim3(1), dim3(1), 0, 0, ); C10_HIP_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineIndex) { // Test Description: // Tests if thread indexing is working properly. // launch one thread and create two engines // with different thread index but same offset. // Assert that the engines have different sequences. if (!at::cuda::is_available()) return; test_engine_thread_index(); hipDeviceSynchronize(); bool isEQ = hipGetLastError() == hipSuccess; ASSERT_TRUE(isEQ); } /* * CUDA Generator Tests */ TEST(CUDAGeneratorImpl, TestGeneratorDynamicCast) { // Test Description: Check dynamic cast for CUDA if (!at::cuda::is_available()) return; auto foo = at::cuda::detail::createCUDAGenerator(); auto result = foo.get<CUDAGeneratorImpl>(); ASSERT_EQ(typeid(at::CUDAGeneratorImpl*).hash_code(), typeid(result).hash_code()); } TEST(CUDAGeneratorImpl, TestDefaultGenerator) { // Test Description: // Check if default generator state is created only once // address of generator should be same in all calls if (!at::cuda::is_available()) return; auto foo = at::cuda::detail::getDefaultCUDAGenerator(); auto bar = at::cuda::detail::getDefaultCUDAGenerator(); ASSERT_EQ(foo, bar); if (c10::hip::device_count() >= 2) { foo = at::cuda::detail::getDefaultCUDAGenerator(1); bar = at::cuda::detail::getDefaultCUDAGenerator(1); ASSERT_EQ(foo, bar); foo = at::cuda::detail::getDefaultCUDAGenerator(0); bar = at::cuda::detail::getDefaultCUDAGenerator(1); ASSERT_NE(foo, bar); } } TEST(CUDAGeneratorImpl, TestCloning) { // Test Description: // Check cloning of new generators. // Note that we don't allow cloning of other // generator states into default generators. 
if (!at::cuda::is_available()) return; auto gen1 = at::cuda::detail::createCUDAGenerator(); gen1.set_current_seed(123); // modify gen1 state auto cuda_gen1 = check_generator<CUDAGeneratorImpl>(gen1); cuda_gen1->set_philox_offset_per_thread(4); auto gen2 = at::cuda::detail::createCUDAGenerator(); gen2 = gen1.clone(); auto cuda_gen2 = check_generator<CUDAGeneratorImpl>(gen2); ASSERT_EQ(gen1.current_seed(), gen2.current_seed()); ASSERT_EQ( cuda_gen1->philox_offset_per_thread(), cuda_gen2->philox_offset_per_thread() ); } void thread_func_get_set_current_seed(Generator generator) { std::lock_guard<std::mutex> lock(generator.mutex()); auto current_seed = generator.current_seed(); current_seed++; generator.set_current_seed(current_seed); } TEST(CUDAGeneratorImpl, TestMultithreadingGetSetCurrentSeed) { // Test Description: // Test current seed getter and setter are thread safe // See Note [Acquire lock when using random generators] if (!at::cuda::is_available()) return; auto gen1 = at::cuda::detail::getDefaultCUDAGenerator(); auto initial_seed = gen1.current_seed(); std::thread t0{thread_func_get_set_current_seed, gen1}; std::thread t1{thread_func_get_set_current_seed, gen1}; std::thread t2{thread_func_get_set_current_seed, gen1}; t0.join(); t1.join(); t2.join(); ASSERT_EQ(gen1.current_seed(), initial_seed+3); } TEST(CUDAGeneratorImpl, TestRNGForking) { // Test Description: // Test that state of a generator can be frozen and // restored // See Note [Acquire lock when using random generators] if (!at::cuda::is_available()) return; auto default_gen = at::cuda::detail::getDefaultCUDAGenerator(); auto current_gen = at::cuda::detail::createCUDAGenerator(); { std::lock_guard<std::mutex> lock(default_gen.mutex()); current_gen = default_gen.clone(); // capture the current state of default generator } auto target_value = at::randn({1000}, at::kCUDA); // Dramatically alter the internal state of the main generator auto x = at::randn({100000}, at::kCUDA); auto forked_value = at::randn({1000}, current_gen, at::kCUDA); ASSERT_EQ(target_value.sum().item<double>(), forked_value.sum().item<double>()); } void makeRandomNumber() { hipSetDevice(std::rand() % 2); auto x = at::randn({1000}); } void testCudaRNGMultithread() { auto threads = std::vector<std::thread>(); for (auto i = 0; i < 1000; i++) { threads.emplace_back(makeRandomNumber); } for (auto& t : threads) { t.join(); } }; TEST(CUDAGeneratorImpl, TestMultithreadRNG) { if (!at::cuda::is_available()) return; testCudaRNGMultithread(); }
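The Philox tests above all share two ideas: the engine's offset counts blocks of four 32-bit outputs (so an offset of 2 skips 8 values), and every test launches a single-thread kernel with device-side asserts, synchronizes, and checks that no error was reported. A minimal standalone sketch of both ideas, assuming only the CUDA runtime API; the helper names are illustrative and not part of the test file:

#include <cuda_runtime.h>
#include <cstdio>

// Philox4_32 yields 4 outputs per counter tick, so skipping n values means
// advancing the offset by n / 4 (the offset test skips 8 values with offset 2).
static unsigned long long philox_offset_for(unsigned long long n_values) {
  return n_values / 4;  // assumes n_values is a multiple of 4
}

// Launch-then-verify pattern shared by the tests: synchronizing surfaces any
// device-side assert as an error on the host.
static bool kernel_completed_ok() {
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    std::fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    return false;
  }
  return true;
}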
3ced6e1a44066429789270c5983ff9d351e29dba.cu
#include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/CUDAGeneratorImpl.h> #include <c10/cuda/CUDAException.h> #include <c10/cuda/CUDAFunctions.h> #include <ATen/core/PhiloxRNGEngine.h> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <assert.h> #include <thread> using namespace at; /* * Philox Engine Tests */ __global__ void testEngineReproducibility(){ int idx = blockIdx.x * blockDim.x + threadIdx.x; at::Philox4_32_10 engine1(0, idx, 4); at::Philox4_32_10 engine2(0, idx, 4); assert(engine1() == engine2()); } void test_engine_reproducibility(){ testEngineReproducibility<<<1, 1>>>(); C10_CUDA_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineReproducibility) { // Test Description: // Tests if same inputs give same results. // launch one thread and create two engines. // Given same seed, idx and offset, assert that the engines // should be aligned and have the same sequence. if (!at::cuda::is_available()) return; test_engine_reproducibility(); cudaError_t err = cudaDeviceSynchronize(); bool isEQ = err == cudaSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset1(){ at::Philox4_32_10 engine1(123, 1, 0); // Note: offset is a multiple of 4. // So if you want to skip 8 values, offset would // be 2, since 2*4=8. at::Philox4_32_10 engine2(123, 1, 2); for(int i = 0; i < 8; i++){ // Note: instead of using the engine() call 8 times // we could have achieved the same functionality by // calling the incr() function twice. engine1(); } assert(engine1() == engine2()); } void test_engine_offset1(){ testEngineOffset1<<<1, 1>>>(); C10_CUDA_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset1) { // Test Description: // Tests offsetting in same thread. // launch one thread and create two engines. // make one engine skip the first 8 values and // make another engine increment to until the // first 8 values. Assert that the first call // of engine2 and the 9th call of engine1 are equal. if (!at::cuda::is_available()) return; test_engine_offset1(); cudaError_t err = cudaDeviceSynchronize(); bool isEQ = err == cudaSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset2(){ unsigned long long increment_val = ::ldexp(1.0, 64); at::Philox4_32_10 engine1(123, 0, increment_val); at::Philox4_32_10 engine2(123, increment_val, increment_val); engine2.incr_n(increment_val); engine2.incr(); assert(engine1() == engine2()); } void test_engine_offset2(){ testEngineOffset2<<<1, 1>>>(); C10_CUDA_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset2) { // Test Description: // Tests edge case at the end of the 2^190th value of the generator. // launch one thread and create two engines // make engine1 skip to the 2^64th 128 bit while being at thread 0 // make engine2 skip to the 2^64th 128 bit while being at 2^64th thread // Assert that engine2 should be increment_val+1 steps behind engine1. if (!at::cuda::is_available()) return; test_engine_offset2(); cudaDeviceSynchronize(); bool isEQ = cudaGetLastError() == cudaSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineOffset3(){ unsigned long long increment_val = ::ldexp(1.0, 64); at::Philox4_32_10 engine1(123, 0, increment_val); at::Philox4_32_10 engine2(123, 1, 0); engine1.incr(); assert(engine1() == engine2()); } void test_engine_offset3(){ testEngineOffset3<<<1, 1>>>(); C10_CUDA_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineOffset3) { // Test Description: // Tests edge case in between threads.
// launch one thread and create two engines // make engine1 skip to the 2^64th 128 bit while being at thread 0 // start engine2 at thread 1, with offset 0 // Assert that engine1 is 1 step behind engine2. if (!at::cuda::is_available()) return; test_engine_offset3(); cudaDeviceSynchronize(); bool isEQ = cudaGetLastError() == cudaSuccess; ASSERT_TRUE(isEQ); } __global__ void testEngineThreadIndex(){ at::Philox4_32_10 engine1(123456, 0, 4); at::Philox4_32_10 engine2(123456, 1, 4); assert(engine1() != engine2()); } void test_engine_thread_index(){ testEngineThreadIndex<<<1, 1>>>(); C10_CUDA_KERNEL_LAUNCH_CHECK(); } TEST(CUDAGeneratorImpl, TestPhiloxEngineIndex) { // Test Description: // Tests if thread indexing is working properly. // launch one thread and create two engines // with different thread index but same offset. // Assert that the engines have different sequences. if (!at::cuda::is_available()) return; test_engine_thread_index(); cudaDeviceSynchronize(); bool isEQ = cudaGetLastError() == cudaSuccess; ASSERT_TRUE(isEQ); } /* * CUDA Generator Tests */ TEST(CUDAGeneratorImpl, TestGeneratorDynamicCast) { // Test Description: Check dynamic cast for CUDA if (!at::cuda::is_available()) return; auto foo = at::cuda::detail::createCUDAGenerator(); auto result = foo.get<CUDAGeneratorImpl>(); ASSERT_EQ(typeid(at::CUDAGeneratorImpl*).hash_code(), typeid(result).hash_code()); } TEST(CUDAGeneratorImpl, TestDefaultGenerator) { // Test Description: // Check if default generator state is created only once // address of generator should be same in all calls if (!at::cuda::is_available()) return; auto foo = at::cuda::detail::getDefaultCUDAGenerator(); auto bar = at::cuda::detail::getDefaultCUDAGenerator(); ASSERT_EQ(foo, bar); if (c10::cuda::device_count() >= 2) { foo = at::cuda::detail::getDefaultCUDAGenerator(1); bar = at::cuda::detail::getDefaultCUDAGenerator(1); ASSERT_EQ(foo, bar); foo = at::cuda::detail::getDefaultCUDAGenerator(0); bar = at::cuda::detail::getDefaultCUDAGenerator(1); ASSERT_NE(foo, bar); } } TEST(CUDAGeneratorImpl, TestCloning) { // Test Description: // Check cloning of new generators. // Note that we don't allow cloning of other // generator states into default generators. 
if (!at::cuda::is_available()) return; auto gen1 = at::cuda::detail::createCUDAGenerator(); gen1.set_current_seed(123); // modify gen1 state auto cuda_gen1 = check_generator<CUDAGeneratorImpl>(gen1); cuda_gen1->set_philox_offset_per_thread(4); auto gen2 = at::cuda::detail::createCUDAGenerator(); gen2 = gen1.clone(); auto cuda_gen2 = check_generator<CUDAGeneratorImpl>(gen2); ASSERT_EQ(gen1.current_seed(), gen2.current_seed()); ASSERT_EQ( cuda_gen1->philox_offset_per_thread(), cuda_gen2->philox_offset_per_thread() ); } void thread_func_get_set_current_seed(Generator generator) { std::lock_guard<std::mutex> lock(generator.mutex()); auto current_seed = generator.current_seed(); current_seed++; generator.set_current_seed(current_seed); } TEST(CUDAGeneratorImpl, TestMultithreadingGetSetCurrentSeed) { // Test Description: // Test current seed getter and setter are thread safe // See Note [Acquire lock when using random generators] if (!at::cuda::is_available()) return; auto gen1 = at::cuda::detail::getDefaultCUDAGenerator(); auto initial_seed = gen1.current_seed(); std::thread t0{thread_func_get_set_current_seed, gen1}; std::thread t1{thread_func_get_set_current_seed, gen1}; std::thread t2{thread_func_get_set_current_seed, gen1}; t0.join(); t1.join(); t2.join(); ASSERT_EQ(gen1.current_seed(), initial_seed+3); } TEST(CUDAGeneratorImpl, TestRNGForking) { // Test Description: // Test that state of a generator can be frozen and // restored // See Note [Acquire lock when using random generators] if (!at::cuda::is_available()) return; auto default_gen = at::cuda::detail::getDefaultCUDAGenerator(); auto current_gen = at::cuda::detail::createCUDAGenerator(); { std::lock_guard<std::mutex> lock(default_gen.mutex()); current_gen = default_gen.clone(); // capture the current state of default generator } auto target_value = at::randn({1000}, at::kCUDA); // Dramatically alter the internal state of the main generator auto x = at::randn({100000}, at::kCUDA); auto forked_value = at::randn({1000}, current_gen, at::kCUDA); ASSERT_EQ(target_value.sum().item<double>(), forked_value.sum().item<double>()); } void makeRandomNumber() { cudaSetDevice(std::rand() % 2); auto x = at::randn({1000}); } void testCudaRNGMultithread() { auto threads = std::vector<std::thread>(); for (auto i = 0; i < 1000; i++) { threads.emplace_back(makeRandomNumber); } for (auto& t : threads) { t.join(); } }; TEST(CUDAGeneratorImpl, TestMultithreadRNG) { if (!at::cuda::is_available()) return; testCudaRNGMultithread(); }
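The multithreaded seed test above works only because every thread takes the generator's mutex before reading and writing the seed. A minimal sketch of that locking discipline with plain standard-library types (no ATen involved), showing why the three increments cannot be lost:

#include <cassert>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  std::mutex m;
  unsigned long long seed = 42;
  std::vector<std::thread> workers;
  for (int i = 0; i < 3; ++i) {
    workers.emplace_back([&] {
      // Same rule as the test: acquire the lock before touching shared RNG state.
      std::lock_guard<std::mutex> lock(m);
      ++seed;
    });
  }
  for (auto& t : workers) t.join();
  assert(seed == 45);  // 3 guarded increments, no lost updates
  return 0;
}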
60f3fd566157380e059a8a4c2210525f3efaec0c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies * with kernel execution. This sample illustrates the usage of CUDA streams to * achieve overlapping of kernel execution with copying data to and from the device. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ const char *sSDKname = "simpleMultiCopy"; // includes, system #include <stdio.h> // include CUDA #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper for shared that are common to CUDA Samples // includes, kernels // Declare the CUDA kernels here and main() code that is needed to launch // Compute workload on the system __global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i=0; i<inner_reps; ++i) { g_out[idx] = g_in[idx] + 1; } } } #define STREAM_COUNT 4 // Uncomment to simulate data source/sink IO times //#define SIMULATE_IO int *h_data_source; int *h_data_sink; int *h_data_in[STREAM_COUNT]; int *d_data_in[STREAM_COUNT]; int *h_data_out[STREAM_COUNT]; int *d_data_out[STREAM_COUNT]; hipEvent_t cycleDone[STREAM_COUNT]; hipStream_t stream[STREAM_COUNT]; hipEvent_t start, stop; int N = 1 << 22; int nreps = 10; // number of times each experiment is repeated int inner_reps = 5; int memsize; dim3 block(512); dim3 grid; int thread_blocks; float processWithStreams(int streams_used); void init(); bool test(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int cuda_device = 0; float scale_factor; hipDeviceProp_t deviceProp; printf("[%s] - Starting...\n", sSDKname); if (checkCmdLineFlag(argc, (const char **)argv, "device")) { cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (cuda_device < 0) { printf("Invalid command line parameters\n"); exit(EXIT_FAILURE); } else { printf("cuda_device = %d\n", cuda_device); cuda_device = gpuDeviceInit(cuda_device); if (cuda_device < 0) { printf("No CUDA Capable devices found, exiting...\n"); exit(EXIT_SUCCESS); } } } else { // Otherwise pick the device with the highest Gflops/s cuda_device = gpuGetMaxGflopsDeviceId(); checkCudaErrors(hipSetDevice(cuda_device)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name); } checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); // 
Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); N = (int)((float)N / scale_factor); printf("> Device name: %s\n", deviceProp.name); printf("> CUDA Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); printf("> scale_factor = %.2f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", N); memsize = N * sizeof(int); thread_blocks = N / block.x; grid.x = thread_blocks % 65535; grid.y = (thread_blocks / 65535 + 1); // Allocate resources h_data_source = (int *) malloc(memsize); h_data_sink = (int *) malloc(memsize); for (int i =0; i<STREAM_COUNT; ++i) { checkCudaErrors(hipHostMalloc(&h_data_in[i], memsize, hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_data_in[i], memsize)); checkCudaErrors(hipHostMalloc(&h_data_out[i], memsize, hipHostMallocDefault)); checkCudaErrors(hipMalloc(&d_data_out[i], memsize)); checkCudaErrors(hipStreamCreate(&stream[i])); checkCudaErrors(hipEventCreate(&cycleDone[i])); hipEventRecord(cycleDone[i], stream[i]); } hipEventCreate(&start); hipEventCreate(&stop); init(); // Kernel warmup hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, 0, d_data_out[0], d_data_in[0], N, inner_reps); // Time copies and kernel hipEventRecord(start,0); checkCudaErrors(hipMemcpyAsync(d_data_in[0], h_data_in[0], memsize, hipMemcpyHostToDevice,0)); hipEventRecord(stop,0); hipEventSynchronize(stop); float memcpy_h2d_time; hipEventElapsedTime(&memcpy_h2d_time, start, stop); hipEventRecord(start,0); checkCudaErrors(hipMemcpyAsync(h_data_out[0], d_data_out[0], memsize, hipMemcpyDeviceToHost, 0)); hipEventRecord(stop,0); hipEventSynchronize(stop); float memcpy_d2h_time; hipEventElapsedTime(&memcpy_d2h_time, start, stop); hipEventRecord(start,0); hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block),0,0, d_data_out[0], d_data_in[0], N, inner_reps); hipEventRecord(stop,0); hipEventSynchronize(stop); float kernel_time; hipEventElapsedTime(&kernel_time, start, stop); printf("\n"); printf("Relevant properties of this CUDA device\n"); printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " "); //printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " "); printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n" " (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n", (deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1) ? 
"X" : " "); printf("\n"); printf("Measured timings (throughput):\n"); printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time); printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time); printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time); printf("\n"); printf("Theoretical limits for speedup gained from overlapped data transfers:\n"); printf("No overlap at all (transfer-kernel-transfer): %f ms \n", memcpy_h2d_time + memcpy_d2h_time + kernel_time); printf("Compute can overlap with one transfer: %f ms\n", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time)); printf("Compute can overlap with both data transfers: %f ms\n", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time)); // Process pipelined work float serial_time = processWithStreams(1); float overlap_time = processWithStreams(STREAM_COUNT); printf("\nAverage measured timings over %d repetitions:\n", nreps); printf(" Avg. time when execution fully serialized\t: %f ms\n", serial_time / nreps); printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT, overlap_time / nreps); printf(" Avg. speedup gained (serialized - overlapped)\t: %f ms\n", (serial_time - overlap_time) / nreps); printf("\nMeasured throughput:\n"); printf(" Fully serialized execution\t\t: %f GB/s\n", (nreps * (memsize * 2e-6))/ serial_time); printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time); // Verify the results, we will use the results for final output bool bResults = test(); // Free resources free(h_data_source); free(h_data_sink); for (int i =0; i<STREAM_COUNT; ++i) { hipHostFree(h_data_in[i]); hipFree(d_data_in[i]); hipHostFree(h_data_out[i]); hipFree(d_data_out[i]); hipStreamDestroy(stream[i]); hipEventDestroy(cycleDone[i]); } hipEventDestroy(start); hipEventDestroy(stop); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); // Test result exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE); } float processWithStreams(int streams_used) { int current_stream = 0; float time; // Do processing in a loop // // Note: All memory commands are processed in the order they are issued, // independent of the stream they are enqueued in. Hence the pattern by // which the copy and kernel commands are enqueued in the stream // has an influence on the achieved overlap. 
hipEventRecord(start, 0); for (int i=0; i<nreps; ++i) { int next_stream = (current_stream + 1) % streams_used; #ifdef SIMULATE_IO // Store the result memcpy(h_data_sink, h_data_out[current_stream],memsize); // Read new input memcpy(h_data_in[next_stream], h_data_source, memsize); #endif // Ensure that processing and copying of the last cycle has finished hipEventSynchronize(cycleDone[next_stream]); // Process current frame hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, stream[current_stream], d_data_out[current_stream], d_data_in[current_stream], N, inner_reps); // Upload next frame checkCudaErrors(hipMemcpyAsync( d_data_in[next_stream], h_data_in[next_stream], memsize, hipMemcpyHostToDevice, stream[next_stream])); // Download current frame checkCudaErrors(hipMemcpyAsync( h_data_out[current_stream], d_data_out[current_stream], memsize, hipMemcpyDeviceToHost, stream[current_stream])); checkCudaErrors(hipEventRecord( cycleDone[current_stream], stream[current_stream])); current_stream = next_stream; } hipEventRecord(stop, 0); hipDeviceSynchronize(); hipEventElapsedTime(&time, start, stop); return time; } void init() { for (int i=0; i<N; ++i) { h_data_source[i] = 0; } for (int i =0; i<STREAM_COUNT; ++i) { memcpy(h_data_in[i], h_data_source, memsize); } } bool test() { bool passed = true; for (int j =0; j<STREAM_COUNT; ++j) { for (int i =0; i<N; ++i) { passed &= (h_data_out[j][i] == 1); } } return passed; }
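The three theoretical limits this sample prints are plain max/sum arithmetic over the measured copy and kernel times. A small self-contained sketch of that calculation (the function name is illustrative):

#include <algorithm>
#include <cstdio>

// Bounds reported by the sample: fully serialized, one copy direction
// overlapped with compute, and both copy directions overlapped with compute.
static void print_overlap_limits(float h2d_ms, float kernel_ms, float d2h_ms) {
  float serial   = h2d_ms + kernel_ms + d2h_ms;
  float one_copy = std::max(h2d_ms + d2h_ms, kernel_ms);
  float two_copy = std::max(std::max(h2d_ms, d2h_ms), kernel_ms);
  std::printf("serialized: %f ms, one-copy overlap: %f ms, two-copy overlap: %f ms\n",
              serial, one_copy, two_copy);
}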
60f3fd566157380e059a8a4c2210525f3efaec0c.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies * with kernel execution. This sample illustrates the usage of CUDA streams to * achieve overlapping of kernel execution with copying data to and from the device. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ const char *sSDKname = "simpleMultiCopy"; // includes, system #include <stdio.h> // include CUDA #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper for shared that are common to CUDA Samples // includes, kernels // Declare the CUDA kernels here and main() code that is needed to launch // Compute workload on the system __global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i=0; i<inner_reps; ++i) { g_out[idx] = g_in[idx] + 1; } } } #define STREAM_COUNT 4 // Uncomment to simulate data source/sink IO times //#define SIMULATE_IO int *h_data_source; int *h_data_sink; int *h_data_in[STREAM_COUNT]; int *d_data_in[STREAM_COUNT]; int *h_data_out[STREAM_COUNT]; int *d_data_out[STREAM_COUNT]; cudaEvent_t cycleDone[STREAM_COUNT]; cudaStream_t stream[STREAM_COUNT]; cudaEvent_t start, stop; int N = 1 << 22; int nreps = 10; // number of times each experiment is repeated int inner_reps = 5; int memsize; dim3 block(512); dim3 grid; int thread_blocks; float processWithStreams(int streams_used); void init(); bool test(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int cuda_device = 0; float scale_factor; cudaDeviceProp deviceProp; printf("[%s] - Starting...\n", sSDKname); if (checkCmdLineFlag(argc, (const char **)argv, "device")) { cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (cuda_device < 0) { printf("Invalid command line parameters\n"); exit(EXIT_FAILURE); } else { printf("cuda_device = %d\n", cuda_device); cuda_device = gpuDeviceInit(cuda_device); if (cuda_device < 0) { printf("No CUDA Capable devices found, exiting...\n"); exit(EXIT_SUCCESS); } } } else { // Otherwise pick the device with the highest Gflops/s cuda_device = gpuGetMaxGflopsDeviceId(); checkCudaErrors(cudaSetDevice(cuda_device)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name); } checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); // Anything that is less than 32 Cores will have scaled down 
workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); N = (int)((float)N / scale_factor); printf("> Device name: %s\n", deviceProp.name); printf("> CUDA Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); printf("> scale_factor = %.2f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", N); memsize = N * sizeof(int); thread_blocks = N / block.x; grid.x = thread_blocks % 65535; grid.y = (thread_blocks / 65535 + 1); // Allocate resources h_data_source = (int *) malloc(memsize); h_data_sink = (int *) malloc(memsize); for (int i =0; i<STREAM_COUNT; ++i) { checkCudaErrors(cudaHostAlloc(&h_data_in[i], memsize, cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_data_in[i], memsize)); checkCudaErrors(cudaHostAlloc(&h_data_out[i], memsize, cudaHostAllocDefault)); checkCudaErrors(cudaMalloc(&d_data_out[i], memsize)); checkCudaErrors(cudaStreamCreate(&stream[i])); checkCudaErrors(cudaEventCreate(&cycleDone[i])); cudaEventRecord(cycleDone[i], stream[i]); } cudaEventCreate(&start); cudaEventCreate(&stop); init(); // Kernel warmup incKernel<<<grid, block>>>(d_data_out[0], d_data_in[0], N, inner_reps); // Time copies and kernel cudaEventRecord(start,0); checkCudaErrors(cudaMemcpyAsync(d_data_in[0], h_data_in[0], memsize, cudaMemcpyHostToDevice,0)); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_h2d_time; cudaEventElapsedTime(&memcpy_h2d_time, start, stop); cudaEventRecord(start,0); checkCudaErrors(cudaMemcpyAsync(h_data_out[0], d_data_out[0], memsize, cudaMemcpyDeviceToHost, 0)); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_d2h_time; cudaEventElapsedTime(&memcpy_d2h_time, start, stop); cudaEventRecord(start,0); incKernel<<<grid, block,0,0>>>(d_data_out[0], d_data_in[0], N, inner_reps); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float kernel_time; cudaEventElapsedTime(&kernel_time, start, stop); printf("\n"); printf("Relevant properties of this CUDA device\n"); printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " "); //printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " "); printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n" " (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n", (deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1) ? 
"X" : " "); printf("\n"); printf("Measured timings (throughput):\n"); printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time); printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time); printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time); printf("\n"); printf("Theoretical limits for speedup gained from overlapped data transfers:\n"); printf("No overlap at all (transfer-kernel-transfer): %f ms \n", memcpy_h2d_time + memcpy_d2h_time + kernel_time); printf("Compute can overlap with one transfer: %f ms\n", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time)); printf("Compute can overlap with both data transfers: %f ms\n", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time)); // Process pipelined work float serial_time = processWithStreams(1); float overlap_time = processWithStreams(STREAM_COUNT); printf("\nAverage measured timings over %d repetitions:\n", nreps); printf(" Avg. time when execution fully serialized\t: %f ms\n", serial_time / nreps); printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT, overlap_time / nreps); printf(" Avg. speedup gained (serialized - overlapped)\t: %f ms\n", (serial_time - overlap_time) / nreps); printf("\nMeasured throughput:\n"); printf(" Fully serialized execution\t\t: %f GB/s\n", (nreps * (memsize * 2e-6))/ serial_time); printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time); // Verify the results, we will use the results for final output bool bResults = test(); // Free resources free(h_data_source); free(h_data_sink); for (int i =0; i<STREAM_COUNT; ++i) { cudaFreeHost(h_data_in[i]); cudaFree(d_data_in[i]); cudaFreeHost(h_data_out[i]); cudaFree(d_data_out[i]); cudaStreamDestroy(stream[i]); cudaEventDestroy(cycleDone[i]); } cudaEventDestroy(start); cudaEventDestroy(stop); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); // Test result exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE); } float processWithStreams(int streams_used) { int current_stream = 0; float time; // Do processing in a loop // // Note: All memory commands are processed in the order they are issued, // independent of the stream they are enqueued in. Hence the pattern by // which the copy and kernel commands are enqueued in the stream // has an influence on the achieved overlap. 
cudaEventRecord(start, 0); for (int i=0; i<nreps; ++i) { int next_stream = (current_stream + 1) % streams_used; #ifdef SIMULATE_IO // Store the result memcpy(h_data_sink, h_data_out[current_stream],memsize); // Read new input memcpy(h_data_in[next_stream], h_data_source, memsize); #endif // Ensure that processing and copying of the last cycle has finished cudaEventSynchronize(cycleDone[next_stream]); // Process current frame incKernel<<<grid, block, 0, stream[current_stream]>>>( d_data_out[current_stream], d_data_in[current_stream], N, inner_reps); // Upload next frame checkCudaErrors(cudaMemcpyAsync( d_data_in[next_stream], h_data_in[next_stream], memsize, cudaMemcpyHostToDevice, stream[next_stream])); // Download current frame checkCudaErrors(cudaMemcpyAsync( h_data_out[current_stream], d_data_out[current_stream], memsize, cudaMemcpyDeviceToHost, stream[current_stream])); checkCudaErrors(cudaEventRecord( cycleDone[current_stream], stream[current_stream])); current_stream = next_stream; } cudaEventRecord(stop, 0); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); return time; } void init() { for (int i=0; i<N; ++i) { h_data_source[i] = 0; } for (int i =0; i<STREAM_COUNT; ++i) { memcpy(h_data_in[i], h_data_source, memsize); } } bool test() { bool passed = true; for (int j =0; j<STREAM_COUNT; ++j) { for (int i =0; i<N; ++i) { passed &= (h_data_out[j][i] == 1); } } return passed; }
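The overlap in this sample rests on two ingredients: pinned host buffers (cudaHostAlloc), without which cudaMemcpyAsync falls back to synchronous behavior, and events to time work enqueued on a stream. A stripped-down sketch of just those ingredients, with a made-up buffer size:

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const size_t bytes = 1 << 20;
  int *h_buf = nullptr, *d_buf = nullptr;
  cudaStream_t stream;
  cudaEvent_t start, stop;

  cudaHostAlloc((void**)&h_buf, bytes, cudaHostAllocDefault);  // pinned host memory
  cudaMalloc((void**)&d_buf, bytes);
  cudaStreamCreate(&stream);
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // Time an asynchronous H2D copy on the stream with a pair of events.
  cudaEventRecord(start, stream);
  cudaMemcpyAsync(d_buf, h_buf, bytes, cudaMemcpyHostToDevice, stream);
  cudaEventRecord(stop, stream);
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("H2D copy of %zu bytes took %f ms\n", bytes, ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaStreamDestroy(stream);
  cudaFree(d_buf);
  cudaFreeHost(h_buf);
  return 0;
}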
e09652fffcfc43714b5316406264f4ac28478a12.hip
// !!! This is a file automatically generated by hipify!!! // Created by Liu Chengjian on 17/10/9. // Copyright (c) 2017 csliu. All rights reserved. // #include "GCRSMatrix.h" #include "utils.h" #include "kernels.hip" int main(int argc, const char * argv[]) { if (argc != 3) { printf("Usage: ./%s workSizePerDataParityBlockInMB numberOfTasks\n", argv[0]); exit(0); } int bufSize = atoi(argv[1]) * 1024 * 1024; // workSize per data parity block int taskNum = atoi(argv[2]); double encode_time = 0.0; #ifdef DUMP for (int m = 4; m <= 4; ++m) { for (int n = 8; n <= 8; ++n) { // w is updated in the nested loop for (int k = MAX_K; k <= MAX_K; ++k) { #else for (int m = 1; m <= 4; ++m) { for (int n = 4; n <= 8; ++n) { // w is updated in the nested loop for (int k = m; k <= MAX_K; ++k) { #endif int w = gcrs_check_k_m_w(k, m, n); if (w < 0) continue; #ifdef DUMP printf("k:%d, m:%d w:%d\n",k,m,w); #endif int *bitmatrix = gcrs_create_bitmatrix(k, m, w); //printMatrix(bitmatrix, k*w, m*w); // adjust the bufSize int bufSizePerTask = align_value(bufSize / taskNum, sizeof(long) * w); bufSize = bufSizePerTask * taskNum; // compute the bufSize for the last task int bufSizeForLastTask = bufSize - (bufSizePerTask * (taskNum - 1)); #ifdef DUMP printf("Total Size:%d Size per task:%d Size for last task:%d\n", bufSize, bufSizePerTask, bufSizeForLastTask); #endif // allocate host buffers char* data = (char*) malloc (bufSize * k); char* code = (char*) malloc (bufSize * m); // initialize host buffer generateRandomValue(data, bufSize * k); // allocate device buffers char* d_data, *d_code; hipMalloc((void**)&d_data, bufSize * k); hipMalloc((void**)&d_code, bufSize * m); int dataSizePerAssign = bufSizePerTask * k; int codeSizePerAssign = bufSizePerTask * m; // pointers to the device buffers char** d_data_ptr = (char**) malloc (sizeof(char*) * taskNum); char** d_code_ptr = (char**) malloc (sizeof(char*) * taskNum); for (int i = 0; i < taskNum; ++i) { d_data_ptr[i] = d_data + dataSizePerAssign * i; d_code_ptr[i] = d_code + codeSizePerAssign * i; } // taskSize will determine the number of kernels to run on a device int taskSize = 1; int mRemain = m; // adjust taskSize if (m >= MAX_M) { taskSize = m / MAX_M; if (m % MAX_M != 0) ++taskSize; } #ifdef DUMP printf("task size: %d\n", taskSize); #endif // set up kernel execution parameters int *mValue = (int*) malloc (sizeof(int) * taskSize); int *index = (int*) malloc (sizeof(int) * taskSize); coding_func *coding_function_ptrs = (coding_func*) malloc (sizeof(coding_func) * taskSize); for (int i = 0; i < taskSize; ++i) { if (mRemain < MAX_M) { mValue[i] = mRemain; }else{ mValue[i] = MAX_M; mRemain = mRemain - MAX_M; } if (i == 0) { index[i] = 0; }else{ index[i] = index[i-1] + k * w; } coding_function_ptrs[i] = coding_func_array[(mValue[i] - 1) * (MAX_W - MIN_W + 1)+ w - MIN_W]; } // create and then update encoding bit matrix unsigned int *all_columns_bitmatrix = (unsigned int*) malloc (sizeof(unsigned int) * k * w * taskSize); int mValueSum = 0; for (int i = 0; i < taskSize; ++i) { unsigned int *column_bitmatrix = gcrs_create_column_coding_bitmatrix( k, mValue[i], w, bitmatrix + k * w * mValueSum * w); memcpy((all_columns_bitmatrix + i * k * w), column_bitmatrix, k * w * sizeof(unsigned int)); free(column_bitmatrix); mValueSum += mValue[i]; } // allocate bitmatrix on a device unsigned int *d_bitmatrix; hipMalloc((void**)&d_bitmatrix, sizeof(unsigned int) * k * w * taskSize); hipMemcpy(d_bitmatrix, all_columns_bitmatrix, sizeof(unsigned int) * k * w * taskSize, hipMemcpyHostToDevice); 
int warpThreadNum = 32; int threadNum = MAX_THREAD_NUM; size_t workSizePerWarp = warpThreadNum / w * w; size_t workSizePerBlock = threadNum / warpThreadNum * workSizePerWarp * sizeof(size_t); size_t blockNum = bufSizePerTask / workSizePerBlock; if ((bufSizePerTask % workSizePerBlock) != 0) { blockNum = blockNum + 1; } #ifdef DUMP printf("#blocks: %zu blockSize: %d\n", blockNum, threadNum); #endif struct timeval startEncodeTime, endEncodeTime; gettimeofday(&startEncodeTime, NULL); for (int i = 0; i < taskNum; ++i) { int count = (i == taskNum-1) ? bufSizeForLastTask : bufSizePerTask; hipMemcpyAsync(d_data + i * k * bufSizePerTask, data + i * k * bufSizePerTask, (k * count), hipMemcpyHostToDevice, 0); int workSizePerGrid = count / sizeof(long); int size = workSizePerGrid * sizeof(long); mValueSum = 0; for (int j = 0; j < taskSize; ++j) { coding_function_ptrs[j](k, index[j], d_data_ptr[i], d_code_ptr[i]+mValueSum*size, d_bitmatrix, threadNum, blockNum, workSizePerGrid); mValueSum += mValue[j]; } hipMemcpyAsync( code + i * m * bufSizePerTask, d_code + i * m * bufSizePerTask, (m * count), hipMemcpyDeviceToHost, 0); } hipDeviceSynchronize(); gettimeofday(&endEncodeTime, NULL); double etime = elapsed_time_in_ms(startEncodeTime, endEncodeTime); #ifdef DUMP printf("Encoding time over %d tasks: %lf (ms)\n", taskNum, etime); #endif encode_time += etime; #ifdef DUMP for (int i = 0; i < bufSize*m; i++) printf("%d\n", code[i]); printf("\n"); #endif hipFree(d_data); hipFree(d_code); hipFree(d_bitmatrix); free(mValue); free(index); free(coding_function_ptrs); free(bitmatrix); free(all_columns_bitmatrix); free(d_data_ptr); free(d_code_ptr); free(code); free(data); } } } printf("Total encoding time %lf (s)\n", encode_time * 1e-3); return 0; }
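The encoder sizes its per-task buffers with align_value() from utils.h, which is not shown here; the sketch below assumes it rounds up to a multiple of sizeof(long) * w so each task processes whole w-word groups. align_up is a hypothetical stand-in, not the project's function:

#include <cstdio>

// Hypothetical stand-in for align_value(): round size up to a multiple of alignment.
static int align_up(int size, int alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
  const int w = 7, taskNum = 4;
  const int bufSize = 64 * 1024 * 1024;
  // Each task's share must be a multiple of sizeof(long) * w, as in the encoder.
  int bufSizePerTask = align_up(bufSize / taskNum, (int)(sizeof(long) * w));
  std::printf("per-task buffer: %d bytes (multiple of %zu)\n",
              bufSizePerTask, sizeof(long) * w);
  return 0;
}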
e09652fffcfc43714b5316406264f4ac28478a12.cu
// Created by Liu Chengjian on 17/10/9. // Copyright (c) 2017 csliu. All rights reserved. // #include "GCRSMatrix.h" #include "utils.h" #include "kernels.cu" int main(int argc, const char * argv[]) { if (argc != 3) { printf("Usage: ./%s workSizePerDataParityBlockInMB numberOfTasks\n", argv[0]); exit(0); } int bufSize = atoi(argv[1]) * 1024 * 1024; // workSize per data parity block int taskNum = atoi(argv[2]); double encode_time = 0.0; #ifdef DUMP for (int m = 4; m <= 4; ++m) { for (int n = 8; n <= 8; ++n) { // w is updated in the nested loop for (int k = MAX_K; k <= MAX_K; ++k) { #else for (int m = 1; m <= 4; ++m) { for (int n = 4; n <= 8; ++n) { // w is updated in the nested loop for (int k = m; k <= MAX_K; ++k) { #endif int w = gcrs_check_k_m_w(k, m, n); if (w < 0) continue; #ifdef DUMP printf("k:%d, m:%d w:%d\n",k,m,w); #endif int *bitmatrix = gcrs_create_bitmatrix(k, m, w); //printMatrix(bitmatrix, k*w, m*w); // adjust the bufSize int bufSizePerTask = align_value(bufSize / taskNum, sizeof(long) * w); bufSize = bufSizePerTask * taskNum; // compute the bufSize for the last task int bufSizeForLastTask = bufSize - (bufSizePerTask * (taskNum - 1)); #ifdef DUMP printf("Total Size:%d Size per task:%d Size for last task:%d\n", bufSize, bufSizePerTask, bufSizeForLastTask); #endif // allocate host buffers char* data = (char*) malloc (bufSize * k); char* code = (char*) malloc (bufSize * m); // initialize host buffer generateRandomValue(data, bufSize * k); // allocate device buffers char* d_data, *d_code; hipMalloc((void**)&d_data, bufSize * k); hipMalloc((void**)&d_code, bufSize * m); int dataSizePerAssign = bufSizePerTask * k; int codeSizePerAssign = bufSizePerTask * m; // pointers to the device buffers char** d_data_ptr = (char**) malloc (sizeof(char*) * taskNum); char** d_code_ptr = (char**) malloc (sizeof(char*) * taskNum); for (int i = 0; i < taskNum; ++i) { d_data_ptr[i] = d_data + dataSizePerAssign * i; d_code_ptr[i] = d_code + codeSizePerAssign * i; } // taskSize will determine the number of kernels to run on a device int taskSize = 1; int mRemain = m; // adjust taskSize if (m >= MAX_M) { taskSize = m / MAX_M; if (m % MAX_M != 0) ++taskSize; } #ifdef DUMP printf("task size: %d\n", taskSize); #endif // set up kernel execution parameters int *mValue = (int*) malloc (sizeof(int) * taskSize); int *index = (int*) malloc (sizeof(int) * taskSize); coding_func *coding_function_ptrs = (coding_func*) malloc (sizeof(coding_func) * taskSize); for (int i = 0; i < taskSize; ++i) { if (mRemain < MAX_M) { mValue[i] = mRemain; }else{ mValue[i] = MAX_M; mRemain = mRemain - MAX_M; } if (i == 0) { index[i] = 0; }else{ index[i] = index[i-1] + k * w; } coding_function_ptrs[i] = coding_func_array[(mValue[i] - 1) * (MAX_W - MIN_W + 1)+ w - MIN_W]; } // create and then update encoding bit matrix unsigned int *all_columns_bitmatrix = (unsigned int*) malloc (sizeof(unsigned int) * k * w * taskSize); int mValueSum = 0; for (int i = 0; i < taskSize; ++i) { unsigned int *column_bitmatrix = gcrs_create_column_coding_bitmatrix( k, mValue[i], w, bitmatrix + k * w * mValueSum * w); memcpy((all_columns_bitmatrix + i * k * w), column_bitmatrix, k * w * sizeof(unsigned int)); free(column_bitmatrix); mValueSum += mValue[i]; } // allocate bitmatrix on a device unsigned int *d_bitmatrix; hipMalloc((void**)&d_bitmatrix, sizeof(unsigned int) * k * w * taskSize); hipMemcpy(d_bitmatrix, all_columns_bitmatrix, sizeof(unsigned int) * k * w * taskSize, hipMemcpyHostToDevice); int warpThreadNum = 32; int threadNum = MAX_THREAD_NUM; 
size_t workSizePerWarp = warpThreadNum / w * w; size_t workSizePerBlock = threadNum / warpThreadNum * workSizePerWarp * sizeof(size_t); size_t blockNum = bufSizePerTask / workSizePerBlock; if ((bufSizePerTask % workSizePerBlock) != 0) { blockNum = blockNum + 1; } #ifdef DUMP printf("#blocks: %zu blockSize: %d\n", blockNum, threadNum); #endif struct timeval startEncodeTime, endEncodeTime; gettimeofday(&startEncodeTime, NULL); for (int i = 0; i < taskNum; ++i) { int count = (i == taskNum-1) ? bufSizeForLastTask : bufSizePerTask; hipMemcpyAsync(d_data + i * k * bufSizePerTask, data + i * k * bufSizePerTask, (k * count), hipMemcpyHostToDevice, 0); int workSizePerGrid = count / sizeof(long); int size = workSizePerGrid * sizeof(long); mValueSum = 0; for (int j = 0; j < taskSize; ++j) { coding_function_ptrs[j](k, index[j], d_data_ptr[i], d_code_ptr[i]+mValueSum*size, d_bitmatrix, threadNum, blockNum, workSizePerGrid); mValueSum += mValue[j]; } hipMemcpyAsync( code + i * m * bufSizePerTask, d_code + i * m * bufSizePerTask, (m * count), hipMemcpyDeviceToHost, 0); } hipDeviceSynchronize(); gettimeofday(&endEncodeTime, NULL); double etime = elapsed_time_in_ms(startEncodeTime, endEncodeTime); #ifdef DUMP printf("Encoding time over %d tasks: %lf (ms)\n", taskNum, etime); #endif encode_time += etime; #ifdef DUMP for (int i = 0; i < bufSize*m; i++) printf("%d\n", code[i]); printf("\n"); #endif hipFree(d_data); hipFree(d_code); hipFree(d_bitmatrix); free(mValue); free(index); free(coding_function_ptrs); free(bitmatrix); free(all_columns_bitmatrix); free(d_data_ptr); free(d_code_ptr); free(code); free(data); } } } printf("Total encoding time %lf (s)\n", encode_time * 1e-3); return 0; }
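The launch geometry above rounds the 32-thread warp down to a multiple of w, so each warp handles whole w-word columns, and then takes a ceiling when converting the per-task buffer size into a block count. A tiny sketch of that arithmetic with assumed values for the thread count and buffer size (MAX_THREAD_NUM comes from the project's headers and is not shown here):

#include <cstdio>

int main() {
  const int warpThreadNum = 32, threadNum = 128, w = 7;
  const size_t bufSizePerTask = 1 << 20;

  size_t workSizePerWarp  = warpThreadNum / w * w;  // 28 for w = 7: warp rounded down to a multiple of w
  size_t workSizePerBlock = threadNum / warpThreadNum * workSizePerWarp * sizeof(size_t);
  size_t blockNum = bufSizePerTask / workSizePerBlock
                  + ((bufSizePerTask % workSizePerBlock) ? 1 : 0);  // ceiling division

  std::printf("warp work: %zu words, block work: %zu bytes, blocks: %zu\n",
              workSizePerWarp, workSizePerBlock, blockNum);
  return 0;
}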
3c6b7f73cd19363b09acf0d7b8f7ef33e7ae147f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file psroi_pooling.cu * \brief psroi pooling operator * \author Yi Li, Tairui Chen, Guodong Zhang, Haozhi Qi, Jifeng Dai */ #include "./psroi_pooling3d-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define PSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __global__ void PSROIPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int depth,const int height, const int width, const int pooled_depth,const int pooled_height, const int pooled_width, const DType* bottom_rois, const int output_dim, const int group_size, DType* top_data) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pz = index % pooled_depth; int pw = (index / pooled_depth) % pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int ctop = (index /pooled_depth/pooled_width / pooled_height) % output_dim; int n = index / pooled_depth / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_z = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale; DType roi_end_z = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[6]) + 1.)
* spatial_scale; // Force too small ROIs to be 1x1 DType roi_depth = max(roi_end_z - roi_start_z,0.1);//avoid 0 DType roi_width = max(roi_end_w - roi_start_w, 0.1); DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_z = roi_depth / static_cast<DType>(pooled_height); DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int zstart = floor(static_cast<DType>(pz) * bin_size_z + roi_start_z); int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int zend = ceil(static_cast<DType>(pz + 1) * bin_size_z + roi_start_z); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries zstart = min(max(zstart, 0), depth); zend = min(max(zend, 0),zend); wstart = min(max(wstart, 0),width); hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart) || (zend <= zstart); int gz = floor(static_cast<DType>(pz)* group_size / pooled_depth); int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gz = min(max(gz, 0), group_size - 1); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = ((ctop*group_size + gz)*group_size + gh) + gw; const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width*depth; DType out_sum = 0; for(int z = zstart;z < zend; ++z){ for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = (z * height + h )*width + w; out_sum += offset_bottom_data[bottom_index]; } } } DType bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? (DType)0. 
: out_sum/bin_area; } } template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int output_dim_, const int group_size_) { const DType *bottom_data = data.dptr_; const DType *bottom_rois = bbox.dptr_; DType *top_data = out.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data); PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); } template <typename DType> __global__ void PSROIPoolBackwardAccKernel( const int count, const DType* top_diff, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int group_size, const int output_dim, DType* bottom_diff, const DType* bottom_rois) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; DType bin_area = (hend - hstart)*(wend - wstart); DType diff_val = is_empty ? (DType)0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; atomicAdd(offset_bottom_diff + bottom_index, diff_val); } } } } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int output_dim_, const int group_size_) { // LOG(INFO) << "PSROIPoolBackward"; const DType *top_diff = out_grad.dptr_; const DType *bottom_rois = bbox.dptr_; DType *bottom_diff = in_grad.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, group_size_, output_dim_, bottom_diff, bottom_rois); PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); } } // namespace cuda template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int output_dim_, const int group_size_) { cuda::PSROIPoolForward(out, data, bbox, spatial_scale, output_dim_, group_size_); } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int output_dim_, const int group_size_) { cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, spatial_scale, output_dim_, group_size_); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PSROIPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
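CUDA_KERNEL_LOOP above is the usual grid-stride loop: each thread starts at its global index and strides by the total number of launched threads, so a fixed grid can cover any element count. A minimal sketch of the same pattern outside the pooling code (kernel and wrapper names are illustrative):

#include <cuda_runtime.h>

// Grid-stride loop: thread i handles elements i, i + gridDim*blockDim, ...
__global__ void scale_kernel(float* data, int n, float factor) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    data[i] *= factor;
  }
}

// Example launch: the loop handles any remainder, so the block count only
// needs to be "enough", not exact.
void scale(float* d_data, int n, float factor) {
  int threads = 256;
  int blocks = (n + threads - 1) / threads;
  scale_kernel<<<blocks, threads>>>(d_data, n, factor);
}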
3c6b7f73cd19363b09acf0d7b8f7ef33e7ae147f.cu
/*! * Copyright (c) 2017 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file psroi_pooling.cu * \brief psroi pooling operator * \author Yi Li, Tairui Chen, Guodong Zhang, Haozhi Qi, Jifeng Dai */ #include "./psroi_pooling3d-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define PSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __global__ void PSROIPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int depth,const int height, const int width, const int pooled_depth,const int pooled_height, const int pooled_width, const DType* bottom_rois, const int output_dim, const int group_size, DType* top_data) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pz = index % pooled_depth; int pw = (index / pooled_depth) % pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int ctop = (index /pooled_depth/pooled_width / pooled_height) % output_dim; int n = index / pooled_depth / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_z = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[3])) * spatial_scale; DType roi_end_z = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[5]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[6]) + 1.)
* spatial_scale; // Force too small ROIs to be 1x1 DType roi_depth = max(roi_end_z - roi_start_z,0.1);//avoid 0 DType roi_width = max(roi_end_w - roi_start_w, 0.1); DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_z = roi_depth / static_cast<DType>(pooled_height); DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int zstart = floor(static_cast<DType>(pz) * bin_size_z + roi_start_z); int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int zend = ceil(static_cast<DType>(pz + 1) * bin_size_z + roi_start_z); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries zstart = min(max(zstart, 0), depth); zend = min(max(zend, 0),zend); wstart = min(max(wstart, 0),width); hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart) || (zend <= zstart); int gz = floor(static_cast<DType>(pz)* group_size / pooled_depth); int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gz = min(max(gz, 0), group_size - 1); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = ((ctop*group_size + gz)*group_size + gh) + gw; const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width*depth; DType out_sum = 0; for(int z = zstart;z < zend; ++z){ for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = (z * height + h )*width + w; out_sum += offset_bottom_data[bottom_index]; } } } DType bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? (DType)0. 
: out_sum/bin_area; } } template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int output_dim_, const int group_size_) { const DType *bottom_data = data.dptr_; const DType *bottom_rois = bbox.dptr_; DType *top_data = out.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data); PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError()); } template <typename DType> __global__ void PSROIPoolBackwardAccKernel( const int count, const DType* top_diff, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int group_size, const int output_dim, DType* bottom_diff, const DType* bottom_rois) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; DType bin_area = (hend - hstart)*(wend - wstart); DType diff_val = is_empty ? (DType)0. 
        : top_diff[index] / bin_area;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h * width + w;
        atomicAdd(offset_bottom_diff + bottom_index, diff_val);
      }
    }
  }
}

template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
                                 const Tensor<gpu, 4, DType> &out_grad,
                                 const Tensor<gpu, 2, DType> &bbox,
                                 const float spatial_scale,
                                 const int output_dim_,
                                 const int group_size_) {
  // LOG(INFO) << "PSROIPoolBackward";
  const DType *top_diff = out_grad.dptr_;
  const DType *bottom_rois = bbox.dptr_;
  DType *bottom_diff = in_grad.dptr_;
  const int count = out_grad.shape_.Size();
  const int num_rois = bbox.size(0);
  const int channels = in_grad.size(1);
  const int height = in_grad.size(2);
  const int width = in_grad.size(3);
  const int pooled_height = out_grad.size(2);
  const int pooled_width = out_grad.size(3);
  cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
  PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
      kBaseThreadNum, 0, stream >> >(
      count, top_diff, num_rois, spatial_scale, channels, height, width,
      pooled_height, pooled_width, group_size_, output_dim_, bottom_diff, bottom_rois);
  PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}

}  // namespace cuda

template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
                             const Tensor<gpu, 4, DType> &data,
                             const Tensor<gpu, 2, DType> &bbox,
                             const float spatial_scale,
                             const int output_dim_,
                             const int group_size_) {
  cuda::PSROIPoolForward(out, data, bbox, spatial_scale, output_dim_, group_size_);
}

template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
                                 const Tensor<gpu, 4, DType> &out_grad,
                                 const Tensor<gpu, 2, DType> &bbox,
                                 const float spatial_scale,
                                 const int output_dim_,
                                 const int group_size_) {
  cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, spatial_scale, output_dim_, group_size_);
}

}  // namespace mshadow

namespace mxnet {
namespace op {

template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
  Operator* op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new PSROIPoolingOp<gpu, DType>(param);
  });
  return op;
}

}  // namespace op
}  // namespace mxnet
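
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original operator: both kernels above
// decompose the flat thread index into (n, ctop, ph, pw), with pw varying
// fastest. The helper below (hypothetical, host-only) sanity-checks that
// decomposition for plain int sizes.
// ---------------------------------------------------------------------------
#include <cassert>

static inline void psroi_check_index_decomposition(int index,
                                                   int pooled_width,
                                                   int pooled_height,
                                                   int output_dim) {
  int pw   = index % pooled_width;                                   // fastest-varying
  int ph   = (index / pooled_width) % pooled_height;
  int ctop = (index / pooled_width / pooled_height) % output_dim;
  int n    = index / pooled_width / pooled_height / output_dim;
  // Recomposing the indices must recover the original flat index.
  assert(((n * output_dim + ctop) * pooled_height + ph) * pooled_width + pw == index);
}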
d56f7b116e7cd9af1cfb9388db05cbfc481ffdd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star2d2r-256-5-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } 
while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, 
__reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); 
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, 
__reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 
* 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, 
__reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); 
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, 
__reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
d56f7b116e7cd9af1cfb9388db05cbfc481ffdd9.cu
#include "star2d2r-256-5-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) 
? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); 
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, 
__reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, 
__reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
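// kernel0_2 fuses __side0Len = 2 time steps per sweep: each block streams the rows of its
// c1 tile through the register queues __reg_0_* (loaded input) and __reg_1_* (first-step
// results), stages the current row in the double-buffered shared line __c_sb_double so the
// +/-1 and +/-2 neighbours along c2 can be read via __SBREF, and only threads carrying the
// full 2 * __halo2 margin (__storeValid == __writeValid2) commit the second-step result
// through __STORE into __DEST.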
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
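// Editor-added sketch (not part of the generated AN5D sources above): the
// __DB_SWITCH macro in kernel0_1 keeps two halves of one __shared__ array and
// flips a pointer between them, so values a lagging thread is still reading in
// step i cannot be overwritten by the stores of step i+1; one __syncthreads()
// per step then suffices. The toy 3-point stencil below shows the same
// ping-pong idea with made-up names and sizes (no halo exchange; edge values
// are simply clamped at block boundaries), assuming an ordinary CUDA toolchain.
#include <cstdio>
#include <cuda_runtime.h>

#define BLOCK 128

__global__ void double_buffer_demo(const float *in, float *out, int n, int iters)
{
    __shared__ float sb_double[2 * BLOCK];
    float *sb = sb_double;                       // currently active half
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    float v = (gid < n) ? in[gid] : 0.0f;

    for (int it = 0; it < iters; ++it) {
        sb[threadIdx.x] = v;
        __syncthreads();                         // writes visible before reads
        float left  = (threadIdx.x > 0)         ? sb[threadIdx.x - 1] : v;
        float right = (threadIdx.x < BLOCK - 1) ? sb[threadIdx.x + 1] : v;
        v = 0.25f * left + 0.5f * v + 0.25f * right;
        // Flip to the other half, as __DB_SWITCH does: the next iteration's
        // stores land in the inactive half, so they cannot race with threads
        // that have not yet finished reading this one.
        sb = (sb == sb_double) ? sb_double + BLOCK : sb_double;
    }
    if (gid < n) out[gid] = v;
}

int main()
{
    const int n = 1 << 20;
    float *in = nullptr, *out = nullptr;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&out, n * sizeof(float));
    for (int i = 0; i < n; ++i) in[i] = float(i % 7);
    double_buffer_demo<<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(in, out, n, 4);
    cudaDeviceSynchronize();
    printf("out[1000] = %f\n", out[1000]);
    cudaFree(in);
    cudaFree(out);
    return 0;
}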
989ba583b1e688cc4940c6d873edef78fa923c9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #define THREADS_PER_BLOCK_CSR 32 namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // 
NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { 
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } template<typename Dtype> __device__ void caffe_gpu_csr_gemm_kernel_core(const int M, const int N, const int K, const Dtype alpha, int nzz, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, const int ldb1, const int ldb2, const Dtype beta, Dtype* C, const int ldc1, const int ldc2) { __shared__ volatile Dtype sums[THREADS_PER_BLOCK_CSR * 2]; for (int rowA = blockIdx.x; rowA < M; rowA += gridDim.x) { const int begin = ptr[rowA]; const int end = ptr[rowA + 1]; const int offset_c_part = rowA * ldc1; for (int colC = blockIdx.y; colC < N; colC += gridDim.y) { Dtype sum = 0.0; const int offset_b_part = colC * ldb2; for (int pos = begin + threadIdx.x; pos < end; pos += THREADS_PER_BLOCK_CSR) { const int colA = indices[pos]; sum += A[pos] * B[colA * ldb1 + offset_b_part]; } sums[threadIdx.x] = sum; __syncthreads(); /* hardcoded reduction for 32 threads */ sums[threadIdx.x] += sums[threadIdx.x + 16]; sums[threadIdx.x] += sums[threadIdx.x + 8]; sums[threadIdx.x] += sums[threadIdx.x + 4]; sums[threadIdx.x] += sums[threadIdx.x + 2]; sums[threadIdx.x] += sums[threadIdx.x + 1]; if (threadIdx.x == 0) { const int offsetC = offset_c_part + colC * ldc2; C[offsetC] = beta * C[offsetC] + alpha * sums[0]; } } } } template<typename Dtype> __global__ void caffe_gpu_csr_gemm_kernel(const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const Dtype alpha, int nzz, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, const Dtype beta, Dtype* C, const CBLAS_ORDER orderC) { if (orderC == CblasRowMajor) { if (TransB == CblasNoTrans) { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N, 1, beta, C, N, 1); } else { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1, K, beta, C, N, 1); } } else { if (TransB == CblasNoTrans) { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N, 1, beta, C, 1, M); } else { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1, K, beta, C, 1, M); } } } template<typename Dtype> __device__ void caffe_gpu_csr_rank1_update_kernel_core(const int M, const int N, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const int ldc1, const int ldc2) { const int begin = ptr[0]; const int end = ptr[1]; for (int pos = blockIdx.x * blockDim.x + begin + threadIdx.x; pos < end; pos += blockDim.x * gridDim.x) { const Dtype valA = A[pos] * alpha; const int offset_part = indices[pos] * ldc1; for (int colC = blockIdx.y * blockDim.y + threadIdx.y; colC < N; colC += blockDim.y * gridDim.y) { const int C_offset = offset_part + colC * ldc2; C[C_offset] = C[C_offset] + B[colC * ldb] * valA; } } } // C = alpha A * B^T + C where A and B are vectors. 
// A is a sprase vector and B is a dense vector template<typename Dtype> __device__ void caffe_gpu_csr_rank1_update_kernel(const int M, const int N, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) { if (orderC == CblasRowMajor) { caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb, C, N, 1); } else { caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb, C, 1, M); } } template<typename Dtype> __global__ void caffe_gpu_csr_rank1_update_kernel_multi( const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) { if (TransB == CblasNoTrans) { for (int i = 0; i < K; i++) { caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + (N * i), 1, C, orderC); } } else { for (int i = 0; i < K; i++) { caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + i, K, C, orderC); } } } template<> void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta, float* C, const CBLAS_ORDER orderC) { if (TransA == CblasNoTrans) { dim3 grids(M, N); dim3 threads(THREADS_PER_BLOCK_CSR, 1); caffe_gpu_csr_gemm_kernel<float><< <grids, threads>>>(TransB, M, N, K, alpha, nzz, A, indices, ptr, B, beta, C, orderC); } else { // scale C by beta if (beta != 1.0) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle() , M * N, &beta, C, 1)); } const int average_nzz_per_row = nzz/K+1; dim3 grids((average_nzz_per_row+64-1)/64, N); dim3 threads(64, 1); caffe_gpu_csr_rank1_update_kernel_multi<float><< <grids, threads>>>(TransB, M, N, K, alpha, A, indices, ptr , B, 1, C, orderC); } } template<> void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta, double* C, const CBLAS_ORDER orderC) { if (TransA == CblasNoTrans) { dim3 grids(M, N); dim3 threads(THREADS_PER_BLOCK_CSR, 1); caffe_gpu_csr_gemm_kernel<double><< <grids, threads>>> (TransB, M, N, K, alpha, nzz, A, indices, ptr, B, beta, C, orderC); } else { // scale C by beta if (beta != 1.0) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle() , M * N, &beta, C, 1)); } const int average_nzz_per_row = nzz/K+1; dim3 grids((average_nzz_per_row+64-1)/64, N); dim3 threads(64, 1); caffe_gpu_csr_rank1_update_kernel_multi<double><< <grids, threads>>>(TransB, M, N, K, alpha, A, indices, ptr , B, 1, C, orderC); } } /* Other implementation using cusparse that is very slow at least using it like this template <> void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta, float* C, const CBLAS_ORDER orderC) { //std::cout << "M: " << M << " N: " << N << " K: " << K << " NZZ: " << nzz <<"\n" ; int ldb = (TransB == CblasNoTrans) ? N : K; hipsparseOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; hipsparseOperation_t cuTransB = (TransB == CblasNoTrans) ? 
HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE; float* Bt; int ldb_t; bool reuiqre_transpose_B = (cuTransA == HIPSPARSE_OPERATION_TRANSPOSE) && (cuTransB == HIPSPARSE_OPERATION_TRANSPOSE); if (reuiqre_transpose_B){ //we need to transpose B because this operation is not supported by cusparse (god knows why) ldb_t = K; const float zero = 0.0; const float one = 1.0; CUDA_CHECK(hipMalloc((void**)&Bt, sizeof(float)*K*N)); CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t)); } int msparse = (TransA == CblasNoTrans) ? M : K; int ksparse = (TransA == CblasNoTrans) ? K : M; if (orderC == CblasRowMajor){ float* Ct; CUDA_CHECK(hipMalloc((void**)&Ct, sizeof(float)*M*N)); const float zero = 0.0; const float one = 1.0; if (reuiqre_transpose_B){ CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M)); CUDA_CHECK(hipFree(Bt)); }else{ CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M)); } CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T , HIPBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N)); CUDA_CHECK(hipFree(Ct)); }else{ //this is the default of CUSPARSE by the Matrix B is by default rowmajor if (reuiqre_transpose_B){ CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M)); CUDA_CHECK(hipFree(Bt)); }else{ CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M)); } } } template <> void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta, double* C, const CBLAS_ORDER orderC) { //std::cout << "M: " << M << "N: " << N << "K: " << K << "NZZ: " << nzz ; int ldb = (TransB == CblasNoTrans) ? N : K; hipsparseOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; hipsparseOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE; double* Bt; int ldb_t; bool reuiqre_transpose_B = (cuTransA == HIPSPARSE_OPERATION_TRANSPOSE) && (cuTransB == HIPSPARSE_OPERATION_TRANSPOSE); if (reuiqre_transpose_B){ //we need to transpose B because this operation is not supported by cusparse (god knows why) ldb_t = K; const double zero = 0.0; const double one = 1.0; CUDA_CHECK(hipMalloc((void**)&Bt, sizeof(double)*K*N)); CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t)); } int msparse = (TransA == CblasNoTrans) ? M : K; int ksparse = (TransA == CblasNoTrans) ? 
K : M; if (orderC == CblasRowMajor){ double* Ct; CUDA_CHECK(hipMalloc((void**)&Ct, sizeof(double)*M*N)); const double zero = 0.0; const double one = 1.0; if (reuiqre_transpose_B){ CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M)); CUDA_CHECK(hipFree(Bt)); }else{ CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M)); } CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T , HIPBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N)); CUDA_CHECK(hipFree(Ct)); }else{ //this is the default of CUSPARSE by the Matrix B is by default rowmajor if (reuiqre_transpose_B){ CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M)); CUDA_CHECK(hipFree(Bt)); }else{ CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M)); } } } */ } // namespace caffe
989ba583b1e688cc4940c6d873edef78fa923c9c.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #define THREADS_PER_BLOCK_CSR 32 namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void 
caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), 
CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } template<typename Dtype> __device__ void caffe_gpu_csr_gemm_kernel_core(const int M, const int N, const int K, const Dtype alpha, int nzz, const Dtype* A, const int* indices, 
const int* ptr, const Dtype* B, const int ldb1, const int ldb2, const Dtype beta, Dtype* C, const int ldc1, const int ldc2) { __shared__ volatile Dtype sums[THREADS_PER_BLOCK_CSR * 2]; for (int rowA = blockIdx.x; rowA < M; rowA += gridDim.x) { const int begin = ptr[rowA]; const int end = ptr[rowA + 1]; const int offset_c_part = rowA * ldc1; for (int colC = blockIdx.y; colC < N; colC += gridDim.y) { Dtype sum = 0.0; const int offset_b_part = colC * ldb2; for (int pos = begin + threadIdx.x; pos < end; pos += THREADS_PER_BLOCK_CSR) { const int colA = indices[pos]; sum += A[pos] * B[colA * ldb1 + offset_b_part]; } sums[threadIdx.x] = sum; __syncthreads(); /* hardcoded reduction for 32 threads */ sums[threadIdx.x] += sums[threadIdx.x + 16]; sums[threadIdx.x] += sums[threadIdx.x + 8]; sums[threadIdx.x] += sums[threadIdx.x + 4]; sums[threadIdx.x] += sums[threadIdx.x + 2]; sums[threadIdx.x] += sums[threadIdx.x + 1]; if (threadIdx.x == 0) { const int offsetC = offset_c_part + colC * ldc2; C[offsetC] = beta * C[offsetC] + alpha * sums[0]; } } } } template<typename Dtype> __global__ void caffe_gpu_csr_gemm_kernel(const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const Dtype alpha, int nzz, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, const Dtype beta, Dtype* C, const CBLAS_ORDER orderC) { if (orderC == CblasRowMajor) { if (TransB == CblasNoTrans) { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N, 1, beta, C, N, 1); } else { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1, K, beta, C, N, 1); } } else { if (TransB == CblasNoTrans) { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N, 1, beta, C, 1, M); } else { caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1, K, beta, C, 1, M); } } } template<typename Dtype> __device__ void caffe_gpu_csr_rank1_update_kernel_core(const int M, const int N, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const int ldc1, const int ldc2) { const int begin = ptr[0]; const int end = ptr[1]; for (int pos = blockIdx.x * blockDim.x + begin + threadIdx.x; pos < end; pos += blockDim.x * gridDim.x) { const Dtype valA = A[pos] * alpha; const int offset_part = indices[pos] * ldc1; for (int colC = blockIdx.y * blockDim.y + threadIdx.y; colC < N; colC += blockDim.y * gridDim.y) { const int C_offset = offset_part + colC * ldc2; C[C_offset] = C[C_offset] + B[colC * ldb] * valA; } } } // C = alpha A * B^T + C where A and B are vectors. 
// A is a sprase vector and B is a dense vector template<typename Dtype> __device__ void caffe_gpu_csr_rank1_update_kernel(const int M, const int N, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) { if (orderC == CblasRowMajor) { caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb, C, N, 1); } else { caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb, C, 1, M); } } template<typename Dtype> __global__ void caffe_gpu_csr_rank1_update_kernel_multi( const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const Dtype alpha, const Dtype* A, const int* indices, const int* ptr, const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) { if (TransB == CblasNoTrans) { for (int i = 0; i < K; i++) { caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + (N * i), 1, C, orderC); } } else { for (int i = 0; i < K; i++) { caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + i, K, C, orderC); } } } template<> void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta, float* C, const CBLAS_ORDER orderC) { if (TransA == CblasNoTrans) { dim3 grids(M, N); dim3 threads(THREADS_PER_BLOCK_CSR, 1); caffe_gpu_csr_gemm_kernel<float><< <grids, threads>>>(TransB, M, N, K, alpha, nzz, A, indices, ptr, B, beta, C, orderC); } else { // scale C by beta if (beta != 1.0) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle() , M * N, &beta, C, 1)); } const int average_nzz_per_row = nzz/K+1; dim3 grids((average_nzz_per_row+64-1)/64, N); dim3 threads(64, 1); caffe_gpu_csr_rank1_update_kernel_multi<float><< <grids, threads>>>(TransB, M, N, K, alpha, A, indices, ptr , B, 1, C, orderC); } } template<> void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta, double* C, const CBLAS_ORDER orderC) { if (TransA == CblasNoTrans) { dim3 grids(M, N); dim3 threads(THREADS_PER_BLOCK_CSR, 1); caffe_gpu_csr_gemm_kernel<double><< <grids, threads>>> (TransB, M, N, K, alpha, nzz, A, indices, ptr, B, beta, C, orderC); } else { // scale C by beta if (beta != 1.0) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle() , M * N, &beta, C, 1)); } const int average_nzz_per_row = nzz/K+1; dim3 grids((average_nzz_per_row+64-1)/64, N); dim3 threads(64, 1); caffe_gpu_csr_rank1_update_kernel_multi<double><< <grids, threads>>>(TransB, M, N, K, alpha, A, indices, ptr , B, 1, C, orderC); } } /* Other implementation using cusparse that is very slow at least using it like this template <> void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta, float* C, const CBLAS_ORDER orderC) { //std::cout << "M: " << M << " N: " << N << " K: " << K << " NZZ: " << nzz <<"\n" ; int ldb = (TransB == CblasNoTrans) ? N : K; cusparseOperation_t cuTransA = (TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE; cusparseOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; float* Bt; int ldb_t; bool reuiqre_transpose_B = (cuTransA == CUSPARSE_OPERATION_TRANSPOSE) && (cuTransB == CUSPARSE_OPERATION_TRANSPOSE); if (reuiqre_transpose_B){ //we need to transpose B because this operation is not supported by cusparse (god knows why) ldb_t = K; const float zero = 0.0; const float one = 1.0; CUDA_CHECK(cudaMalloc((void**)&Bt, sizeof(float)*K*N)); CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t)); } int msparse = (TransA == CblasNoTrans) ? M : K; int ksparse = (TransA == CblasNoTrans) ? K : M; if (orderC == CblasRowMajor){ float* Ct; CUDA_CHECK(cudaMalloc((void**)&Ct, sizeof(float)*M*N)); const float zero = 0.0; const float one = 1.0; if (reuiqre_transpose_B){ CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M)); CUDA_CHECK(cudaFree(Bt)); }else{ CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M)); } CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T , CUBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N)); CUDA_CHECK(cudaFree(Ct)); }else{ //this is the default of CUSPARSE by the Matrix B is by default rowmajor if (reuiqre_transpose_B){ CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M)); CUDA_CHECK(cudaFree(Bt)); }else{ CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M)); } } } template <> void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta, double* C, const CBLAS_ORDER orderC) { //std::cout << "M: " << M << "N: " << N << "K: " << K << "NZZ: " << nzz ; int ldb = (TransB == CblasNoTrans) ? N : K; cusparseOperation_t cuTransA = (TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE; cusparseOperation_t cuTransB = (TransB == CblasNoTrans) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; double* Bt; int ldb_t; bool reuiqre_transpose_B = (cuTransA == CUSPARSE_OPERATION_TRANSPOSE) && (cuTransB == CUSPARSE_OPERATION_TRANSPOSE); if (reuiqre_transpose_B){ //we need to transpose B because this operation is not supported by cusparse (god knows why) ldb_t = K; const double zero = 0.0; const double one = 1.0; CUDA_CHECK(cudaMalloc((void**)&Bt, sizeof(double)*K*N)); CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t)); } int msparse = (TransA == CblasNoTrans) ? M : K; int ksparse = (TransA == CblasNoTrans) ? 
K : M; if (orderC == CblasRowMajor){ double* Ct; CUDA_CHECK(cudaMalloc((void**)&Ct, sizeof(double)*M*N)); const double zero = 0.0; const double one = 1.0; if (reuiqre_transpose_B){ CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M)); CUDA_CHECK(cudaFree(Bt)); }else{ CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M)); } CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T , CUBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N)); CUDA_CHECK(cudaFree(Ct)); }else{ //this is the default of CUSPARSE by the Matrix B is by default rowmajor if (reuiqre_transpose_B){ CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M)); CUDA_CHECK(cudaFree(Bt)); }else{ CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M)); } } } */ } // namespace caffe
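// Standalone sketch appended for illustration (it is not part of either file in
// the pair above): the caffe_gpu_gemm wrappers note that cuBLAS is column-major
// ("fortran order") and therefore compute a row-major C = A*B as C^T = B^T*A^T,
// swapping the operand order and the M/N arguments. The tiny matrix sizes below
// are arbitrary; only standard CUDA runtime and cuBLAS v2 calls are used, and
// status checks are omitted for brevity.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main()
{
    const int M = 2, K = 3, N = 4;                      // row-major C is MxN
    std::vector<float> hA = {1, 2, 3,
                             4, 5, 6};                  // MxK, row-major
    std::vector<float> hB(K * N);
    for (int i = 0; i < K * N; ++i) hB[i] = float(i + 1);  // KxN, row-major
    std::vector<float> hC(M * N, 0.0f);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, hA.size() * sizeof(float));
    cudaMalloc(&dB, hB.size() * sizeof(float));
    cudaMalloc(&dC, hC.size() * sizeof(float));
    cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), hB.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC.data(), hC.size() * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Row-major C = A*B  <=>  column-major C^T = B^T * A^T, hence B is passed
    // first and the m/n arguments are N, M -- the same call shape as
    // caffe_gpu_gemm<float> with lda = K, ldb = N, ldc = N for the no-trans case.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K, &alpha, dB, N, dA, K, &beta, dC, N);

    cudaMemcpy(hC.data(), dC, hC.size() * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) printf("%6.1f ", hC[i * N + j]);
        printf("\n");
    }
    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}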
a134e45f19f179ac8f379d3fcee6d679701e0791.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
a134e45f19f179ac8f379d3fcee6d679701e0791.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
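// Illustrative sketch (not one of the generated translation units above) of the
// guard pattern the fmha_cutlassF_* kernels use: __launch_bounds__ pins the
// block size and a minimum-blocks-per-SM hint so the compiler budgets registers
// accordingly, and nested __CUDA_ARCH__ checks replace the body with a
// diagnostic printf when the file is compiled for an unsupported architecture.
// The kernel name and the 256-thread / 2-block hint here are hypothetical.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void __launch_bounds__(256, 2)
arch_guarded_demo(float *out, int n)
{
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = static_cast<float>(i);
  }
  return;
#endif
  // Reached only when this translation unit targets an arch below sm80.
  printf(
      "FATAL: kernel `arch_guarded_demo` requires sm80+, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

int main()
{
  const int n = 1024;
  float *out = nullptr;
  cudaMalloc(&out, n * sizeof(float));
  arch_guarded_demo<<<(n + 255) / 256, 256>>>(out, n);
  cudaDeviceSynchronize();
  cudaFree(out);
  return 0;
}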
5da67e0537a4fe54ff8afeaa6b43b84356e26091.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel2_z; int xdim0_advec_mom_kernel2_z_h = -1; __constant__ int ydim0_advec_mom_kernel2_z; int ydim0_advec_mom_kernel2_z_h = -1; __constant__ int xdim1_advec_mom_kernel2_z; int xdim1_advec_mom_kernel2_z_h = -1; __constant__ int ydim1_advec_mom_kernel2_z; int ydim1_advec_mom_kernel2_z_h = -1; __constant__ int xdim2_advec_mom_kernel2_z; int xdim2_advec_mom_kernel2_z_h = -1; __constant__ int ydim2_advec_mom_kernel2_z; int ydim2_advec_mom_kernel2_z_h = -1; __constant__ int xdim3_advec_mom_kernel2_z; int xdim3_advec_mom_kernel2_z_h = -1; __constant__ int ydim3_advec_mom_kernel2_z; int ydim3_advec_mom_kernel2_z_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel2_z*(y)+xdim0_advec_mom_kernel2_z*ydim0_advec_mom_kernel2_z*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel2_z*(y)+xdim1_advec_mom_kernel2_z*ydim1_advec_mom_kernel2_z*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel2_z*(y)+xdim2_advec_mom_kernel2_z*ydim2_advec_mom_kernel2_z*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel2_z*(y)+xdim3_advec_mom_kernel2_z*ydim3_advec_mom_kernel2_z*(z)) //user function __device__ inline void advec_mom_kernel2_z_gpu( double *vel1, const double *node_mass_post, const double *node_mass_pre, const double *mom_flux) { vel1[OPS_ACC0(0,0,0)] = ( vel1[OPS_ACC0(0,0,0)] * node_mass_pre[OPS_ACC2(0,0,0)] + mom_flux[OPS_ACC3(0,0,-1)] - mom_flux[OPS_ACC3(0,0,0)] ) / node_mass_post[OPS_ACC1(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel2_z( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel2_z + idx_z * 1*1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel2_z + idx_z * 1*1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel2_z + idx_z * 1*1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel2_z + idx_z * 1*1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel2_z_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,137)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(137,"advec_mom_kernel2_z"); OPS_kernels[137].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = 
OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel2_z_h || ydim0 != ydim0_advec_mom_kernel2_z_h || xdim1 != xdim1_advec_mom_kernel2_z_h || ydim1 != ydim1_advec_mom_kernel2_z_h || xdim2 != xdim2_advec_mom_kernel2_z_h || ydim2 != ydim2_advec_mom_kernel2_z_h || xdim3 != xdim3_advec_mom_kernel2_z_h || ydim3 != ydim3_advec_mom_kernel2_z_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel2_z_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel2_z_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel2_z_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel2_z_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel2_z_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel2_z_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel2_z_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel2_z_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[137].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel2_z), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[137].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[137].mpi_time += t2-t1; OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 137; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 137; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_advec_mom_kernel2_z_execute; if (OPS_diags > 1) { ops_timing_realloc(137,"advec_mom_kernel2_z"); } ops_enqueue_kernel(desc); } #endif
5da67e0537a4fe54ff8afeaa6b43b84356e26091.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel2_z; int xdim0_advec_mom_kernel2_z_h = -1; __constant__ int ydim0_advec_mom_kernel2_z; int ydim0_advec_mom_kernel2_z_h = -1; __constant__ int xdim1_advec_mom_kernel2_z; int xdim1_advec_mom_kernel2_z_h = -1; __constant__ int ydim1_advec_mom_kernel2_z; int ydim1_advec_mom_kernel2_z_h = -1; __constant__ int xdim2_advec_mom_kernel2_z; int xdim2_advec_mom_kernel2_z_h = -1; __constant__ int ydim2_advec_mom_kernel2_z; int ydim2_advec_mom_kernel2_z_h = -1; __constant__ int xdim3_advec_mom_kernel2_z; int xdim3_advec_mom_kernel2_z_h = -1; __constant__ int ydim3_advec_mom_kernel2_z; int ydim3_advec_mom_kernel2_z_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel2_z*(y)+xdim0_advec_mom_kernel2_z*ydim0_advec_mom_kernel2_z*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel2_z*(y)+xdim1_advec_mom_kernel2_z*ydim1_advec_mom_kernel2_z*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel2_z*(y)+xdim2_advec_mom_kernel2_z*ydim2_advec_mom_kernel2_z*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel2_z*(y)+xdim3_advec_mom_kernel2_z*ydim3_advec_mom_kernel2_z*(z)) //user function __device__ inline void advec_mom_kernel2_z_gpu( double *vel1, const double *node_mass_post, const double *node_mass_pre, const double *mom_flux) { vel1[OPS_ACC0(0,0,0)] = ( vel1[OPS_ACC0(0,0,0)] * node_mass_pre[OPS_ACC2(0,0,0)] + mom_flux[OPS_ACC3(0,0,-1)] - mom_flux[OPS_ACC3(0,0,0)] ) / node_mass_post[OPS_ACC1(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel2_z( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel2_z + idx_z * 1*1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel2_z + idx_z * 1*1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel2_z + idx_z * 1*1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel2_z + idx_z * 1*1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel2_z_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_z_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,137)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(137,"advec_mom_kernel2_z"); OPS_kernels[137].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ 
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel2_z_h || ydim0 != ydim0_advec_mom_kernel2_z_h || xdim1 != xdim1_advec_mom_kernel2_z_h || ydim1 != ydim1_advec_mom_kernel2_z_h || xdim2 != xdim2_advec_mom_kernel2_z_h || ydim2 != ydim2_advec_mom_kernel2_z_h || xdim3 != xdim3_advec_mom_kernel2_z_h || ydim3 != ydim3_advec_mom_kernel2_z_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel2_z_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel2_z_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel2_z_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel2_z_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel2_z_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel2_z_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel2_z_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel2_z_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[137].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel2_z<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[137].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[137].mpi_time += t2-t1; OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 137; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 137; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel2_z_execute; if (OPS_diags > 1) { 
ops_timing_realloc(137,"advec_mom_kernel2_z"); } ops_enqueue_kernel(desc); } #endif
0c9b4651129756d4b5d609be0e5e41f5e19a5d5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "partitions.cuh" #include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> namespace NKernel { __global__ void UpdatePartitionSizes(TDataPartition* parts, ui32 partCount, const int* sortedBins, ui32 size) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { int bin0 = __ldg(sortedBins + i); int bin1 = i ? __ldg(sortedBins + i - 1) : 0; if (bin0 != bin1) { int b = bin1; while (b < bin0) { parts[b].Size = i - parts[b].Offset; b++; } } if ((i + 1) == size) { parts[bin0].Size = size - parts[bin0].Offset; int b = bin0 + 1; while (b < partCount) { parts[b].Size = 0; b++; } } i += blockDim.x * gridDim.x; } } __global__ void ComputeSizes(ui32* beginOffsets, ui32* endOffsets, ui32 count, float* dst) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < count) { dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]); } } struct TPartitionOffsetWriter { using TStorageType = TDataPartition; TDataPartition* Parts; __device__ TPartitionOffsetWriter(TDataPartition* parts) : Parts(parts) { } __device__ void Write(ui32 bin, ui32 offset) { Parts[bin].Offset = offset; } }; struct TVecOffsetWriter { using TStorageType = ui32; ui32* BinOffsets; __device__ TVecOffsetWriter(ui32* offsets) : BinOffsets(offsets) { } __device__ void Write(ui32 bin, ui32 offset) { BinOffsets[bin] = offset; } }; template <class TWriter, bool DONT_WRITE_EMPTY_SUFFIX> __global__ void UpdatePartitionOffsets(typename TWriter::TStorageType* parts, ui32 partCount, const int* sortedBins, ui32 size) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; int lastBin = DONT_WRITE_EMPTY_SUFFIX ? LdgWithFallback(sortedBins + size - 1, 0) : 1 << 31; TWriter writer(parts); while (i < size) { int bin0 = __ldg(sortedBins + i); int bin1 = i ? 
__ldg(sortedBins + i - 1) : -1; if (bin0 != bin1) { int b = bin0; while (b > bin1) { writer.Write(b, i); b--; } } if (i == (size - 1)) { int b = bin0 + 1; while (b < min(lastBin, partCount)) { writer.Write(b, size); b++; } } i += blockDim.x * gridDim.x; } } __global__ void ZeroPartitions(TDataPartition* __restrict parts, ui32 partCount) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < partCount) { parts[i].Size = 0; parts[i].Offset = 0; i += blockDim.x * gridDim.x; } } void UpdatePartitionDimensions(TDataPartition* parts, ui32 partCount, const ui32* sortedBins, ui32 size, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount()); if (numBlocks) { UpdatePartitionOffsets<TPartitionOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size); UpdatePartitionSizes << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size); } else { const ui32 numBlocksClear = (partCount + blockSize - 1) / blockSize; hipLaunchKernelGGL(( ZeroPartitions), dim3(numBlocksClear), dim3(blockSize), 0, stream, parts, partCount); } } __global__ void ComputeSegmentSizesImpl(const ui32* beginOffsets, const ui32* endOffsets, ui32 count, float* dst) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < count) { dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]); } } void ComputeSegmentSizes(const ui32* offsets, ui32 size, float* dst, TCudaStream stream) { size -= 1; const ui32* begin = offsets; const ui32* end = offsets + 1; const ui32 blockSize = 256; const ui32 numBlocks = (size + blockSize - 1) / blockSize; hipLaunchKernelGGL(( ComputeSegmentSizesImpl) , dim3(numBlocks), dim3(blockSize), 0, stream , begin, end, size, dst); } void UpdatePartitionOffsets(ui32* partOffsets, ui32 partCount, const ui32* sortedBins, ui32 size, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount()); //partOffsets are copyMapping of bins, usually with empty tail bool skipSuffixBins = false; if (numBlocks) { if (partCount == size) { FillBuffer(partOffsets, size, size, stream); skipSuffixBins = true; } if (skipSuffixBins) { UpdatePartitionOffsets<TVecOffsetWriter, true> << < numBlocks, blockSize, 0, stream >>>(partOffsets, partCount, (int*) sortedBins, size); } else { UpdatePartitionOffsets<TVecOffsetWriter, false> << < numBlocks, blockSize, 0, stream >>>(partOffsets, partCount, (int*) sortedBins, size); } } else { FillBuffer(partOffsets, static_cast<ui32>(0), partCount, stream); } } }
0c9b4651129756d4b5d609be0e5e41f5e19a5d5d.cu
#include "partitions.cuh" #include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> namespace NKernel { __global__ void UpdatePartitionSizes(TDataPartition* parts, ui32 partCount, const int* sortedBins, ui32 size) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < size) { int bin0 = __ldg(sortedBins + i); int bin1 = i ? __ldg(sortedBins + i - 1) : 0; if (bin0 != bin1) { int b = bin1; while (b < bin0) { parts[b].Size = i - parts[b].Offset; b++; } } if ((i + 1) == size) { parts[bin0].Size = size - parts[bin0].Offset; int b = bin0 + 1; while (b < partCount) { parts[b].Size = 0; b++; } } i += blockDim.x * gridDim.x; } } __global__ void ComputeSizes(ui32* beginOffsets, ui32* endOffsets, ui32 count, float* dst) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < count) { dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]); } } struct TPartitionOffsetWriter { using TStorageType = TDataPartition; TDataPartition* Parts; __device__ TPartitionOffsetWriter(TDataPartition* parts) : Parts(parts) { } __device__ void Write(ui32 bin, ui32 offset) { Parts[bin].Offset = offset; } }; struct TVecOffsetWriter { using TStorageType = ui32; ui32* BinOffsets; __device__ TVecOffsetWriter(ui32* offsets) : BinOffsets(offsets) { } __device__ void Write(ui32 bin, ui32 offset) { BinOffsets[bin] = offset; } }; template <class TWriter, bool DONT_WRITE_EMPTY_SUFFIX> __global__ void UpdatePartitionOffsets(typename TWriter::TStorageType* parts, ui32 partCount, const int* sortedBins, ui32 size) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; int lastBin = DONT_WRITE_EMPTY_SUFFIX ? LdgWithFallback(sortedBins + size - 1, 0) : 1 << 31; TWriter writer(parts); while (i < size) { int bin0 = __ldg(sortedBins + i); int bin1 = i ? 
__ldg(sortedBins + i - 1) : -1; if (bin0 != bin1) { int b = bin0; while (b > bin1) { writer.Write(b, i); b--; } } if (i == (size - 1)) { int b = bin0 + 1; while (b < min(lastBin, partCount)) { writer.Write(b, size); b++; } } i += blockDim.x * gridDim.x; } } __global__ void ZeroPartitions(TDataPartition* __restrict parts, ui32 partCount) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < partCount) { parts[i].Size = 0; parts[i].Offset = 0; i += blockDim.x * gridDim.x; } } void UpdatePartitionDimensions(TDataPartition* parts, ui32 partCount, const ui32* sortedBins, ui32 size, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount()); if (numBlocks) { UpdatePartitionOffsets<TPartitionOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size); UpdatePartitionSizes << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size); } else { const ui32 numBlocksClear = (partCount + blockSize - 1) / blockSize; ZeroPartitions<<<numBlocksClear, blockSize, 0, stream>>>(parts, partCount); } } __global__ void ComputeSegmentSizesImpl(const ui32* beginOffsets, const ui32* endOffsets, ui32 count, float* dst) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < count) { dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]); } } void ComputeSegmentSizes(const ui32* offsets, ui32 size, float* dst, TCudaStream stream) { size -= 1; const ui32* begin = offsets; const ui32* end = offsets + 1; const ui32 blockSize = 256; const ui32 numBlocks = (size + blockSize - 1) / blockSize; ComputeSegmentSizesImpl <<< numBlocks, blockSize, 0, stream >>> (begin, end, size, dst); } void UpdatePartitionOffsets(ui32* partOffsets, ui32 partCount, const ui32* sortedBins, ui32 size, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount()); //partOffsets are copyMapping of bins, usually with empty tail bool skipSuffixBins = false; if (numBlocks) { if (partCount == size) { FillBuffer(partOffsets, size, size, stream); skipSuffixBins = true; } if (skipSuffixBins) { UpdatePartitionOffsets<TVecOffsetWriter, true> << < numBlocks, blockSize, 0, stream >>>(partOffsets, partCount, (int*) sortedBins, size); } else { UpdatePartitionOffsets<TVecOffsetWriter, false> << < numBlocks, blockSize, 0, stream >>>(partOffsets, partCount, (int*) sortedBins, size); } } else { FillBuffer(partOffsets, static_cast<ui32>(0), partCount, stream); } } }
526e836a9cdf0d972d81f8a463150538efbce00a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Note: .cu extension #include <iostream> #include "cudaTools.h" #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __host__ bool helloCuda(void); //__host__ optional /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ static void kernelHello(void); __device__ static void doSomethingHello(void); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * http://www.icl.utk.edu/~mgates3/docs/cuda.html */ __host__ bool helloCuda(void) //__host__ optional { cout << endl << "[Hello Cuda] : kernel empty" << endl; // Specify the number of threads: here 1 thread in total! dim3 dg = dim3(1, 1, 1); dim3 db = dim3(1, 1, 1); //Device::gridHeuristic(dg, db); hipLaunchKernelGGL(( kernelHello), dim3(dg),dim3(db), 0, 0, ); // asynchronous !! return true; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /** * output : void */ __global__ void kernelHello(void) { doSomethingHello(); } /** * Can be called only by the device * inlined by nvcc (the NVIDIA compiler) */ __device__ void doSomethingHello(void) { // nothing } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
526e836a9cdf0d972d81f8a463150538efbce00a.cu
// Note: .cu extension #include <iostream> #include "cudaTools.h" #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __host__ bool helloCuda(void); //__host__ optional /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ static void kernelHello(void); __device__ static void doSomethingHello(void); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * http://www.icl.utk.edu/~mgates3/docs/cuda.html */ __host__ bool helloCuda(void) //__host__ optional { cout << endl << "[Hello Cuda] : kernel empty" << endl; // Specify the number of threads: here 1 thread in total! dim3 dg = dim3(1, 1, 1); dim3 db = dim3(1, 1, 1); //Device::gridHeuristic(dg, db); kernelHello<<<dg,db>>>(); // asynchronous !! return true; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /** * output : void */ __global__ void kernelHello(void) { doSomethingHello(); } /** * Can be called only by the device * inlined by nvcc (the NVIDIA compiler) */ __device__ void doSomethingHello(void) { // nothing } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
c63174e294948eb70362d9d1084ba5f203abd708.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/imageProcessing/imageProcessing.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" namespace Saiga { namespace CUDA { __constant__ float d_Kernel[3][3]; template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int gx = x + dx; int gy = y + dy; src.clampToEdge(gy, gx); sum += src(gy, gx); } } dst(y, x) = sum; } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3Shared(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int blockStartX = x_tile - 1; const unsigned int blockStartY = y_tile - 1; const unsigned int TILE_SIZE = TILE_H * TILE_W; const unsigned int TILE_SIZE_WITH_BORDER = (TILE_H + 2) * (TILE_W + 2); __shared__ float sbuffer[TILE_H + 2][TILE_W + 2]; for (int i = t; i < TILE_SIZE_WITH_BORDER; i += TILE_SIZE) { int x = i % (TILE_W + 2); int y = i / (TILE_W + 2); int gx = x + blockStartX; int gy = y + blockStartY; src.clampToEdge(gy, gx); sbuffer[y][x] = src(gy, gx); } __syncthreads(); float sum = 0; #if 1 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int x = tx + 1 + dx; int y = ty + 1 + dy; sum += sbuffer[y][x]; } } #endif const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; dst(y, x) = sum; } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3Shared2(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; const unsigned int t = ty * TILE_W + tx; const unsigned int warp_lane = t / 32; const unsigned int lane_id = t & 31; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H * 2; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H * 2 + 2][TILE_W + 2]; // copy main data for (int i = 0; i < 2; ++i) sbuffer[ty + i * TILE_H + 1][tx + 1] = src.clampedRead(y + i * TILE_H, x); // top halo if (warp_lane == 0) { sbuffer[0][lane_id + 1] = src.clampedRead(y_tile - 1, x_tile + lane_id); } // bottom if (warp_lane == 1) { sbuffer[TILE_H * 2 + 1][lane_id + 1] = src.clampedRead(y_tile + TILE_H * 2, x_tile + lane_id); } // left if (warp_lane == 2) { sbuffer[lane_id + 1][0] = src.clampedRead(y_tile + lane_id, x_tile - 1); } // right if (warp_lane == 3) { sbuffer[lane_id + 1][TILE_W + 1] = src.clampedRead(y_tile + lane_id, x_tile + TILE_W); } // corners if (warp_lane == 4) { if (lane_id == 0) sbuffer[0][0] = src.clampedRead(y_tile - 1, x_tile - 1); if (lane_id == 1) sbuffer[0][TILE_W + 1] = src.clampedRead(y_tile - 1, x_tile + TILE_W); if (lane_id == 
2) sbuffer[TILE_H * 2 + 1][0] = src.clampedRead(y_tile + TILE_H * 2, x_tile - 1); if (lane_id == 3) sbuffer[TILE_H * 2 + 1][TILE_W + 1] = src.clampedRead(y_tile + TILE_H * 2, x_tile + TILE_W); } __syncthreads(); for (int i = 0; i < 2; ++i) { float sum = 0; #if 1 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int x = tx + 1 + dx; int y = ty + 1 + dy + i * TILE_H; sum += sbuffer[y][x]; } } #endif dst(y + i * TILE_H, x) = sum; } } template <unsigned int TILE_W, unsigned int TILE_H, unsigned int Y_ELEMENTS = 2> __global__ static void d_convolve3x3Shared3(ImageView<float> src, ImageView<float> dst) { const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS; const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; int x_tile = blockIdx.x * (TILE_W - 2) - 1; int y_tile = blockIdx.y * (TILE_H2 - 2) - 1; int x = x_tile + tx; int y = y_tile + ty; __shared__ float sbuffer[TILE_H2][TILE_W]; // copy main data for (int i = 0; i < Y_ELEMENTS; ++i) sbuffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x); __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) { int gx = x; int gy = y + i * TILE_H; int lx = tx; int ly = ty + i * TILE_H; if (!dst.inImage(gy, gx)) continue; if (lx > 0 && lx < TILE_W - 1 && ly > 0 && ly < TILE_H2 - 1) { float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { sum += sbuffer[ly + dy][lx + dx]; } } dst(gy, gx) = sum; } } } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_copySharedSync(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H][TILE_W]; sbuffer[ty][tx] = src(y, x); __syncthreads(); dst(y, x) = sbuffer[ty][tx]; } template <unsigned int TILE_W, unsigned int TILE_H, unsigned int Y_ELEMENTS = 2> __global__ static void d_copySharedSync2(ImageView<float> src, ImageView<float> dst) { const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS; const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H2; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H2][TILE_W]; for (int i = 0; i < Y_ELEMENTS; ++i) sbuffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x); __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) dst.clampedWrite(y + i * TILE_H, x, sbuffer[ty + i * TILE_H][tx]); } void convolutionTest3x3() { CUDA_SYNC_CHECK_ERROR(); int h = 2048; int w = h * 2; size_t N = w * h; size_t readWrites = N * 2 * sizeof(float); Saiga::CUDA::PerformanceTestHelper pth("filter 3x3 separable", readWrites); thrust::device_vector<float> src(N, 0.1); thrust::device_vector<float> dest(N, 0.1); thrust::device_vector<float> tmp(N, 0.1); thrust::host_vector<float> h_src = src; thrust::host_vector<float> h_dest = dest; thrust::host_vector<float> h_tmp = dest; thrust::host_vector<float> h_ref = dest; ImageView<float> imgSrc(w, h, thrust::raw_pointer_cast(src.data())); ImageView<float> imgDst(w, h, thrust::raw_pointer_cast(dest.data())); ImageView<float> imgTmp(w, h, thrust::raw_pointer_cast(tmp.data())); ImageView<float> h_imgSrc(w, h, thrust::raw_pointer_cast(h_src.data())); ImageView<float> 
h_imgDst(w, h, thrust::raw_pointer_cast(h_dest.data())); ImageView<float> h_imgTmp(w, h, thrust::raw_pointer_cast(h_tmp.data())); thrust::host_vector<float> h_kernel(9, 1); CHECK_CUDA_ERROR( hipMemcpyToSymbol(d_Kernel, h_kernel.data(), h_kernel.size() * sizeof(float), 0, hipMemcpyHostToDevice)); { for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { h_imgSrc(y, x) = (rand() % 3) - 1; } } src = h_src; } { for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int gx = x + dx; int gy = y + dy; h_imgSrc.clampToEdge(gy, gx); sum += h_imgSrc(gy, gx); } } h_imgDst(y, x) = sum; } } h_ref = h_dest; } int its = 50; { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 128; const int TILE_H = 1; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_convolve3x3<TILE_W, TILE_H>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_convolve3x3Shared<TILE_W, TILE_H>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { dest = tmp; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H * 2), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_convolve3x3Shared2<TILE_W, TILE_H>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared2", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { dest = tmp; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 8; const int Y_ELEMENTS = 4; dim3 blocks(Saiga::iDivUp(w, TILE_W - 2), Saiga::iDivUp(h, TILE_H * Y_ELEMENTS - 2), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_convolve3x3Shared3<TILE_W, TILE_H, Y_ELEMENTS>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared3", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_copySharedSync<TILE_W, TILE_H>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_copySharedSync", st.median); h_dest = dest; // SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H * 2), 1); dim3 threads(TILE_W, TILE_H); hipLaunchKernelGGL(( d_copySharedSync2<TILE_W, TILE_H>), dim3(blocks), dim3(threads), 0, 0, imgSrc, imgDst); }); pth.addMeassurement("d_copySharedSync2", st.median); h_dest = dest; // SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { hipMemcpy(thrust::raw_pointer_cast(dest.data()), 
thrust::raw_pointer_cast(src.data()), N * sizeof(int), hipMemcpyDeviceToDevice); }); pth.addMeassurement("hipMemcpy", st.median); } CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
c63174e294948eb70362d9d1084ba5f203abd708.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/imageProcessing/imageProcessing.h" #include "saiga/cuda/tests/test.h" #include "saiga/cuda/tests/test_helper.h" #include "saiga/cuda/thread_info.h" namespace Saiga { namespace CUDA { __constant__ float d_Kernel[3][3]; template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int gx = x + dx; int gy = y + dy; src.clampToEdge(gy, gx); sum += src(gy, gx); } } dst(y, x) = sum; } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3Shared(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int blockStartX = x_tile - 1; const unsigned int blockStartY = y_tile - 1; const unsigned int TILE_SIZE = TILE_H * TILE_W; const unsigned int TILE_SIZE_WITH_BORDER = (TILE_H + 2) * (TILE_W + 2); __shared__ float sbuffer[TILE_H + 2][TILE_W + 2]; for (int i = t; i < TILE_SIZE_WITH_BORDER; i += TILE_SIZE) { int x = i % (TILE_W + 2); int y = i / (TILE_W + 2); int gx = x + blockStartX; int gy = y + blockStartY; src.clampToEdge(gy, gx); sbuffer[y][x] = src(gy, gx); } __syncthreads(); float sum = 0; #if 1 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int x = tx + 1 + dx; int y = ty + 1 + dy; sum += sbuffer[y][x]; } } #endif const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; dst(y, x) = sum; } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_convolve3x3Shared2(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; const unsigned int t = ty * TILE_W + tx; const unsigned int warp_lane = t / 32; const unsigned int lane_id = t & 31; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H * 2; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H * 2 + 2][TILE_W + 2]; // copy main data for (int i = 0; i < 2; ++i) sbuffer[ty + i * TILE_H + 1][tx + 1] = src.clampedRead(y + i * TILE_H, x); // top halo if (warp_lane == 0) { sbuffer[0][lane_id + 1] = src.clampedRead(y_tile - 1, x_tile + lane_id); } // bottom if (warp_lane == 1) { sbuffer[TILE_H * 2 + 1][lane_id + 1] = src.clampedRead(y_tile + TILE_H * 2, x_tile + lane_id); } // left if (warp_lane == 2) { sbuffer[lane_id + 1][0] = src.clampedRead(y_tile + lane_id, x_tile - 1); } // right if (warp_lane == 3) { sbuffer[lane_id + 1][TILE_W + 1] = src.clampedRead(y_tile + lane_id, x_tile + TILE_W); } // corners if (warp_lane == 4) { if (lane_id == 0) sbuffer[0][0] = src.clampedRead(y_tile - 1, x_tile - 1); if (lane_id == 1) sbuffer[0][TILE_W + 1] = src.clampedRead(y_tile - 1, x_tile + TILE_W); if (lane_id == 2) sbuffer[TILE_H * 2 + 1][0] = src.clampedRead(y_tile + TILE_H * 2, x_tile - 1); if 
(lane_id == 3) sbuffer[TILE_H * 2 + 1][TILE_W + 1] = src.clampedRead(y_tile + TILE_H * 2, x_tile + TILE_W); } __syncthreads(); for (int i = 0; i < 2; ++i) { float sum = 0; #if 1 for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int x = tx + 1 + dx; int y = ty + 1 + dy + i * TILE_H; sum += sbuffer[y][x]; } } #endif dst(y + i * TILE_H, x) = sum; } } template <unsigned int TILE_W, unsigned int TILE_H, unsigned int Y_ELEMENTS = 2> __global__ static void d_convolve3x3Shared3(ImageView<float> src, ImageView<float> dst) { const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS; const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; int x_tile = blockIdx.x * (TILE_W - 2) - 1; int y_tile = blockIdx.y * (TILE_H2 - 2) - 1; int x = x_tile + tx; int y = y_tile + ty; __shared__ float sbuffer[TILE_H2][TILE_W]; // copy main data for (int i = 0; i < Y_ELEMENTS; ++i) sbuffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x); __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) { int gx = x; int gy = y + i * TILE_H; int lx = tx; int ly = ty + i * TILE_H; if (!dst.inImage(gy, gx)) continue; if (lx > 0 && lx < TILE_W - 1 && ly > 0 && ly < TILE_H2 - 1) { float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { sum += sbuffer[ly + dy][lx + dx]; } } dst(gy, gx) = sum; } } } template <unsigned int TILE_W, unsigned int TILE_H> __global__ static void d_copySharedSync(ImageView<float> src, ImageView<float> dst) { const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H][TILE_W]; sbuffer[ty][tx] = src(y, x); __syncthreads(); dst(y, x) = sbuffer[ty][tx]; } template <unsigned int TILE_W, unsigned int TILE_H, unsigned int Y_ELEMENTS = 2> __global__ static void d_copySharedSync2(ImageView<float> src, ImageView<float> dst) { const unsigned int TILE_H2 = TILE_H * Y_ELEMENTS; const unsigned int tx = threadIdx.x; const unsigned int ty = threadIdx.y; // const unsigned int t = ty * TILE_W + tx; const unsigned int x_tile = blockIdx.x * TILE_W; const unsigned int y_tile = blockIdx.y * TILE_H2; const unsigned int x = x_tile + tx; const unsigned int y = y_tile + ty; __shared__ float sbuffer[TILE_H2][TILE_W]; for (int i = 0; i < Y_ELEMENTS; ++i) sbuffer[ty + i * TILE_H][tx] = src.clampedRead(y + i * TILE_H, x); __syncthreads(); for (int i = 0; i < Y_ELEMENTS; ++i) dst.clampedWrite(y + i * TILE_H, x, sbuffer[ty + i * TILE_H][tx]); } void convolutionTest3x3() { CUDA_SYNC_CHECK_ERROR(); int h = 2048; int w = h * 2; size_t N = w * h; size_t readWrites = N * 2 * sizeof(float); Saiga::CUDA::PerformanceTestHelper pth("filter 3x3 separable", readWrites); thrust::device_vector<float> src(N, 0.1); thrust::device_vector<float> dest(N, 0.1); thrust::device_vector<float> tmp(N, 0.1); thrust::host_vector<float> h_src = src; thrust::host_vector<float> h_dest = dest; thrust::host_vector<float> h_tmp = dest; thrust::host_vector<float> h_ref = dest; ImageView<float> imgSrc(w, h, thrust::raw_pointer_cast(src.data())); ImageView<float> imgDst(w, h, thrust::raw_pointer_cast(dest.data())); ImageView<float> imgTmp(w, h, thrust::raw_pointer_cast(tmp.data())); ImageView<float> h_imgSrc(w, h, thrust::raw_pointer_cast(h_src.data())); ImageView<float> h_imgDst(w, h, thrust::raw_pointer_cast(h_dest.data())); ImageView<float> 
h_imgTmp(w, h, thrust::raw_pointer_cast(h_tmp.data())); thrust::host_vector<float> h_kernel(9, 1); CHECK_CUDA_ERROR( cudaMemcpyToSymbol(d_Kernel, h_kernel.data(), h_kernel.size() * sizeof(float), 0, cudaMemcpyHostToDevice)); { for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { h_imgSrc(y, x) = (rand() % 3) - 1; } } src = h_src; } { for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { float sum = 0; for (int dy = -1; dy <= 1; ++dy) { for (int dx = -1; dx <= 1; ++dx) { int gx = x + dx; int gy = y + dy; h_imgSrc.clampToEdge(gy, gx); sum += h_imgSrc(gy, gx); } } h_imgDst(y, x) = sum; } } h_ref = h_dest; } int its = 50; { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 128; const int TILE_H = 1; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); d_convolve3x3<TILE_W, TILE_H><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); d_convolve3x3Shared<TILE_W, TILE_H><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { dest = tmp; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H * 2), 1); dim3 threads(TILE_W, TILE_H); d_convolve3x3Shared2<TILE_W, TILE_H><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared2", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { dest = tmp; auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 8; const int Y_ELEMENTS = 4; dim3 blocks(Saiga::iDivUp(w, TILE_W - 2), Saiga::iDivUp(h, TILE_H * Y_ELEMENTS - 2), 1); dim3 threads(TILE_W, TILE_H); d_convolve3x3Shared3<TILE_W, TILE_H, Y_ELEMENTS><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_convolve3x3Shared3", st.median); h_dest = dest; SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H), 1); dim3 threads(TILE_W, TILE_H); d_copySharedSync<TILE_W, TILE_H><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_copySharedSync", st.median); h_dest = dest; // SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { const int TILE_W = 32; const int TILE_H = 16; dim3 blocks(Saiga::iDivUp(w, TILE_W), Saiga::iDivUp(h, TILE_H * 2), 1); dim3 threads(TILE_W, TILE_H); d_copySharedSync2<TILE_W, TILE_H><<<blocks, threads>>>(imgSrc, imgDst); }); pth.addMeassurement("d_copySharedSync2", st.median); h_dest = dest; // SAIGA_ASSERT(h_dest == h_ref); } { auto st = Saiga::measureObject<Saiga::CUDA::CudaScopedTimer>(its, [&]() { cudaMemcpy(thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(src.data()), N * sizeof(int), cudaMemcpyDeviceToDevice); }); pth.addMeassurement("cudaMemcpy", st.median); } CUDA_SYNC_CHECK_ERROR(); } } // namespace CUDA } // namespace Saiga
bac88307c013cebd2dbbb556a96ec9235c63a957.hip
// !!! This is a file automatically generated by hipify!!!
#include "kabukinai.h"
#include <stdio.h>

const float input_pixels[] = {
    0.0, 0.0, 0.0, 1.0, 2.0, 0.0,
    0.0, 0.0,12.0,13.0,14.0, 0.0,
    0.0, 0.0,24.0,25.0,26.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 5.0, 4.0, 3.0, 0.0,
    0.0, 0.0,17.0,16.0,15.0, 0.0,
    0.0, 0.0,29.0,28.0,27.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 6.0, 7.0, 8.0, 0.0,
    0.0, 0.0,18.0,19.0,20.0, 0.0,
    0.0, 0.0,30.0,31.0,32.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0,11.0,10.0, 9.0, 0.0,
    0.0, 0.0,23.0,22.0,21.0, 0.0,
    0.0, 0.0,35.0,34.0,33.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

const float expected[] = {
    0.000000,0.000000,0.000000,2.000000,4.000000,0.000000,
    0.000000,0.000000,24.000000,26.000000,28.000000,0.000000,
    0.000000,0.000000,48.000000,50.000000,52.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,10.000000,8.000000,6.000000,0.000000,
    0.000000,0.000000,34.000000,32.000000,30.000000,0.000000,
    0.000000,0.000000,58.000000,56.000000,54.000000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,12.000000,14.000000,16.000000,0.000000,
    0.000000,0.000000,36.000000,38.000000,40.000000,0.000000,
    0.000000,0.000000,60.000000,62.000000,64.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,22.000000,20.000000,18.000000,0.000000,
    0.000000,0.000000,46.000000,44.000000,42.000000,0.000000,
    0.000000,0.000000,70.000000,68.000000,66.000000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000
};

int main() {
    simulation_data d;
    float image_pixels[4*8*6]; // enough to hold sliced image

    PANIC_ON_BAD_CUDA_STATUS(hipMalloc((void **) &d.image_pixels, 4*8*6*sizeof(float)));
    PANIC_ON_BAD_CUDA_STATUS(hipMemcpy(d.image_pixels, input_pixels, 4*8*6*sizeof(float), hipMemcpyHostToDevice));

    d.dimensions[0] = 3;
    d.dimensions[1] = 12;
    d.number_of_slices = 4;
    d.early_dark_pixels = 2;
    d.late_dark_pixels = 1;
    d.smear_rows = 3;
    d.final_dark_rows = 2;
    d.smear_ratio = 0.25; // chosen for exact results for test
    d.exposure_time = 2.0;

    printf( "Calling expose_and_smear: " );
    expose_and_smear( &d );
    printf( "success!\n" );

    PANIC_ON_BAD_CUDA_STATUS(hipMemcpy(image_pixels, d.image_pixels, 4*8*6*sizeof(float), hipMemcpyDeviceToHost));

    int result = KABUKINAI_SUCCESS;
    for( int i = 0; i < 4*8*6; i+=1 ) {
        if( image_pixels[i] != expected[i] ) {
            printf( "At pixel %d expected %f got %f\n", i, expected[i], image_pixels[i] );
            result = KABUKINAI_FAILURE;
        }
    }
    return result;
}
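/*
 * Consistency note on the expected values (a sketch of the arithmetic, not a
 * statement about the expose_and_smear implementation): with exposure_time = 2.0
 * each imaging pixel appears doubled in the exposed rows, and each of the three
 * smear rows appears to hold smear_ratio * (column sum of the exposed imaging
 * rows). For example, the column containing 0, 12, 24 exposes to 0, 24, 48 and
 * smears to 0.25 * (0 + 24 + 48) = 18, matching the 18.0 entries above.
 */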
bac88307c013cebd2dbbb556a96ec9235c63a957.cu
#include "kabukinai.h"
#include <stdio.h>

const float input_pixels[] = {
    0.0, 0.0, 0.0, 1.0, 2.0, 0.0,
    0.0, 0.0,12.0,13.0,14.0, 0.0,
    0.0, 0.0,24.0,25.0,26.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 5.0, 4.0, 3.0, 0.0,
    0.0, 0.0,17.0,16.0,15.0, 0.0,
    0.0, 0.0,29.0,28.0,27.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 6.0, 7.0, 8.0, 0.0,
    0.0, 0.0,18.0,19.0,20.0, 0.0,
    0.0, 0.0,30.0,31.0,32.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0,11.0,10.0, 9.0, 0.0,
    0.0, 0.0,23.0,22.0,21.0, 0.0,
    0.0, 0.0,35.0,34.0,33.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

const float expected[] = {
    0.000000,0.000000,0.000000,2.000000,4.000000,0.000000,
    0.000000,0.000000,24.000000,26.000000,28.000000,0.000000,
    0.000000,0.000000,48.000000,50.000000,52.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,18.000000,19.500000,21.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,10.000000,8.000000,6.000000,0.000000,
    0.000000,0.000000,34.000000,32.000000,30.000000,0.000000,
    0.000000,0.000000,58.000000,56.000000,54.000000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,25.500000,24.000000,22.500000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,12.000000,14.000000,16.000000,0.000000,
    0.000000,0.000000,36.000000,38.000000,40.000000,0.000000,
    0.000000,0.000000,60.000000,62.000000,64.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,27.000000,28.500000,30.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,22.000000,20.000000,18.000000,0.000000,
    0.000000,0.000000,46.000000,44.000000,42.000000,0.000000,
    0.000000,0.000000,70.000000,68.000000,66.000000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,34.500000,33.000000,31.500000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,
    0.000000,0.000000,0.000000,0.000000,0.000000,0.000000
};

int main() {
    simulation_data d;
    float image_pixels[4*8*6]; // enough to hold sliced image

    PANIC_ON_BAD_CUDA_STATUS(cudaMalloc((void **) &d.image_pixels, 4*8*6*sizeof(float)));
    PANIC_ON_BAD_CUDA_STATUS(cudaMemcpy(d.image_pixels, input_pixels, 4*8*6*sizeof(float), cudaMemcpyHostToDevice));

    d.dimensions[0] = 3;
    d.dimensions[1] = 12;
    d.number_of_slices = 4;
    d.early_dark_pixels = 2;
    d.late_dark_pixels = 1;
    d.smear_rows = 3;
    d.final_dark_rows = 2;
    d.smear_ratio = 0.25; // chosen for exact results for test
    d.exposure_time = 2.0;

    printf( "Calling expose_and_smear: " );
    expose_and_smear( &d );
    printf( "success!\n" );

    PANIC_ON_BAD_CUDA_STATUS(cudaMemcpy(image_pixels, d.image_pixels, 4*8*6*sizeof(float), cudaMemcpyDeviceToHost));

    int result = KABUKINAI_SUCCESS;
    for( int i = 0; i < 4*8*6; i+=1 ) {
        if( image_pixels[i] != expected[i] ) {
            printf( "At pixel %d expected %f got %f\n", i, expected[i], image_pixels[i] );
            result = KABUKINAI_FAILURE;
        }
    }
    return result;
}
597f07cdfab775a09b6f12fe5c71e680e2f10215.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// XXX: We allow the instantiation of masked_l2_nn here:
// raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row);
// raft::linkage::cross_component_nn<value_idx, value_t>(
//   handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op);
//
// TODO: consider adding this to libraft.so or creating an instance in a
// separate translation unit for this test.
//
// TODO: edge case testing. Reference: https://github.com/rapidsai/raft/issues/1669

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>
#include <hipcub/hipcub.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <vector>

#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/sparse/mst/mst.cuh>
#include <raft/sparse/neighbors/knn_graph.cuh>
#include <raft/sparse/selection/cross_component_nn.cuh>

#include <raft/distance/distance_types.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/hierarchy/single_linkage.cuh>

#include <rmm/device_uvector.hpp>

#include "../../test_utils.cuh"

namespace raft {
namespace sparse {

using namespace std;

template <typename value_t, typename value_idx>
struct ConnectComponentsInputs {
  value_idx n_row;
  value_idx n_col;
  std::vector<value_t> data;
  int c;
};

template <typename value_idx, typename value_t>
class ConnectComponentsTest
  : public ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>> {
 protected:
  void basicTest()
  {
    raft::resources handle;
    auto stream = resource::get_cuda_stream(handle);

    params = ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>>::GetParam();

    raft::sparse::COO<value_t, value_idx> out_edges(resource::get_cuda_stream(handle));
    raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle));

    rmm::device_uvector<value_t> data(params.n_row * params.n_col,
                                      resource::get_cuda_stream(handle));

    raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle));

    rmm::device_uvector<value_idx> indptr(params.n_row + 1, stream);

    /**
     * 1. Construct knn graph
     */
    raft::sparse::COO<value_t, value_idx> knn_graph_coo(stream);

    raft::sparse::neighbors::knn_graph(handle,
                                       data.data(),
                                       params.n_row,
                                       params.n_col,
                                       raft::distance::DistanceType::L2SqrtExpanded,
                                       knn_graph_coo,
                                       params.c);

    raft::sparse::convert::sorted_coo_to_csr(
      knn_graph_coo.rows(), knn_graph_coo.nnz, indptr.data(), params.n_row + 1, stream);

    /**
     * 2. Construct MST, sorted by weights
     */
    rmm::device_uvector<value_idx> colors(params.n_row, stream);

    auto mst_coo = raft::mst::mst<value_idx, value_idx, value_t, double>(handle,
                                                                         indptr.data(),
                                                                         knn_graph_coo.cols(),
                                                                         knn_graph_coo.vals(),
                                                                         params.n_row,
                                                                         knn_graph_coo.nnz,
                                                                         colors.data(),
                                                                         stream,
                                                                         false,
                                                                         true);

    /**
     * 3.
cross_component_nn to fix connectivities */ raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row, params.n_col); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_batched, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row / 2, params.n_col / 2); ASSERT_TRUE(out_edges.nnz == out_edges_batched.nnz); ASSERT_TRUE( devArrMatch(out_edges.rows(), out_edges_batched.rows(), out_edges.nnz, Compare<int>())); ASSERT_TRUE( devArrMatch(out_edges.cols(), out_edges_batched.cols(), out_edges.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch( out_edges.vals(), out_edges_batched.vals(), out_edges.nnz, CompareApprox<float>(1e-4))); /** * Construct final edge list */ rmm::device_uvector<value_idx> indptr2(params.n_row + 1, stream); raft::sparse::convert::sorted_coo_to_csr( out_edges.rows(), out_edges.nnz, indptr2.data(), params.n_row + 1, stream); auto output_mst = raft::mst::mst<value_idx, value_idx, value_t>(handle, indptr2.data(), out_edges.cols(), out_edges.vals(), params.n_row, out_edges.nnz, colors.data(), stream, false, false); resource::sync_stream(handle, stream); // The sum of edges for both MST runs should be n_rows - 1 final_edges = output_mst.n_edges + mst_coo.n_edges; } void SetUp() override { basicTest(); } void TearDown() override {} protected: ConnectComponentsInputs<value_t, value_idx> params; value_idx final_edges; }; const std::vector<ConnectComponentsInputs<float, int>> fix_conn_inputsf2 = { // Test n_clusters == n_points {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, -1}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 
9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 
2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 
6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 
5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 
2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, -4}}; typedef ConnectComponentsTest<int, float> ConnectComponentsTestF_Int; TEST_P(ConnectComponentsTestF_Int, Result) { /** * Verify the src & dst vertices on each edge have different colors */ EXPECT_TRUE(final_edges == params.n_row - 1); } INSTANTIATE_TEST_CASE_P(ConnectComponentsTest, ConnectComponentsTestF_Int, ::testing::ValuesIn(fix_conn_inputsf2)); template <typename value_idx, typename value_t> struct MutualReachabilityFixConnectivitiesRedOp { value_t* core_dists; value_idx m; DI MutualReachabilityFixConnectivitiesRedOp() : m(0) {} MutualReachabilityFixConnectivitiesRedOp(value_t* core_dists_, value_idx m_) : core_dists(core_dists_), m(m_){}; typedef typename raft::KeyValuePair<value_idx, value_t> KVP; DI void operator()(value_idx rit, KVP* out, const KVP& other) const { if (rit < m && other.value < std::numeric_limits<value_t>::max()) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_other = max(core_dist_rit, max(core_dists[other.key], other.value)); value_t core_dist_out; if (out->key > -1) { core_dist_out = max(core_dist_rit, max(core_dists[out->key], out->value)); } else { core_dist_out = out->value; } bool smaller = core_dist_other < core_dist_out; out->key = smaller ? other.key : out->key; out->value = smaller ? core_dist_other : core_dist_out; } } DI KVP operator()(value_idx rit, const KVP& a, const KVP& b) const { if (rit < m && a.key > -1) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_a = max(core_dist_rit, max(core_dists[a.key], a.value)); value_t core_dist_b; if (b.key > -1) { core_dist_b = max(core_dist_rit, max(core_dists[b.key], b.value)); } else { core_dist_b = b.value; } return core_dist_a < core_dist_b ? 
KVP(a.key, core_dist_a) : KVP(b.key, core_dist_b); } return b; } DI void init(value_t* out, value_t maxVal) const { *out = maxVal; } DI void init(KVP* out, value_t maxVal) const { out->key = -1; out->value = maxVal; } DI void init_key(value_t& out, value_idx idx) const { return; } DI void init_key(KVP& out, value_idx idx) const { out.key = idx; } DI value_t get_value(KVP& out) const { return out.value; } DI value_t get_value(value_t& out) const { return out; } void gather(const raft::resources& handle, value_idx* map) { auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m); thrust::gather(raft::resource::get_thrust_policy(handle), map, map + m, core_dists, tmp_core_dists.data_handle()); raft::copy_async( core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle)); } void scatter(const raft::resources& handle, value_idx* map) { auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m); thrust::scatter(raft::resource::get_thrust_policy(handle), core_dists, core_dists + m, map, tmp_core_dists.data_handle()); raft::copy_async( core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle)); } }; template <typename value_t, typename value_idx> struct ConnectComponentsMutualReachabilityInputs { value_idx n_row; value_idx n_col; std::vector<value_t> data; std::vector<value_t> core_dists; std::vector<value_idx> colors; std::vector<value_idx> expected_rows; std::vector<value_idx> expected_cols; std::vector<value_t> expected_vals; }; template <typename value_idx, typename value_t> class ConnectComponentsEdgesTest : public ::testing::TestWithParam<ConnectComponentsMutualReachabilityInputs<value_t, value_idx>> { protected: void basicTest() { raft::resources handle; auto stream = resource::get_cuda_stream(handle); params = ::testing::TestWithParam< ConnectComponentsMutualReachabilityInputs<value_t, value_idx>>::GetParam(); raft::sparse::COO<value_t, value_idx> out_edges_unbatched(resource::get_cuda_stream(handle)); raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> data(params.n_row * params.n_col, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> core_dists(params.n_row, resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> colors(params.n_row, resource::get_cuda_stream(handle)); raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle)); raft::copy(core_dists.data(), params.core_dists.data(), core_dists.size(), resource::get_cuda_stream(handle)); raft::copy( colors.data(), params.colors.data(), colors.size(), resource::get_cuda_stream(handle)); /** * 3. 
cross_component_nn to fix connectivities */ MutualReachabilityFixConnectivitiesRedOp<value_idx, value_t> red_op(core_dists.data(), params.n_row); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_unbatched, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row, params.n_col); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_batched, data.data(), colors.data(), params.n_row, params.n_col, red_op, 11, 1); ASSERT_TRUE(out_edges_unbatched.nnz == out_edges_batched.nnz && out_edges_unbatched.nnz == params.expected_rows.size()); ASSERT_TRUE(devArrMatch(out_edges_unbatched.rows(), params.expected_rows.data(), out_edges_unbatched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_unbatched.cols(), params.expected_cols.data(), out_edges_unbatched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_unbatched.vals(), params.expected_vals.data(), out_edges_unbatched.nnz, CompareApprox<float>(1e-4))); ASSERT_TRUE(devArrMatch(out_edges_batched.rows(), params.expected_rows.data(), out_edges_batched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_batched.cols(), params.expected_cols.data(), out_edges_batched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_batched.vals(), params.expected_vals.data(), out_edges_batched.nnz, CompareApprox<float>(1e-4))); } void SetUp() override { basicTest(); } void TearDown() override {} protected: ConnectComponentsMutualReachabilityInputs<value_t, value_idx> params; }; const std::vector<ConnectComponentsMutualReachabilityInputs<float, int>> mr_fix_conn_inputsf2 = { {100, 2, {-7.72642, -8.39496, 5.4534, 0.742305, -2.97867, 9.55685, 6.04267, 0.571319, -6.52184, -6.31932, 3.64934, 1.40687, -2.17793, 9.98983, 4.42021, 2.33028, 4.73696, 2.94181, -3.66019, 9.38998, -3.05358, 9.12521, -6.65217, -5.57297, -6.35769, -6.58313, -3.61553, 7.81808, -1.77073, 9.18565, -7.95052, -6.39764, -6.60294, -6.05293, -2.58121, 10.0178, -7.76348, -6.72638, -6.40639, -6.95294, -2.97262, 8.54856, -6.95673, -6.53896, -7.32614, -6.02371, -2.1478, 10.5523, -2.54502, 10.5789, -2.96984, 10.0714, 3.22451, 1.55252, -6.25396, -7.73727, -7.85431, -6.09303, -8.11658, -8.20057, -7.55965, -6.64786, 4.936, 2.23423, 4.44752, 2.27472, -5.72103, -7.70079, -0.929985, 9.78172, -3.10984, 8.72259, -2.44167, 7.58954, -2.18511, 8.6292, 5.55528, 2.30192, 4.73164, -0.0143992, -8.2573, -7.81793, -2.98837, 8.82863, 4.60517, 0.804492, -3.83738, 9.21115, -2.62485, 8.71318, 3.57758, 2.44676, -8.48711, -6.69548, -6.70645, -6.49479, -6.86663, -5.42658, 3.83139, 1.47141, 2.02013, 2.79507, 4.64499, 1.73858, -1.69667, 10.3705, -6.61974, -6.09829, -6.05757, -4.98332, -7.10309, -6.16611, -3.52203, 9.32853, -2.26724, 7.10101, 6.11777, 1.4549, -4.23412, 8.452, -6.58655, -7.59446, 3.93783, 1.64551, -7.12502, -7.63385, 2.72111, 1.94666, -7.14428, -4.15994, -6.66553, -8.12585, 4.70011, 4.43641, -7.76914, -7.69592, 4.11012, 2.48644, 4.89743, 1.89872, 4.29716, 1.17089, -6.62913, -6.53366, -8.07093, -6.22356, -2.16558, 7.25125, 4.73953, 1.46969, -5.91625, -6.46733, 5.43091, 1.06378, -6.82142, -8.02308, 6.52606, 2.14775, 3.08922, 2.04173, -2.14756, 8.36917, 3.85663, 1.65111, -1.68665, 7.79344, -5.01385, -6.40628, -2.52269, 7.95658, -2.30033, 7.05462, -1.04355, 8.78851, 3.72045, 3.5231, -3.98772, 8.29444, 4.24777, 0.509655, 4.72693, 1.67416, 5.7827, 2.7251, -3.41722, 7.60198, 5.22674, 4.16363, -3.1109, 10.8666, -3.18612, 9.62596, -1.4782, 9.94557, 4.47859, 2.37722, -5.79658, -5.82631, -3.34842, 8.70507}, {0.978428, 1.01917, 0.608673, 1.45629, 
0.310713, 0.689461, 0.701126, 0.63296, 0.774788, 0.701648, 0.513282, 0.757651, 0.45638, 0.973111, 0.901396, 0.613692, 0.482497, 0.688143, 0.72428, 0.666345, 0.58232, 0.554756, 0.710315, 0.903611, 0.694115, 0.796099, 0.639759, 0.798998, 0.639839, 1.30727, 0.663729, 0.57476, 0.571348, 1.14662, 1.26518, 0.485068, 0.78207, 0.791621, 1.01678, 1.28509, 1.14715, 0.381395, 0.850507, 0.788511, 0.588341, 0.878516, 0.928669, 0.405874, 0.776421, 0.612274, 1.84963, 0.57476, 0.95226, 0.488078, 1.24868, 0.515136, 0.589378, 0.903632, 1.01678, 1.09964, 0.666345, 0.713265, 0.877168, 1.10053, 1.96887, 1.03574, 2.03728, 0.969553, 0.774788, 0.586338, 0.65168, 0.435472, 0.664396, 0.790584, 0.678637, 0.715964, 0.865494, 0.978428, 1.59242, 0.861109, 0.833259, 0.65168, 0.903632, 1.49599, 0.76347, 0.960453, 1.1848, 1.37398, 0.928957, 1.07848, 0.661798, 1.21104, 1.04579, 1.89047, 1.24288, 0.529553, 0.903611, 0.620897, 0.882467, 0.647189}, {0, 1, 2, 1, 0, 1, 2, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 2, 0, 0, 2, 2, 2, 1, 0, 0, 0, 0, 1, 1, 0, 2, 2, 2, 2, 1, 1, 0, 2, 1, 2, 2, 1, 0, 0, 0, 1, 1, 1, 2, 0, 0, 0, 2, 2, 1, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 2, 1, 0, 1, 0, 1, 1, 2, 1, 2, 0, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 1, 0, 2}, {50, 54, 57, 63, 82, 87}, {57, 63, 50, 54, 87, 82}, {6.0764, 11.1843, 6.0764, 11.1843, 6.89004, 6.89004}}, {1000, 2, {-6.59634, -7.13901, -6.13753, -6.58082, 5.19821, 2.04918, -2.96856, 8.16444, -2.76879, 7.51114, -6.82261, -6.61152, 5.02008, 2.58376, 5.55621, 2.31966, 4.86379, 3.33731, 5.84639, 1.15623, -2.17159, 8.60241, -4.97844, -6.94077, -2.31014, 8.41407, 5.5582, 0.402669, 5.25265, 0.919754, 5.85298, 2.11489, -3.29245, 8.69222, -1.9621, 8.81209, -1.53408, 8.86723, -2.18227, 8.79519, 4.60519, 2.20738, -6.4759, -6.9043, -7.18766, -6.10045, -9.00148, -7.48793, 4.01674, 1.41769, -2.45347, 10.1085, -3.20892, 9.22827, -3.18612, 9.62596, 4.81977, 3.36517, 4.90693, 2.8628, -6.44269, -5.68946, -8.30144, -5.37878, 4.61485, 2.79094, -1.98726, 9.31127, -3.66019, 9.38998, -6.58607, -8.23669, -7.46015, -6.29153, 4.08468, 3.85433, -6.36842, -5.50645, -6.83602, -5.18506, -0.627173, 10.3597, 3.98846, 1.48928, -2.9968, 8.58173, -7.2144, -7.28376, -0.660242, 10.1409, -4.23528, -8.38308, -3.15984, 8.52716, -2.40987, 9.76567, -8.7548, -6.76508, 4.56971, 0.312209, -7.5487, -5.8402, -1.6096, 9.32159, 5.04813, 0.270586, -7.6525, -6.47306, -1.79758, 7.88964, -9.0153, -3.74236, -3.5715, 9.48788, -1.65154, 8.85435, -3.47412, 9.70034, 6.31245, 2.39219, 4.03851, 2.29295, -3.17098, 9.86672, -6.90693, -7.81338, -6.22373, -6.68537, -3.22204, 9.12072, -0.365254, 9.6482, -7.76712, -7.31757, 4.15669, 3.54716, 4.1937, 0.083629, -3.03896, 9.52755, -6.29293, -7.35501, -2.95926, 9.63714, 4.02709, 1.58547, 4.56828, 1.93595, 5.6242, 1.75918, -7.36237, -7.83344, 5.32177, 3.81988, -2.43183, 8.153, -1.97939, 10.4559, -3.49492, 9.51833, 3.39602, 1.28026, -2.42215, 8.71528, -3.57682, 8.87191, -2.77385, 11.7345, 5.71351, 0.946654, -6.50253, -6.90937, 4.08239, 0.603367, -5.64134, -6.85884, -2.76177, 7.7665, -2.25165, 8.93984, -3.49071, 9.47639, -1.06792, 7.57842, 5.15754, 1.24743, 3.63574, 1.20537, -6.07969, -8.49642, 4.12227, 2.19696, -7.17144, -8.4433, -1.92234, 11.2047, 3.23237, 1.19535, 3.85389, 0.641937, 4.82665, 1.21779, -7.68923, -6.45605, -7.00816, -8.76196, -5.12894, 9.83619, -5.66247, -5.35879, 3.05598, 2.73358, 6.06038, 1.40242, -1.69568, 7.78342, 5.13391, 2.23384, -2.96984, 10.0714, -5.36618, -6.2493, 5.55896, 1.6829, 3.55882, 2.58911, 5.36155, 0.844118, -0.0634456, 9.14351, 4.88368, 1.40909, -7.04675, -6.59753, 
-7.78333, -6.55575, 5.39881, 2.25436, -2.85189, 8.64285, -2.22821, 8.39159, 3.88591, 1.69249, -7.55481, -7.02463, 4.60032, 2.65467, -6.90615, -7.76198, -6.76005, -7.85318, 4.15044, 3.01733, -7.18884, -7.63227, 4.68874, 2.01376, 3.51716, 2.35558, -3.81367, 9.68396, 4.42644, 3.4639, 4.81758, 0.637825, -6.20705, -4.98023, -1.68603, 9.0876, -4.99504, -5.33687, -1.77073, 9.18565, 4.86433, 3.02027, 4.20538, 1.664, 4.59042, 2.64799, -3.09856, 9.86389, -3.02306, 7.95507, -6.32402, -6.79053, -7.67205, -7.18807, -8.10918, -6.38341, -1.67979, 6.80315, 4.00249, 3.16219, -2.54391, 7.84561, -3.22764, 8.80084, -2.63712, 8.05875, -2.41744, 7.02672, -6.71117, -5.56251, 5.18348, 1.60256, -7.40824, -6.29375, -4.22233, 10.3682, 4.8509, 1.87646, -2.99456, 9.09616, 5.1332, 2.15801, -2.27358, 9.78515, -6.73874, -8.64855, 4.96124, 2.39509, -3.70949, 8.67978, -4.13674, 9.06237, 2.80367, 2.48116, -0.876786, 7.58414, -3.7005, 9.67084, 6.48652, 0.903085, 6.28189, 2.98299, -6.07922, -6.12582, -5.67921, -7.537, 4.55014, 3.41329, -1.63688, 9.19763, -4.02439, 10.3812, 5.23053, 3.08187, -2.2951, 7.76855, -6.24491, -5.77041, 6.02415, 2.53708, -6.91286, -7.08823, 4.83193, 1.66405, -7.07454, -5.74634, -2.09576, 10.8911, 3.29543, 1.05452, -3.49973, 8.44799, 5.2922, 0.396778, -2.54502, 10.5789, -6.38865, -6.14523, -1.75221, 8.09212, -9.30387, -5.99606, -2.98113, 10.1032, -6.2017, -7.36802, 4.63628, 0.814805, -1.81905, 8.61307, 4.88926, 3.55062, 3.08325, 2.57918, -2.51717, 10.4942, -5.75358, -6.9315, 6.36742, 2.40949, 5.74806, 0.933264, 4.74408, 1.91058, -7.41496, -6.97064, -2.98414, 8.36096, 6.72825, 1.83358, -2.95349, 9.39159, -3.35599, 7.49944, 6.18738, 3.76905, -3.17182, 9.58488, 5.17863, 1.0525, -3.0397, 8.43847, -2.23874, 8.96405, 3.04689, 2.41364, 6.14064, 2.82339, -6.33334, -6.87369, -7.92444, -8.84647, 3.65129, 0.86958, 5.29842, 3.98337, -2.06538, 9.78892, -6.89494, -6.30082, -2.52144, 8.11703, -8.11398, -7.47257, 5.3381, 2.36666, -6.93452, -6.59456, -7.50634, -6.01772, 6.23438, 1.12621, -2.15218, 8.32138, -7.04777, -7.3522, -2.52771, 8.72563, -2.77907, 8.03552, 4.29123, 1.62391, -8.07551, -6.43551, -3.28202, 8.77747, -2.21308, 9.27534, -8.25153, -8.49367, -3.54644, 8.82395, -8.05867, -5.69243, 4.46681, 1.98875, 3.8362, 3.61229, -6.96231, -7.00186, 5.18993, 1.00483, -5.35116, -6.37227, 5.23298, 1.66362, -5.68306, -7.03864, -9.03144, -7.59926, -6.10127, -7.4313, 4.83572, 0.994797, -7.32695, -5.59909, 0.569683, 10.1339, 3.35957, 2.84563, -2.4122, 9.60944, 5.00855, 1.57983, -2.57528, 7.80327, 3.96349, 3.77411, 4.59429, 2.21651, -6.54765, -6.68961, 4.76798, 1.29212, -1.67351, 7.88458, 5.63615, 1.47941, -2.5301, 9.13161, 4.26075, 1.76959, 4.67788, 2.0932, 4.39955, 1.59835, 3.91274, 1.72565, -4.1786, 9.55765, -7.34566, -8.47481, 4.8364, 2.68217, -7.36848, -7.99973, -5.84708, -5.7534, 5.37252, 1.89245, -2.1707, 8.599, -1.3299, 9.0818, -6.79122, -5.40258, 5.56391, 1.78827, -0.194539, 7.14702, 4.60489, 3.74397, 5.50995, 2.46885, -3.98772, 8.29444, -5.21837, -7.33721, -1.63959, 10.3699, -5.92932, -5.1695, -5.88358, -7.6369, 4.11716, 3.02218, -6.54114, -7.17551, 3.97179, 2.96521, -6.75325, -4.94118, 5.26169, 0.402945, 3.25031, 0.327771, -0.44845, 10.7696, -2.15141, 9.57507, 7.04329, 1.91555, -3.74615, 7.69383, -7.52318, -5.85015, -6.80419, -8.48208, -4.57664, 8.92517, 4.57574, 2.30193, 4.84098, 3.02382, -9.43355, -5.94579, -3.52203, 9.32853, 3.43018, 2.5731, -6.15725, -7.25294, -6.69861, -8.17694, -2.40955, 8.51081, -4.82342, -7.98332, -7.10611, -6.51274, 5.86755, 0.763529, -6.56045, -5.53966, -3.61553, 7.81808, 4.3825, 0.304586, 
-6.52818, -5.80996, 4.59972, 0.542395, -6.90603, -6.59995, -6.3585, -6.23489, -6.01915, -7.46319, -5.38694, -7.15123, -7.83475, -6.45651, 5.89564, 1.07856, -5.15266, -7.27975, -6.97978, -7.08378, 5.83493, 0.449983, -2.62374, 10.2521, -7.34494, -6.98606, -6.79719, -8.33766, 3.54757, 1.65676, -8.40528, -5.61753, -5.85556, -6.28758, 4.66862, 3.25162, -6.26047, -4.82261, 4.61552, 4.11544, -1.36637, 9.76622, 4.2517, 2.14359, -2.45099, 7.87132, -0.376164, 7.0622, 4.34493, 3.22091, 6.95921, 2.36649, -6.70319, -7.24714, -5.56932, -5.48443, -7.43149, -4.32191, -3.23956, 9.23074, -5.77255, -7.00049, 4.96601, 0.722056, -7.88617, -5.74023, 4.18757, -0.45071, -7.12569, -7.72336, 5.27366, 2.38697, 3.93487, 1.9174, 3.19186, -0.225636, -3.41722, 7.60198, -3.08286, 8.46743, -5.87905, -7.55073, -5.26425, -7.20243, -2.97867, 9.55685, -1.23153, 8.42272, -2.33602, 9.3996, -3.33819, 8.45411, -3.58009, 9.49676, 3.78152, 2.67348, -1.54582, 9.42707, -4.04331, 10.292, 3.3452, 3.134, -2.75494, 8.74156, -3.26555, 7.59203, -7.27139, -7.80252, 3.5293, 3.72544, 6.11642, 3.35326, 4.01611, 3.8872, 4.89591, 2.95586, -7.06677, -5.89438, 4.19438, 3.42655, -6.11355, -5.65318, -7.59645, -8.74665, -5.80362, -6.8588, 3.80453, 4.11832, 5.70655, 3.14247, -4.98084, 8.21739, -1.87642, 11.285, 4.39864, 2.32523, -3.48388, 9.80137, 4.02836, 0.566509, -2.41212, 9.98293, -5.40846, -7.08943, 4.01506, 1.99926, -3.43613, 8.95476, -7.24458, -7.71932, 6.02204, 2.62188, -6.29999, -6.55431, 6.19038, 0.974816, 3.55882, 3.02632, -7.06011, -3.687, -1.55877, 8.43738, -5.14711, -4.64881, 4.7167, 0.690177, -7.90381, -5.02602, 4.17218, 2.31967, -0.643423, 9.48812, -7.95237, -6.64086, -4.05986, 9.08285, -6.24158, -6.37927, -6.6105, -7.2233, -6.21675, -5.70664, -3.29967, 9.48575, 3.41775, 2.68617, -2.24948, 8.10997, -2.24931, 9.79611, -9.0523, -6.03269, -2.2587, 9.36073, 5.20965, 2.42088, -3.10159, 8.1503, -6.67906, -5.73147, 4.0687, 2.54575, -1.24229, 8.30662, -2.09627, 8.45056, -7.87801, -6.57832, 4.72216, 3.03865, -0.929985, 9.78172, -8.56307, -7.68598, -7.05257, -5.1684, -7.09076, -7.86729, 4.61432, 3.1459, -6.34133, -5.8076, -3.82943, 10.8457, -8.46082, -5.98507, 5.34763, 1.4107, -1.68714, 10.9111, -1.67886, 8.1582, -0.623012, 9.18886, -4.21258, 8.95874, -2.16744, 10.8905, -6.57158, -7.27176, 2.14047, 4.26411, -8.44217, -7.40916, 5.29008, 1.87399, 4.31824, 4.04992, -3.77008, 9.93215, -2.72688, 10.1131, -6.14278, -7.16144, -3.92457, 8.59364, -5.92649, -6.59299, 4.68369, 1.82617, -6.89905, -7.18329, 3.95173, 4.22561, -7.66453, -6.23183, -2.44167, 7.58954, -6.36603, -7.41281, -6.45081, -6.187, -6.6125, -6.37138, 5.46036, 2.48044, -2.14756, 8.36917, -2.3889, 9.52872, 3.80752, 2.44459, -3.98778, 10.158, -6.63887, -4.27843, -8.65266, -5.61819, -7.97003, -5.46918, -5.9604, -7.54825, -0.916011, 8.50307, -3.69246, 6.97505, -7.98533, -7.09503, -2.30033, 7.05462, 4.76218, 2.51647, -7.04981, -7.33334, 3.66401, 3.02681, -2.50408, 8.7797, 7.19996, 1.87711, 4.01291, 3.78562, -0.356015, 8.24694, -0.958046, 9.12996, 4.60675, 3.76773, 6.21945, 1.45031, 4.27744, 0.8535, -4.72232, -7.48582, 6.03923, 2.8978, -3.26833, 9.16468, -7.97059, -7.29092, -2.3998, 9.74005, -2.66721, 8.58741, -7.36269, -6.73332, -7.87893, -7.38488, 4.65023, 0.661333, -4.8171, -7.94764, -4.11564, 9.21775, 4.80633, 2.46562, -2.72887, 9.3714, -5.26735, -5.5652, 4.9826, 2.42992, -6.17018, -7.3156, 4.38084, 1.77682, 5.35084, 2.41743, -2.61796, 9.416, 5.27229, 2.94572, -7.52315, -5.95227, -1.45077, 7.25555, -3.79916, 7.71921, -2.23251, 9.84147, 3.70054, 1.82908, -1.93831, 10.1499, -6.18324, -5.9248, 
-3.33142, 9.25797, -6.08536, -8.1344, 5.95727, 2.17077, 4.87366, 0.417274, -6.529, -6.39092, -9.24256, -7.88984, -6.36652, -7.13966, -3.90777, 9.57726, -7.06252, -5.50523, -2.26423, 8.50734, -2.84498, 10.6833, 5.0391, 2.62037, -2.74815, 8.10672, 3.35945, 3.72796, -4.11668, 9.19892, 5.66903, 2.44577, -1.63807, 8.68826, -7.42587, -6.48831, 6.17063, 3.19193, -2.28511, 9.02688, -7.10088, -7.15692, 4.46293, 1.17487, -5.91017, -6.45292, -2.26724, 7.10101, -2.43339, 8.33712, -4.63309, 8.48853, -3.31769, 8.51253, -2.49078, 10.6907, -1.30798, 8.60621, 6.30535, 2.98754, -5.79384, -6.78213, -1.93213, 8.81124, 4.55773, 3.09047, 6.37584, 2.17108, 4.3927, 1.29119, -3.2245, 9.69388, -1.69634, 9.64392, 2.799, 0.693593, -2.1426, 8.07441, -8.4505, -8.00688, 4.736, 1.51089, -2.5863, 9.35544, -2.94924, 9.14503, 6.2054, 1.90742, 5.67172, 0.487609, -5.69071, -6.17181, -8.24651, -7.10488, -7.34424, -6.67895, -6.71977, -7.90778, -1.82294, 7.40157, -9.40991, -7.16611, -4.37999, 8.66277, -1.42615, 10.0681, -2.00828, 8.03673, -7.50228, -6.6855, -5.65859, -6.29801, -8.02335, -6.77155, -3.40761, 9.50621, -2.82447, 9.77326, -1.5938, 9.34304, -3.5213, 7.35943, -3.36961, 8.62973, -7.01708, -5.92724, 5.20886, 3.60157, -1.71817, 8.1049, -2.46363, 8.36269, -2.77809, 7.90776, -2.75459, 8.26055, -2.03596, 8.94146, -4.53434, 9.20074, -7.44387, -6.69556, -6.90099, -7.62732, 3.29169, 2.71643, 6.08686, 2.16972, -2.31111, 8.86993, -5.75046, 7.9899, 4.69951, 1.32623, 4.71851, -0.025031, -6.42374, -4.71511, -8.04974, -8.68209, -3.16103, 9.06168, -6.18267, -7.21393, -7.94202, -6.4518, -7.07697, -7.03138, 3.93554, 0.564708, -1.20372, 9.03529, -7.10611, -7.83955, -7.47529, -5.50567, -6.15453, -6.36393, -2.98024, 9.24634, -7.75761, -7.70699, -3.08597, 9.76968, -8.04954, -9.75237, 5.2534, 0.950377, 5.63789, -0.923086, -5.7065, -6.51047, -8.02132, -7.07377, -8.28594, -6.96322, -7.70722, -6.79397, -2.4962, 10.4678, 5.02846, 4.46617, 4.02648, 1.6707, -0.319395, 8.20599, 4.74525, 0.639144, -1.0313, 8.49602, 4.08766, 2.6061, 3.63826, 1.69207, 2.55795, 3.66963, 5.2826, 3.30232, -1.04355, 8.78851, -6.84762, -7.63353, -4.70868, -7.056, 3.53651, -0.179721, -3.38482, 7.63149, -5.9265, -6.36702, -0.986074, 9.5532, -2.42261, 8.85861, -7.42835, -6.78726, -4.02857, 8.53005, -8.22675, -7.85172, -5.57529, -8.5426, 6.03009, 2.53098, -7.10448, -7.53011, -3.4988, 8.8885, -2.62485, 8.71318, -6.39489, -7.72647, 3.93789, 1.31027, 4.27627, 1.91622, -0.923181, 7.77647, -5.16017, 10.1058, -6.44307, -5.97617, -7.24495, -6.69543, 6.27331, 0.826824, -6.55655, -7.13246, 5.66245, 4.41292, -2.13805, 8.4103, 5.23463, 2.82659, -4.86624, -6.74357, -6.14082, -6.26474, -2.67048, 9.41834, -1.26311, 6.9409, -7.20231, -7.13094, -1.35109, 9.80595, 3.9906, 0.749229, -6.75696, -5.25543, 4.84826, -0.0685652, -7.4914, -6.91715, 4.46725, 2.85683, -2.95571, 9.87068, 6.32381, 1.51429, -6.81177, -6.02734, -2.57188, 9.96943, -4.28792, 10.5103, 3.65025, 2.91394, -7.11856, -7.24693, -6.98693, -6.43239, 4.7651, 1.54376, 4.00092, 0.65008, -7.14816, -7.7713, -7.58803, -8.39382, 4.3321, 2.19232, -7.89545, -6.81843, -2.11475, 8.5933, -0.743743, 9.41927, 3.64849, -0.18022, -1.68665, 7.79344, 4.00214, 1.44217, -6.96799, -7.25012, -1.58302, 10.9237, -6.68524, -7.23328, 4.65831, 2.32075, 4.62024, 2.52566, -4.23412, 8.452, -0.822056, 9.89593, -7.19868, -7.67614, -3.32742, 11.1067, 5.27861, 0.830165, 4.48982, 2.09875, -6.58087, -7.6319, -0.880582, 7.63418, -7.01088, -6.80326, -7.31601, -6.98972, -6.85883, -7.60811, 6.14328, 2.85053, -7.49206, -6.51861, -2.28174, 10.3214, 4.81074, 1.78919, -5.58987, 
-6.20693, 4.08096, 2.35038, -1.5029, 8.43739, 4.11536, 2.46254, -3.28299, 7.76963, 4.31953, 2.39734, 4.91146, 0.696421, -1.4782, 9.94557, -3.34842, 8.70507, -6.97822, -6.86126, 4.10012, 1.19486, -2.50395, 9.06127, 4.41891, 2.00006, -2.73266, 9.72829, 3.5436, 0.533119, 5.78864, 0.233456, -6.62589, -6.41242, -2.21942, 11.0897, -6.76636, -8.31839, -2.71732, 8.52129, -5.20972, -6.48544, 3.26056, 1.24224, 3.45228, 2.28299, 4.72171, 1.87428, -7.52585, -5.1048, 5.0695, 2.18086, -6.55646, -7.02771, 3.23727, 3.72275, 3.41411, 0.508795, -7.80698, -6.64174, -5.90443, -6.37902, -0.387041, 10.0468, -1.3506, 8.1936, -6.08614, -8.62864, -5.91478, -5.26453, -2.61623, 7.97904, 4.45459, 1.84335, -6.66643, -7.63208, 3.6729, 1.92546, -1.32976, 8.54511, 6.31758, 1.41958, 4.63381, 2.81166, -7.01394, -6.0693, -2.7786, 9.73183, -2.90131, 7.55077, -7.13842, -5.28146, 6.71514, 1.28398, -6.98408, -7.04893, -3.03946, 8.22141, -2.76417, 10.5183, -7.35347, -6.89456, 4.19345, 2.16726, -2.02819, 9.23817, 4.97076, 2.8067, -0.544473, 9.04955, 4.90727, 2.29487, -6.31871, -7.17559, 3.71665, 0.621485, 4.7903, 2.33813, -6.47994, -7.53147, -6.80958, -5.71823, -8.07326, -5.96096, 4.77342, 1.8207, 5.71856, 1.93466, -2.70156, 9.31583, -2.1478, 10.5523, 4.78855, 1.63608, 5.53507, 2.60834, -7.00058, -6.46058, 5.4738, 2.43235, -1.34603, 9.02452, -7.5337, -8.71074, -7.30893, -7.57253, -5.33752, -4.87402, -7.01364, -6.86542, -7.93331, -7.94791, -5.69392, -6.16116, -7.32291, -7.76491, -6.41965, -7.55783, -7.87996, -7.55785, -6.69005, -5.87906, 3.92147, 2.86809, -1.5552, 9.66568, 5.07989, 1.47112, -7.48524, -5.0541, -1.82724, 8.70402, -2.00421, 9.88004, -2.62153, 8.79332, -7.52111, -6.44819, 4.06424, 2.09518, -6.65494, -5.94752, 6.93878, 1.61033, -3.95728, 7.60682, 5.67016, 2.21196, -7.81507, -5.79413, -2.41152, 8.24128, -3.83738, 9.21115, 4.5516, 4.55288, -5.75551, -5.93258, 4.56545, 2.59384, -7.45614, -9.47115, -2.39568, 9.67642, 5.57816, 1.45712, -7.48184, -6.41134, -1.99415, 12.867, -8.35854, -6.69675, -7.52559, -7.6793, 5.7454, 3.1602, 2.94692, 1.87483, -8.77324, -6.66682, -3.21125, 8.68662, -6.25806, -7.24972, 5.17639, 1.0747, -2.44897, 11.4775, -3.30172, 8.89955, -2.85191, 8.21201, -8.85893, -6.1322, 4.08957, 1.30155, -5.88132, -7.31173, -7.10309, -7.22943, -2.46068, 8.18334, -7.01226, -7.85464, 4.75411, 2.12347, -3.42862, 10.5642, 7.16681, 1.4423, 5.42568, 2.39863, -6.00833, -8.22609, -1.7619, 9.62466, -2.49527, 8.99016, -2.98837, 8.82863, -2.97262, 8.54856, -1.34142, 9.26871, -5.99652, -6.95795, -1.87061, 7.35277, -8.68277, -8.46425, -7.01808, -8.10441, -7.04269, -7.62501, -7.69783, -6.88348, -2.19829, 10.4896, 4.67396, 1.2032, -5.58263, -6.90298, -5.69224, -4.29055, 4.77285, 1.27305, -3.33469, 8.6929, -2.54195, 8.47086, 4.46492, 1.21742, 5.41158, -0.875373, -8.68069, -7.42278, -3.88687, 8.07646, 4.6682, 2.00293, -8.29799, -8.64092, -1.86382, 10.3829, -6.51234, -5.04193, 4.54458, 2.25219, -1.93264, 9.32554, -3.06285, 7.81641, -6.90714, -5.10786, 4.69653, 2.50286, 6.43757, 2.61401, -1.85483, 8.9587, 4.60224, 3.07647, 4.4492, 2.1906, 5.02181, 2.40321, -2.22923, 7.8888, 5.68943, 1.43793, -6.71097, -6.43817, -5.00633, -5.80006, -2.43763, 8.53663, 5.72577, 2.44787, -6.57079, -5.17789, -5.77867, -4.92176, -6.57222, -6.06437, 3.96639, 2.25216, -7.95177, -9.80146, 4.92574, 2.30763, -7.6221, -8.20013, -6.4132, -6.91575, 4.01432, 2.36897, 3.0833, 1.54505, -1.99416, 9.52807, -7.85128, -8.25973, -0.86423, 8.76525, -6.31412, -8.64087, -8.07355, -6.73717, -2.52821, 8.01176, -5.82357, -6.65687, -7.08865, -7.73063, -5.56251, -6.99818, -2.12513, 
8.98159, -6.89834, -7.26863, -7.92654, -6.34346, 4.86201, 1.49442, 4.92905, 4.42847, -5.57789, -5.3186, 4.34232, 3.34888, 2.64614, 2.34723, -4.10363, 8.41491, -2.18648, 8.18706, -3.39871, 8.19848, -2.66098, 9.6026, -6.95927, -6.42774, -5.61392, -7.74628, 5.60376, 4.18369, 5.28536, 4.13642, 4.8428, 0.457426, -6.33816, -6.12095, -2.4394, 8.62897, 4.56938, 2.45967, 4.0582, 0.958413, 5.62164, 1.64834, 5.73119, 2.58231, 4.66806, 1.96405, -6.71905, -6.87706, -2.18503, 8.88414, -6.03901, -6.33338, -8.38435, -6.12005, 0.0641622, 9.0735, 5.19967, 3.05395, -5.48716, -7.13016, -6.85541, -5.46789, -1.88353, 8.15713, 4.27891, 3.1325, -2.75816, 9.98586, -2.03022, 9.34795, -7.66741, -7.50096, -3.39305, 9.16801, -8.49476, -5.71537, -1.68378, 9.8278, -7.41559, -6.07205, -3.15577, 7.93274, 5.22381, 1.61388, 3.65739, 1.74854, 4.94251, 1.21889, -7.12832, -5.27276, -9.58286, -6.20223, -2.21613, 8.29993, 5.34799, 2.92987, 4.09496, 2.37231, -7.25183, -5.79136, -6.46981, -7.12137, -6.28607, -9.8205, 4.52865, 1.06926, -3.10984, 8.72259, 3.61865, 2.68153, -5.96604, -7.68329, 3.11435, 1.28126, -1.1064, 7.61243, -2.17688, 8.2658, -3.27246, 7.2094, -5.55143, -6.32388, -1.69667, 10.3705, -2.16558, 7.25125, -6.36572, -6.70053, 4.12259, 3.38252, -4.80554, -7.79949, -5.23966, -6.13798, 4.21969, 1.69139, -1.98985, 10.547, -2.52269, 7.95658, -6.75642, -6.32862, -3.51521, 7.8001, 4.70435, -0.00229688, 6.25359, 2.4267, 5.82935, 0.745562, 5.24778, 2.15978, 5.48052, 1.32055, -3.05358, 9.12521, -3.18922, 9.24654, 4.47276, 2.11988, 5.36751, 2.02512, -2.18511, 8.6292, -2.48469, 9.51228, 5.57556, 3.24472, -2.58121, 10.0178, -6.12629, -6.49895, -4.54732, 8.0062, -4.20166, 10.5438, -7.61422, -7.69036, -4.42797, 8.98777, 4.45301, 1.53344, 4.59296, 2.45021, -6.81264, -6.36417, 4.62346, 3.16156, -5.93007, -8.36501, -2.78425, 6.71237, -6.17141, -6.64689, -5.20608, 8.95999, -7.30598, -5.73166, 4.39572, 2.93726, -1.89503, 9.77179, -5.683, -7.48989, 4.80924, 0.559455, -2.17793, 9.98983, 5.23728, 2.67434, -7.03976, -6.20877, 3.90435, 3.20926, -7.78536, -7.53388, -1.00684, 9.08838, -5.26741, -5.98327, 3.28002, 2.71942, -1.47166, 8.50427, -2.32733, 9.26251, 5.16271, 1.39947, -6.59093, -6.61979, -2.44492, 7.93654, -1.05805, 9.97356, -3.1109, 10.8666, 3.38834, 3.41693, 4.83098, 2.01961, -2.74013, 9.71049, -3.34892, 8.41489, 4.94768, 0.263001, 3.57477, 1.66795, 5.78915, 1.26999, -4.81812, -5.67174, -1.88508, 9.64263, 3.69048, 4.60555, 4.03037, 1.7862, -7.4418, -7.08933}, {0.127717, 0.211407, 0.195547, 0.21633, 0.39671, 0.229008, 0.20839, 0.169236, 0.314314, 0.322473, 0.169506, 0.45499, 0.147819, 0.296502, 0.15198, 0.356444, 0.0992833, 0.220833, 0.296206, 0.178067, 0.135359, 0.189725, 0.243099, 0.519986, 0.168105, 0.273465, 0.126033, 0.18045, 0.282832, 0.193901, 0.213704, 0.425046, 0.203191, 0.228674, 0.209267, 0.355039, 0.212918, 0.315495, 0.294112, 0.257576, 0.5786, 0.186019, 0.171919, 0.171919, 0.449151, 1.34947, 0.171919, 0.16341, 0.641387, 0.342115, 0.267343, 0.246125, 0.277612, 0.181462, 0.22944, 1.95598, 0.164897, 0.235803, 0.228273, 0.314629, 0.127403, 0.241241, 0.189362, 0.151691, 0.130085, 0.526707, 0.217069, 0.282306, 0.531523, 0.177035, 0.169776, 0.20395, 0.177165, 0.146628, 0.280013, 0.223033, 0.50947, 0.184133, 0.295329, 0.183219, 0.28166, 0.179348, 0.276462, 1.00283, 0.248147, 0.214453, 0.231732, 0.170672, 0.256893, 0.133271, 0.151137, 0.500823, 0.23678, 0.376983, 0.362061, 0.140013, 0.388863, 0.398552, 0.38015, 0.190081, 0.167115, 0.206884, 0.473849, 1.05117, 0.435665, 0.323618, 0.326201, 0.32226, 0.201787, 0.246496, 0.28325, 
0.226596, 0.238153, 0.277268, 0.674629, 0.179433, 0.175651, 0.154778, 0.178195, 0.192796, 0.103571, 0.227621, 0.201124, 0.160525, 0.160964, 0.240099, 0.258027, 0.134127, 0.127717, 0.341378, 0.311595, 0.282306, 0.168988, 0.40775, 0.246125, 0.583131, 0.236804, 0.238633, 0.194824, 0.169315, 0.244227, 0.249511, 0.189725, 0.305662, 0.301415, 0.658641, 0.250944, 0.151792, 0.141383, 0.143843, 0.563347, 0.184216, 0.204155, 0.221764, 0.314908, 0.144518, 0.228808, 0.255785, 0.163457, 0.424705, 0.170202, 0.312598, 0.300629, 0.532614, 0.661392, 0.228273, 0.543432, 0.257175, 0.258994, 0.281413, 0.273897, 0.246837, 0.293489, 0.25533, 0.260492, 0.213704, 0.3091, 0.17103, 0.172285, 0.241399, 0.35999, 0.372243, 0.269191, 0.390239, 0.31761, 0.200593, 0.22197, 0.752914, 0.266571, 0.13102, 0.268659, 0.293723, 0.356294, 0.296258, 0.264531, 0.15468, 0.358535, 0.243711, 0.112147, 0.121659, 0.197101, 0.515292, 0.245628, 0.279863, 0.789807, 0.195156, 0.196073, 0.149564, 0.118675, 0.389373, 0.233821, 0.176128, 0.481088, 0.360027, 0.553152, 0.208207, 0.171608, 0.160489, 0.334298, 0.139426, 0.168603, 0.266199, 0.326458, 0.103571, 0.171208, 0.130961, 0.190887, 0.177229, 0.241651, 0.115152, 0.196753, 0.481088, 0.230965, 0.354631, 0.14591, 0.328543, 0.141544, 0.195888, 0.290379, 0.245954, 0.184547, 0.575214, 0.186929, 0.28527, 0.292213, 1.20033, 0.281528, 0.15625, 0.211524, 0.186398, 0.298061, 0.147393, 0.245349, 0.164527, 0.224771, 0.222382, 0.251643, 0.148835, 0.135359, 0.204967, 0.193024, 0.486309, 0.389686, 0.211921, 0.307405, 0.38666, 0.26802, 0.16605, 0.323134, 0.268397, 0.217894, 0.974118, 0.371618, 0.156201, 0.305787, 0.339305, 0.371032, 0.381765, 0.22747, 0.24906, 0.100884, 0.253192, 0.314253, 0.388289, 0.580947, 1.00267, 0.241998, 0.489101, 0.341501, 0.247423, 0.328311, 0.440281, 0.14927, 0.244469, 0.846828, 0.191725, 0.217429, 0.123403, 0.322875, 0.145373, 0.757259, 0.190086, 0.316286, 0.268397, 0.296721, 0.440472, 0.186848, 0.232134, 0.180239, 0.219724, 0.205886, 0.250975, 0.145636, 0.312476, 0.366418, 0.128135, 0.315235, 0.264531, 0.161815, 0.31631, 0.296489, 0.37171, 0.197217, 0.195625, 0.479579, 0.443037, 0.323347, 0.193616, 0.160251, 0.8952, 0.256291, 0.593345, 0.177165, 0.409514, 0.847863, 0.111448, 0.210031, 0.251347, 0.351953, 0.705204, 0.117901, 0.182343, 0.230179, 0.83632, 0.22104, 0.145163, 0.200326, 0.23431, 0.21868, 0.253575, 0.186562, 0.192757, 0.172716, 0.27396, 0.258581, 0.327892, 0.376138, 0.223477, 0.302375, 0.145845, 0.436902, 0.421794, 0.328543, 0.19246, 0.238889, 0.254866, 0.284674, 0.457849, 0.202937, 0.392568, 0.453083, 0.782713, 0.465401, 0.178623, 0.304863, 0.190081, 0.228641, 0.255135, 0.245037, 0.217526, 0.109584, 0.276462, 0.182301, 0.38582, 0.349942, 1.3889, 0.30235, 0.796353, 0.160168, 0.643204, 0.153752, 0.410268, 0.186439, 0.256834, 0.185783, 0.0957629, 0.226596, 0.197951, 0.17123, 0.192836, 0.18405, 0.575784, 0.228874, 0.201787, 0.241209, 0.217386, 0.195751, 0.291585, 0.144531, 0.14176, 0.157635, 0.410268, 0.476338, 0.308148, 0.148077, 0.152093, 0.196791, 0.568087, 0.414026, 0.250587, 0.473463, 0.293645, 0.396768, 0.2766, 0.38664, 0.135034, 1.50827, 0.472527, 0.268418, 0.40383, 0.375914, 0.246496, 0.176474, 0.340405, 0.220833, 0.138782, 0.159009, 0.444219, 0.259582, 0.33638, 0.195586, 0.210974, 0.200288, 0.148129, 0.0974216, 0.211588, 0.280081, 0.44113, 0.773921, 0.553848, 0.448079, 0.183136, 0.380854, 0.685021, 0.308767, 0.553276, 0.181578, 0.164759, 0.313889, 0.137886, 0.545387, 0.278449, 0.736895, 0.360054, 0.358929, 0.457315, 0.343278, 0.507662, 0.280829, 0.113886, 
0.23146, 0.160584, 0.192796, 0.147561, 0.241272, 0.168988, 0.730511, 0.27836, 0.179847, 0.22555, 0.418069, 0.158348, 0.128965, 0.179454, 0.126366, 0.164434, 0.273633, 0.309556, 0.500823, 0.367852, 0.192875, 0.230262, 0.32724, 0.249969, 0.142618, 0.494229, 0.36108, 0.227931, 0.23113, 0.742825, 0.190126, 0.33741, 0.280598, 0.145268, 0.378423, 0.211921, 0.183594, 0.59201, 0.279563, 0.195683, 0.248101, 0.199754, 0.342494, 0.174343, 0.14149, 0.28085, 0.175781, 0.518738, 0.17223, 0.489904, 0.181167, 0.354286, 0.297824, 0.280829, 0.219412, 0.22814, 0.195625, 0.313949, 0.294708, 0.211551, 0.236255, 0.666933, 0.204808, 0.52591, 0.180725, 0.186889, 0.246589, 0.410575, 0.338348, 0.206219, 0.361766, 0.158143, 0.280816, 0.4149, 0.773082, 0.340046, 0.369672, 0.256923, 0.167195, 0.197217, 0.252339, 0.172716, 0.191526, 0.263085, 0.345698, 0.168286, 0.243099, 0.434631, 0.22944, 0.161862, 0.206589, 0.23457, 0.181924, 0.419063, 0.183427, 0.186152, 0.236352, 0.306336, 0.149002, 1.50086, 0.188231, 0.442757, 0.485602, 0.466662, 0.17329, 0.141329, 0.180619, 0.160061, 0.192569, 0.270999, 0.117901, 0.362693, 0.217561, 0.208975, 0.233658, 0.175173, 1.10307, 0.14625, 1.31124, 0.237608, 0.286784, 0.325112, 0.2485, 0.259641, 0.553152, 0.179039, 0.780781, 0.174758, 0.297824, 0.2558, 0.235949, 0.952186, 0.356744, 0.312646, 0.189362, 0.574524, 0.705204, 0.213168, 0.225956, 0.424165, 0.169506, 0.137109, 0.352451, 0.454554, 0.653302, 0.31261, 0.194412, 0.23719, 0.137886, 0.31498, 0.199085, 0.203875, 0.597248, 1.10036, 0.196869, 0.22104, 0.451345, 0.105613, 0.683928, 0.135204, 0.25533, 0.607871, 0.219724, 0.184464, 0.725001, 0.160061, 0.333407, 0.192569, 0.234147, 0.47178, 0.161815, 0.242455, 0.215305, 0.410575, 0.242376, 0.211335, 0.462804, 0.275065, 0.126878, 0.170404, 0.179433, 0.147244, 0.109584, 0.352905, 0.158215, 0.197604, 0.172407, 0.407506, 0.645446, 0.313061, 0.165602, 0.136663, 0.55444, 0.15527, 0.133128, 0.125912, 0.340405, 0.44521, 0.122783, 0.814526, 0.243773, 0.15743, 0.266743, 0.684458, 0.22221, 0.181294, 0.193901, 0.258802, 0.167195, 0.292056, 0.132309, 0.227671, 0.117334, 0.271758, 0.146185, 0.225042, 0.225964, 0.194863, 0.290274, 0.138438, 0.196714, 0.266012, 0.267771, 0.162544, 0.244258, 0.358038, 0.522617, 0.192875, 0.45066, 0.330396, 0.223477, 0.42967, 0.350884, 0.404655, 0.123155, 0.431583, 0.191675, 0.147354, 0.609034, 0.459487, 0.187337, 0.215128, 0.604169, 0.330165, 0.494229, 0.40775, 0.167377, 0.192648, 0.234635, 0.275578, 0.253094, 0.420063, 0.228299, 0.206478, 0.20395, 0.377656, 0.317393, 0.478623, 0.159009, 0.217034, 0.300933, 0.139754, 0.153901, 0.261077, 0.22834, 0.449609, 0.157672, 0.176474, 0.285704, 0.180186, 0.212738, 0.266428, 0.388313, 0.0954637, 0.298093, 0.251643, 0.330696, 0.159572, 0.210666, 0.149411, 0.139618, 0.338472, 0.450304, 0.208793, 0.583609, 0.185865, 0.400576, 0.21626, 0.174867, 0.239144, 0.249113, 0.200402, 0.275065, 0.238793, 0.205784, 0.4475, 0.231262, 0.259082, 0.20934, 0.16806, 0.193616, 0.213811, 0.395632, 0.482465, 0.274649, 0.307405, 0.165866, 0.334275, 0.683337, 0.368825, 0.14625, 0.780742, 0.163457, 0.226596, 0.138713, 1.79155, 0.400443, 0.233658, 0.426399, 0.623024, 0.670955, 0.123588, 0.110899, 0.173751, 0.651068, 0.199983, 0.190887, 0.541435, 0.21324, 0.266571, 0.134638, 0.179348, 0.145636, 0.170929, 0.623252, 0.587738, 0.109688, 0.515314, 0.217666, 0.213311, 0.249144, 0.187947, 0.270999, 0.268311, 0.469782, 0.763609, 0.32124, 0.146315, 0.265223, 0.298694, 0.197623, 0.21349, 0.845778, 0.175466, 0.123588, 0.17223, 0.258603, 1.17119, 0.538142, 0.407675, 
0.120288, 0.587238, 0.244664, 0.333956, 0.132812, 0.21399, 0.302375, 0.275882, 0.134284, 0.377555, 0.228541, 0.187307, 0.143804, 0.180545, 0.222451, 0.239638, 0.188028, 0.46334, 0.175868, 0.242392, 0.314762, 0.44473, 0.21962, 0.175966, 1.12364, 0.138837, 0.400576, 0.18184, 0.137706, 0.409763, 0.216894, 0.466662, 0.376604, 0.487155, 0.283143, 0.118547, 0.221591, 0.122783, 0.179007, 0.16628, 0.180999, 0.239845, 0.169607, 0.578402, 0.396537, 0.222288, 0.563237, 0.371238, 0.138658, 0.324336, 0.191526, 0.168603, 0.357715, 0.640905, 0.460706, 0.220902, 0.240797, 0.164062, 0.157853, 0.34457, 0.196092, 0.289353, 0.104597, 0.259641, 0.126878, 0.175781, 0.441458, 0.820108, 0.261864, 0.23431, 0.254506, 0.271955, 0.227529, 0.22834, 0.196753, 0.224906, 0.193783, 0.419481, 0.236933, 0.229706, 0.29785, 0.222947, 0.177606, 0.216911, 0.305188, 0.933438, 0.116666, 0.278483, 0.0973824, 0.271224, 0.127717, 1.28139, 0.276283, 0.180704, 0.234554, 0.285984, 0.290172, 0.49594, 0.135879, 0.436784, 0.206219, 0.342215, 0.374165, 0.182217, 0.274864, 0.625, 0.356925, 0.194324, 0.342215, 0.113012, 0.155123, 0.254207, 0.438919, 0.262548, 0.302299, 0.179528, 0.312744, 0.168513, 0.142618, 0.150543, 0.231361, 0.166004, 0.186725, 0.38848, 0.179857, 0.182301, 0.629476, 0.44113, 0.289669, 0.328543, 0.279938, 0.14625, 0.187174, 0.157635, 0.396749, 0.798931, 0.201541, 0.778619, 0.265883, 0.258027, 0.218576, 0.266571, 0.160168, 0.230303, 0.273633, 0.233298, 0.30175, 0.217069, 0.345145, 0.397901, 0.224499, 0.248101, 0.241335, 0.222947, 0.237094, 0.176518, 0.380032, 0.634775, 0.426193, 0.16362, 0.231097, 0.219898, 0.343789, 0.275578, 0.282022, 0.628542, 0.232184, 0.848367, 0.200754, 0.179177}, {0, 0, 2, 3, 3, 0, 2, 2, 2, 2, 3, 0, 3, 2, 2, 2, 3, 3, 3, 3, 2, 0, 0, 0, 2, 3, 3, 3, 2, 2, 0, 0, 2, 3, 3, 0, 0, 2, 0, 0, 3, 2, 3, 0, 3, 0, 3, 3, 0, 2, 0, 3, 2, 0, 3, 0, 3, 3, 3, 2, 2, 3, 0, 0, 3, 3, 0, 2, 2, 3, 0, 3, 2, 2, 2, 0, 2, 3, 3, 3, 2, 3, 3, 3, 2, 0, 2, 0, 3, 3, 3, 3, 2, 2, 0, 2, 0, 3, 2, 2, 2, 0, 0, 3, 0, 2, 2, 3, 2, 3, 0, 2, 2, 2, 3, 2, 0, 0, 2, 3, 3, 2, 0, 2, 0, 0, 2, 0, 2, 2, 3, 2, 2, 0, 3, 0, 3, 2, 2, 2, 3, 3, 0, 0, 0, 3, 2, 3, 3, 3, 3, 0, 2, 0, 3, 2, 3, 2, 3, 0, 2, 3, 3, 2, 3, 3, 2, 2, 0, 0, 2, 3, 3, 2, 3, 0, 2, 0, 2, 0, 3, 2, 3, 2, 3, 0, 3, 0, 3, 0, 2, 3, 2, 2, 3, 0, 2, 2, 2, 0, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 0, 0, 2, 2, 3, 0, 3, 0, 2, 0, 0, 2, 3, 0, 3, 3, 2, 0, 3, 3, 0, 3, 0, 2, 2, 0, 2, 0, 2, 0, 0, 0, 2, 0, 3, 2, 3, 2, 3, 2, 2, 0, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 2, 0, 0, 2, 3, 3, 0, 2, 3, 2, 2, 3, 0, 3, 0, 0, 2, 0, 2, 0, 2, 2, 3, 3, 2, 3, 0, 0, 3, 2, 2, 0, 3, 2, 0, 0, 3, 0, 0, 2, 0, 3, 2, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 2, 0, 0, 2, 0, 2, 3, 2, 3, 3, 2, 2, 0, 0, 0, 3, 0, 2, 0, 2, 0, 2, 2, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 2, 3, 3, 2, 3, 3, 0, 2, 2, 2, 2, 0, 2, 0, 0, 0, 2, 2, 3, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 2, 2, 0, 3, 0, 2, 0, 2, 3, 0, 3, 0, 0, 0, 3, 2, 3, 3, 0, 3, 2, 3, 0, 2, 3, 3, 0, 2, 3, 0, 0, 0, 2, 0, 3, 0, 2, 3, 3, 3, 3, 3, 0, 2, 0, 2, 2, 3, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 0, 0, 2, 3, 3, 2, 3, 0, 0, 0, 0, 3, 3, 0, 3, 2, 0, 2, 3, 2, 2, 3, 3, 2, 2, 2, 0, 2, 3, 0, 3, 3, 0, 0, 2, 0, 3, 2, 3, 0, 2, 0, 2, 2, 3, 2, 0, 3, 3, 3, 2, 3, 0, 3, 0, 2, 2, 0, 0, 0, 3, 0, 3, 3, 2, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 3, 3, 3, 3, 3, 3, 2, 0, 3, 2, 2, 2, 3, 3, 2, 3, 0, 2, 3, 3, 2, 2, 0, 0, 0, 0, 3, 0, 3, 3, 3, 0, 0, 0, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 3, 3, 0, 0, 2, 2, 3, 3, 2, 2, 0, 0, 3, 0, 0, 0, 2, 3, 0, 0, 0, 3, 0, 3, 0, 2, 2, 0, 0, 0, 0, 3, 2, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 0, 2, 3, 0, 3, 3, 0, 3, 0, 0, 2, 0, 3, 3, 0, 2, 2, 3, 3, 
0, 0, 2, 0, 2, 3, 2, 0, 0, 3, 3, 0, 3, 2, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 0, 2, 2, 0, 0, 2, 0, 3, 3, 2, 3, 2, 0, 3, 0, 2, 2, 3, 3, 0, 3, 2, 2, 0, 3, 0, 0, 0, 2, 0, 3, 2, 0, 2, 3, 2, 3, 2, 2, 3, 3, 0, 2, 3, 2, 3, 2, 2, 0, 3, 0, 3, 0, 2, 2, 2, 0, 2, 0, 2, 2, 0, 0, 3, 3, 0, 0, 3, 2, 0, 2, 3, 2, 2, 0, 3, 3, 0, 2, 0, 3, 3, 0, 2, 3, 2, 3, 2, 0, 2, 2, 0, 0, 0, 2, 2, 3, 3, 2, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 0, 3, 3, 3, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 2, 0, 3, 2, 0, 3, 0, 0, 2, 2, 0, 3, 0, 2, 3, 3, 3, 0, 2, 0, 0, 3, 0, 2, 3, 2, 2, 0, 3, 3, 3, 3, 3, 0, 3, 0, 0, 0, 0, 3, 2, 0, 0, 2, 3, 3, 2, 2, 0, 3, 2, 0, 3, 0, 2, 3, 3, 0, 2, 2, 3, 2, 2, 2, 3, 2, 0, 0, 3, 2, 0, 0, 0, 2, 0, 2, 0, 0, 2, 2, 3, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0, 0, 2, 2, 0, 2, 2, 3, 3, 3, 3, 0, 0, 2, 2, 2, 0, 3, 2, 2, 2, 2, 2, 0, 3, 0, 0, 3, 2, 0, 0, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 2, 2, 2, 0, 0, 3, 2, 2, 0, 0, 0, 2, 3, 2, 0, 2, 3, 3, 3, 0, 3, 3, 0, 2, 0, 0, 2, 3, 3, 0, 3, 2, 2, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 3, 0, 3, 3, 0, 3, 2, 2, 0, 2, 0, 3, 0, 3, 0, 2, 3, 0, 2, 3, 2, 0, 2, 0, 3, 0, 2, 3, 3, 2, 0, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 0, 3, 2, 2, 0}, {271, 271, 329, 343, 387, 426, 426, 601}, {426, 601, 426, 387, 343, 271, 329, 271}, {3.70991, 4.43491, 3.76334, 9.43944, 9.43944, 3.70991, 3.76334, 4.43491}}}; typedef ConnectComponentsEdgesTest<int, float> ConnectComponentsEdgesTestF_Int; TEST_P(ConnectComponentsEdgesTestF_Int, Result) { EXPECT_TRUE(true); } INSTANTIATE_TEST_CASE_P(ConnectComponentsEdgesTest, ConnectComponentsEdgesTestF_Int, ::testing::ValuesIn(mr_fix_conn_inputsf2)); }; // namespace sparse }; // end namespace raft
597f07cdfab775a09b6f12fe5c71e680e2f10215.cu
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// XXX: We allow the instantiation of masked_l2_nn here:
// raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row);
// raft::linkage::cross_component_nn<value_idx, value_t>(
//   handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op);
//
// TODO: consider adding this to libraft.so or creating an instance in a
// separate translation unit for this test.
//
// TODO: edge case testing. Reference: https://github.com/rapidsai/raft/issues/1669

#include <gtest/gtest.h>

#include <raft/core/resource/cuda_stream.hpp>

#include <cub/cub.cuh>

#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <vector>

#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/sparse/mst/mst.cuh>
#include <raft/sparse/neighbors/knn_graph.cuh>
#include <raft/sparse/selection/cross_component_nn.cuh>

#include <raft/distance/distance_types.hpp>
#include <raft/linalg/transpose.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/hierarchy/single_linkage.cuh>

#include <rmm/device_uvector.hpp>

#include "../../test_utils.cuh"

namespace raft {
namespace sparse {

using namespace std;

template <typename value_t, typename value_idx>
struct ConnectComponentsInputs {
  value_idx n_row;
  value_idx n_col;
  std::vector<value_t> data;
  int c;
};

template <typename value_idx, typename value_t>
class ConnectComponentsTest
  : public ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>> {
 protected:
  void basicTest()
  {
    raft::resources handle;
    auto stream = resource::get_cuda_stream(handle);

    params = ::testing::TestWithParam<ConnectComponentsInputs<value_t, value_idx>>::GetParam();

    raft::sparse::COO<value_t, value_idx> out_edges(resource::get_cuda_stream(handle));
    raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle));

    rmm::device_uvector<value_t> data(params.n_row * params.n_col,
                                      resource::get_cuda_stream(handle));

    raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle));

    rmm::device_uvector<value_idx> indptr(params.n_row + 1, stream);

    /**
     * 1. Construct knn graph
     */
    raft::sparse::COO<value_t, value_idx> knn_graph_coo(stream);

    raft::sparse::neighbors::knn_graph(handle,
                                       data.data(),
                                       params.n_row,
                                       params.n_col,
                                       raft::distance::DistanceType::L2SqrtExpanded,
                                       knn_graph_coo,
                                       params.c);

    raft::sparse::convert::sorted_coo_to_csr(
      knn_graph_coo.rows(), knn_graph_coo.nnz, indptr.data(), params.n_row + 1, stream);

    /**
     * 2. Construct MST, sorted by weights
     */
    rmm::device_uvector<value_idx> colors(params.n_row, stream);

    auto mst_coo = raft::mst::mst<value_idx, value_idx, value_t, double>(handle,
                                                                         indptr.data(),
                                                                         knn_graph_coo.cols(),
                                                                         knn_graph_coo.vals(),
                                                                         params.n_row,
                                                                         knn_graph_coo.nnz,
                                                                         colors.data(),
                                                                         stream,
                                                                         false,
                                                                         true);

    /**
     * 3.
cross_component_nn to fix connectivities */ raft::linkage::FixConnectivitiesRedOp<value_idx, value_t> red_op(params.n_row); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row, params.n_col); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_batched, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row / 2, params.n_col / 2); ASSERT_TRUE(out_edges.nnz == out_edges_batched.nnz); ASSERT_TRUE( devArrMatch(out_edges.rows(), out_edges_batched.rows(), out_edges.nnz, Compare<int>())); ASSERT_TRUE( devArrMatch(out_edges.cols(), out_edges_batched.cols(), out_edges.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch( out_edges.vals(), out_edges_batched.vals(), out_edges.nnz, CompareApprox<float>(1e-4))); /** * Construct final edge list */ rmm::device_uvector<value_idx> indptr2(params.n_row + 1, stream); raft::sparse::convert::sorted_coo_to_csr( out_edges.rows(), out_edges.nnz, indptr2.data(), params.n_row + 1, stream); auto output_mst = raft::mst::mst<value_idx, value_idx, value_t>(handle, indptr2.data(), out_edges.cols(), out_edges.vals(), params.n_row, out_edges.nnz, colors.data(), stream, false, false); resource::sync_stream(handle, stream); // The sum of edges for both MST runs should be n_rows - 1 final_edges = output_mst.n_edges + mst_coo.n_edges; } void SetUp() override { basicTest(); } void TearDown() override {} protected: ConnectComponentsInputs<value_t, value_idx> params; value_idx final_edges; }; const std::vector<ConnectComponentsInputs<float, int>> fix_conn_inputsf2 = { // Test n_clusters == n_points {10, 5, {0.21390334, 0.50261639, 0.91036676, 0.59166485, 0.71162682, 0.10248392, 0.77782677, 0.43772379, 0.4035871, 0.3282796, 0.47544681, 0.59862974, 0.12319357, 0.06239463, 0.28200272, 0.1345717, 0.50498218, 0.5113505, 0.16233086, 0.62165332, 0.42281548, 0.933117, 0.41386077, 0.23264562, 0.73325968, 0.37537541, 0.70719873, 0.14522645, 0.73279625, 0.9126674, 0.84854131, 0.28890216, 0.85267903, 0.74703138, 0.83842071, 0.34942792, 0.27864171, 0.70911132, 0.21338564, 0.32035554, 0.73788331, 0.46926692, 0.57570162, 0.42559178, 0.87120209, 0.22734951, 0.01847905, 0.75549396, 0.76166195, 0.66613745}, -1}, // Test n_points == 100 {100, 10, {6.26168372e-01, 9.30437651e-01, 6.02450208e-01, 2.73025296e-01, 9.53050619e-01, 3.32164396e-01, 6.88942598e-01, 5.79163537e-01, 6.70341547e-01, 2.70140602e-02, 9.30429671e-01, 7.17721157e-01, 9.89948537e-01, 7.75253347e-01, 1.34491522e-02, 2.48522428e-02, 3.51413378e-01, 7.64405834e-01, 7.86373507e-01, 7.18748577e-01, 8.66998621e-01, 6.80316582e-01, 2.51288712e-01, 4.91078420e-01, 3.76246281e-01, 4.86828710e-01, 5.67464772e-01, 5.30734742e-01, 8.99478296e-01, 7.66699088e-01, 9.49339111e-01, 3.55248484e-01, 9.06046929e-01, 4.48407772e-01, 6.96395305e-01, 2.44277335e-01, 7.74840000e-01, 5.21046603e-01, 4.66423971e-02, 5.12019638e-02, 8.95019614e-01, 5.28956953e-01, 4.31536306e-01, 5.83857744e-01, 4.41787364e-01, 4.68656523e-01, 5.73971433e-01, 6.79989654e-01, 3.19650588e-01, 6.12579596e-01, 6.49126442e-02, 8.39131142e-01, 2.85252117e-01, 5.84848929e-01, 9.46507115e-01, 8.58440748e-01, 3.61528940e-01, 2.44215959e-01, 3.80101125e-01, 4.57128957e-02, 8.82216988e-01, 8.31498633e-01, 7.23474381e-01, 7.75788607e-01, 1.40864146e-01, 6.62092382e-01, 5.13985168e-01, 3.00686418e-01, 8.70109949e-01, 2.43187753e-01, 2.89391938e-01, 2.84214238e-01, 8.70985521e-01, 8.77491176e-01, 6.72537226e-01, 3.30929686e-01, 1.85934324e-01, 
9.16222614e-01, 6.18239142e-01, 2.64768597e-01, 5.76145451e-01, 8.62961369e-01, 6.84757925e-01, 7.60549082e-01, 1.27645356e-01, 4.51004673e-01, 3.92292980e-01, 4.63170803e-01, 4.35449330e-02, 2.17583404e-01, 5.71832605e-02, 2.06763039e-01, 3.70116249e-01, 2.09750028e-01, 6.17283019e-01, 8.62549231e-01, 9.84156240e-02, 2.66249156e-01, 3.87635103e-01, 2.85591012e-02, 4.24826068e-01, 4.45795088e-01, 6.86227676e-01, 1.08848960e-01, 5.96731841e-02, 3.71770228e-01, 1.91548833e-01, 6.95136078e-01, 9.00700636e-01, 8.76363105e-01, 2.67334632e-01, 1.80619709e-01, 7.94060419e-01, 1.42854171e-02, 1.09372387e-01, 8.74028108e-01, 6.46403232e-01, 4.86588834e-01, 5.93446175e-02, 6.11886291e-01, 8.83865057e-01, 3.15879821e-01, 2.27043992e-01, 9.76764951e-01, 6.15620336e-01, 9.76199360e-01, 2.40548962e-01, 3.21795663e-01, 8.75087904e-02, 8.11234663e-01, 6.96070480e-01, 8.12062321e-01, 1.21958818e-01, 3.44348628e-02, 8.72630414e-01, 3.06162776e-01, 1.76043529e-02, 9.45894971e-01, 5.33896401e-01, 6.21642973e-01, 4.93062535e-01, 4.48984262e-01, 2.24560379e-01, 4.24052195e-02, 4.43447610e-01, 8.95646149e-01, 6.05220676e-01, 1.81840491e-01, 9.70831206e-01, 2.12563586e-02, 6.92582693e-01, 7.55946922e-01, 7.95086143e-01, 6.05328941e-01, 3.99350764e-01, 4.32846636e-01, 9.81114529e-01, 4.98266428e-01, 6.37127930e-03, 1.59085889e-01, 6.34682067e-05, 5.59429440e-01, 7.38827633e-01, 8.93214770e-01, 2.16494306e-01, 9.35430573e-02, 4.75665868e-02, 7.80503518e-01, 7.86240041e-01, 7.06854594e-01, 2.13725879e-02, 7.68246091e-01, 4.50234808e-01, 5.21231104e-01, 5.01989826e-03, 4.22081572e-02, 1.65337732e-01, 8.54134740e-01, 4.99430262e-01, 8.94525601e-01, 1.14028379e-01, 3.69739861e-01, 1.32955599e-01, 2.65563824e-01, 2.52811151e-01, 1.44792843e-01, 6.88449594e-01, 4.44921417e-01, 8.23296587e-01, 1.93266317e-01, 1.19033309e-01, 1.36368966e-01, 3.42600285e-01, 5.64505195e-01, 5.57594559e-01, 7.44257892e-01, 8.38231569e-02, 4.11548847e-01, 3.21010077e-01, 8.55081359e-01, 4.30105779e-01, 1.16229135e-01, 9.87731964e-02, 3.14712335e-01, 4.50880592e-01, 2.72289598e-01, 6.31615256e-01, 8.97432958e-01, 4.44764250e-01, 8.03776440e-01, 2.68767748e-02, 2.43374608e-01, 4.02141103e-01, 4.98881209e-01, 5.33173003e-01, 8.82890436e-01, 7.16149148e-01, 4.19664401e-01, 2.29335357e-01, 2.88637806e-01, 3.44696803e-01, 6.78171906e-01, 5.69849716e-01, 5.86454477e-01, 3.54474989e-01, 9.03876540e-01, 6.45980000e-01, 6.34887593e-01, 7.88039746e-02, 2.04814126e-01, 7.82251754e-01, 2.43147074e-01, 7.50951808e-01, 1.72799092e-02, 2.95349590e-01, 6.57991826e-01, 8.81214312e-01, 5.73970708e-01, 2.77610881e-01, 1.82155097e-01, 7.69797417e-02, 6.44792402e-01, 9.46950998e-01, 7.73064845e-01, 6.04733624e-01, 5.80094567e-01, 1.67498426e-01, 2.66514296e-01, 6.50140368e-01, 1.91170299e-01, 2.08752199e-01, 3.01664091e-01, 9.85033484e-01, 2.92909152e-01, 8.65816607e-01, 1.85222119e-01, 2.28814559e-01, 1.34286382e-02, 2.89234322e-01, 8.18668708e-01, 4.71706924e-01, 9.23199803e-01, 2.80879188e-01, 1.47319284e-01, 4.13915748e-01, 9.31274932e-02, 6.66322195e-01, 9.66953974e-01, 3.19405786e-01, 6.69486551e-01, 5.03096313e-02, 6.95225201e-01, 5.78469859e-01, 6.29481655e-01, 1.39252534e-01, 1.22564968e-01, 6.80663678e-01, 6.34607157e-01, 6.42765834e-01, 1.57127410e-02, 2.92132086e-01, 5.24423878e-01, 4.68676824e-01, 2.86003928e-01, 7.18608322e-01, 8.95617933e-01, 5.48844309e-01, 1.74517278e-01, 5.24379196e-01, 2.13526524e-01, 5.88375435e-01, 9.88560185e-01, 4.17435771e-01, 6.14438688e-01, 9.53760881e-01, 5.27151288e-01, 7.03017278e-01, 3.44448559e-01, 4.47059676e-01, 
2.83414901e-01, 1.98979011e-01, 4.24917361e-01, 5.73172761e-01, 2.32398853e-02, 1.65887230e-01, 4.05552785e-01, 9.29665524e-01, 2.26135696e-01, 9.20563384e-01, 7.65259963e-01, 4.54820075e-01, 8.97710267e-01, 3.78559302e-03, 9.15219382e-01, 3.55705698e-01, 6.94905124e-01, 8.58540202e-01, 3.89790666e-01, 2.49478206e-01, 7.93679304e-01, 4.75830027e-01, 4.40425353e-01, 3.70579459e-01, 1.40578049e-01, 1.70386675e-01, 7.04056121e-01, 4.85963102e-01, 9.68450060e-01, 6.77178001e-01, 2.65934654e-01, 2.58915007e-01, 6.70052890e-01, 2.61945109e-01, 8.46207759e-01, 1.01928951e-01, 2.85611334e-01, 2.45776933e-01, 2.66658783e-01, 3.71724077e-01, 4.34319025e-01, 4.24407347e-01, 7.15417683e-01, 8.07997684e-01, 1.64296275e-01, 6.01638065e-01, 8.60606804e-02, 2.68719187e-01, 5.11764101e-01, 9.75844338e-01, 7.81226782e-01, 2.20925515e-01, 7.18135040e-01, 9.82395577e-01, 8.39160243e-01, 9.08058083e-01, 6.88010677e-01, 8.14271847e-01, 5.12460821e-01, 1.17311345e-01, 5.96075228e-01, 9.17455497e-01, 2.12052706e-01, 7.04074603e-01, 8.72872565e-02, 8.76047818e-01, 6.96235046e-01, 8.54801557e-01, 2.49729159e-01, 9.76594604e-01, 2.87386363e-01, 2.36461559e-02, 9.94075254e-01, 4.25193986e-01, 7.61869994e-01, 5.13334255e-01, 6.44711165e-02, 8.92156689e-01, 3.55235167e-01, 1.08154647e-01, 8.78446825e-01, 2.43833016e-01, 9.23071293e-01, 2.72724115e-01, 9.46631338e-01, 3.74510294e-01, 4.08451278e-02, 9.78392777e-01, 3.65079221e-01, 6.37199516e-01, 5.51144906e-01, 5.25978080e-01, 1.42803678e-01, 4.05451674e-01, 7.79788219e-01, 6.26009784e-01, 3.35249497e-01, 1.43159543e-02, 1.80363779e-01, 5.05096904e-01, 2.82619947e-01, 5.83561392e-01, 3.10951324e-01, 8.73223968e-01, 4.38545619e-01, 4.81348800e-01, 6.68497085e-01, 3.79345401e-01, 9.58832501e-01, 1.89869550e-01, 2.34083070e-01, 2.94066207e-01, 5.74892667e-02, 6.92106828e-02, 9.61127686e-02, 6.72650672e-02, 8.47345378e-01, 2.80916761e-01, 7.32177357e-03, 9.80785961e-01, 5.73192225e-02, 8.48781331e-01, 8.83225408e-01, 7.34398275e-01, 7.70381941e-01, 6.20778343e-01, 8.96822048e-01, 5.40732486e-01, 3.69704071e-01, 5.77305837e-01, 2.08221827e-01, 7.34275341e-01, 1.06110900e-01, 3.49496706e-01, 8.34948910e-01, 1.56403291e-02, 6.78576376e-01, 8.96141268e-01, 5.94835119e-01, 1.43943153e-01, 3.49618530e-01, 2.10440392e-01, 3.46585620e-01, 1.05153093e-01, 3.45446174e-01, 2.72177079e-01, 7.07946300e-01, 4.33717726e-02, 3.31232203e-01, 3.91874320e-01, 4.76338141e-01, 6.22777789e-01, 2.95989228e-02, 4.32855769e-01, 7.61049310e-01, 3.63279149e-01, 9.47210350e-01, 6.43721247e-01, 6.58025802e-01, 1.05247633e-02, 5.29974442e-01, 7.30675767e-01, 4.30041079e-01, 6.62634841e-01, 8.25936616e-01, 9.91253704e-01, 6.79399281e-01, 5.44177006e-01, 7.52876048e-01, 3.32139049e-01, 7.98732398e-01, 7.38865223e-01, 9.16055132e-01, 6.11736493e-01, 9.63672879e-01, 1.83778839e-01, 7.27558919e-02, 5.91602822e-01, 3.25235484e-01, 2.34741217e-01, 9.52346277e-01, 9.18556407e-01, 9.35373324e-01, 6.89209070e-01, 2.56049054e-01, 6.17975395e-01, 7.82285691e-01, 9.84983432e-01, 6.62322741e-01, 2.04144457e-01, 3.98446577e-01, 1.38918297e-01, 3.05919921e-01, 3.14043787e-01, 5.91072666e-01, 7.44703771e-01, 8.92272567e-01, 9.78017873e-01, 9.01203161e-01, 1.41526372e-01, 4.14878484e-01, 6.80683651e-01, 5.01733152e-02, 8.14635389e-01, 2.27926375e-01, 9.03269815e-01, 8.68443745e-01, 9.86939190e-01, 7.40779486e-01, 2.61005311e-01, 3.19276232e-01, 9.69509248e-01, 1.11908818e-01, 4.49198556e-01, 1.27056715e-01, 3.84064823e-01, 5.14591811e-01, 2.10747488e-01, 9.53884090e-01, 8.43167950e-01, 4.51187972e-01, 3.75331782e-01, 
6.23566461e-01, 3.55290379e-01, 2.95705968e-01, 1.69622690e-01, 1.42981830e-01, 2.72180991e-01, 9.46468040e-01, 3.70932500e-01, 9.94292830e-01, 4.62587505e-01, 7.14817405e-01, 2.45370540e-02, 3.00906377e-01, 5.75768304e-01, 9.71448393e-01, 6.95574827e-02, 3.93693854e-01, 5.29306116e-01, 5.04694554e-01, 6.73797120e-02, 6.76596969e-01, 5.50948898e-01, 3.24909641e-01, 7.70337719e-01, 6.51842631e-03, 3.03264879e-01, 7.61037886e-03, 2.72289601e-01, 1.50502041e-01, 6.71103888e-02, 7.41503703e-01, 1.92088941e-01, 2.19043977e-01, 9.09320161e-01, 2.37993569e-01, 6.18107973e-02, 8.31447852e-01, 2.23355609e-01, 1.84789435e-01, 4.16104518e-01, 4.21573859e-01, 8.72446305e-02, 2.97294197e-01, 4.50328256e-01, 8.72199917e-01, 2.51279916e-01, 4.86219272e-01, 7.57071329e-01, 4.85655942e-01, 1.06187277e-01, 4.92341327e-01, 1.46017513e-01, 5.25421017e-01, 4.22637906e-01, 2.24685018e-01, 8.72648431e-01, 5.54051490e-01, 1.80745062e-01, 2.12756336e-01, 5.20883169e-01, 7.60363654e-01, 8.30254678e-01, 5.00003328e-01, 4.69017439e-01, 6.38105527e-01, 3.50638261e-02, 5.22217353e-02, 9.06516882e-02, 8.52975842e-01, 1.19985883e-01, 3.74926753e-01, 6.50302066e-01, 1.98875727e-01, 6.28362507e-02, 4.32693501e-01, 3.10500685e-01, 6.20732833e-01, 4.58503272e-01, 3.20790034e-01, 7.91284868e-01, 7.93054570e-01, 2.93406765e-01, 8.95399023e-01, 1.06441034e-01, 7.53085241e-02, 8.67523104e-01, 1.47963482e-01, 1.25584706e-01, 3.81545040e-02, 6.34338619e-01, 1.76368938e-02, 5.75553531e-02, 5.31607516e-01, 2.63869588e-01, 9.41945823e-01, 9.24028838e-02, 5.21496463e-01, 7.74866558e-01, 5.65210610e-01, 7.28015327e-02, 6.51963790e-01, 8.94727453e-01, 4.49571590e-01, 1.29932405e-01, 8.64026259e-01, 9.92599934e-01, 7.43721560e-01, 8.87300215e-01, 1.06369925e-01, 8.11335531e-01, 7.87734900e-01, 9.87344678e-01, 5.32502820e-01, 4.42612382e-01, 9.64041183e-01, 1.66085871e-01, 1.12937664e-01, 5.24423470e-01, 6.54689333e-01, 4.59119726e-01, 5.22774091e-01, 3.08722276e-02, 6.26979315e-01, 4.49754105e-01, 8.07495757e-01, 2.34199499e-01, 1.67765675e-01, 9.22168418e-01, 3.73210378e-01, 8.04432575e-01, 5.61890354e-01, 4.47025593e-01, 6.43155678e-01, 2.40407640e-01, 5.91631279e-01, 1.59369206e-01, 7.75799090e-01, 8.32067212e-01, 5.59791576e-02, 6.39105224e-01, 4.85274738e-01, 2.12630838e-01, 2.81431312e-02, 7.16205363e-01, 6.83885011e-01, 5.23869697e-01, 9.99418314e-01, 8.35331599e-01, 4.69877463e-02, 6.74712562e-01, 7.99273684e-01, 2.77001890e-02, 5.75809742e-01, 2.78513031e-01, 8.36209905e-01, 7.25472379e-01, 4.87173943e-01, 7.88311357e-01, 9.64676177e-01, 1.75752651e-01, 4.98112580e-01, 8.08850418e-02, 6.40981131e-01, 4.06647450e-01, 8.46539387e-01, 2.12620694e-01, 9.11012851e-01, 8.25041445e-01, 8.90065575e-01, 9.63626055e-01, 5.96689242e-01, 1.63372670e-01, 4.51640148e-01, 3.43026542e-01, 5.80658851e-01, 2.82327625e-01, 4.75535418e-01, 6.27760926e-01, 8.46314115e-01, 9.61961932e-01, 3.19806094e-01, 5.05508062e-01, 5.28102944e-01, 6.13045057e-01, 7.44714938e-01, 1.50586073e-01, 7.91878033e-01, 4.89839179e-01, 3.10496849e-01, 8.82309038e-01, 2.86922314e-01, 4.84687559e-01, 5.20838630e-01, 4.62955493e-01, 2.38185305e-01, 5.47259907e-02, 7.10916137e-01, 7.31887202e-01, 6.25602317e-01, 8.77741168e-01, 4.19881322e-01, 4.81222328e-01, 1.28224501e-01, 2.46034010e-01, 3.34971854e-01, 7.37216484e-01, 5.62134821e-02, 7.14089724e-01, 9.85549393e-01, 4.66295827e-01, 3.08722434e-03, 4.70237690e-01, 2.66524167e-01, 7.93875484e-01, 4.54795911e-02, 8.09702944e-01, 1.47709735e-02, 1.70082405e-01, 6.35905179e-01, 3.75379109e-01, 4.30315011e-01, 3.15788760e-01, 
5.58065230e-01, 2.24643800e-01, 2.42142981e-01, 6.57283636e-01, 3.34921891e-01, 1.26588975e-01, 7.68064155e-01, 9.43856291e-01, 4.47518596e-01, 5.44453573e-01, 9.95764932e-01, 7.16444391e-01, 8.51019765e-01, 1.01179183e-01, 4.45473958e-01, 4.60327322e-01, 4.96895844e-02, 4.72907738e-01, 5.58987444e-01, 3.41027487e-01, 1.56175026e-01, 7.58283148e-01, 6.83600909e-01, 2.14623396e-01, 3.27348880e-01, 3.92517893e-01, 6.70418431e-01, 5.16440832e-01, 8.63140348e-01, 5.73277464e-01, 3.46608058e-01, 7.39396341e-01, 7.20852434e-01, 2.35653246e-02, 3.89935659e-01, 7.53783745e-01, 6.34563528e-01, 8.79339335e-01, 7.41599159e-02, 5.62433904e-01, 6.15553852e-01, 4.56956324e-01, 5.20047447e-01, 5.26845015e-02, 5.58471266e-01, 1.63632233e-01, 5.38936665e-02, 6.49593683e-01, 2.56838748e-01, 8.99035326e-01, 7.20847756e-01, 5.68954684e-01, 7.43684755e-01, 5.70924238e-01, 3.82318724e-01, 4.89328290e-01, 5.62208561e-01, 4.97540804e-02, 4.18011085e-01, 6.88041565e-01, 2.16234653e-01, 7.89548214e-01, 8.46136387e-01, 8.46816189e-01, 1.73842353e-01, 6.11627842e-02, 8.44440559e-01, 4.50646654e-01, 3.74785037e-01, 4.87196697e-01, 4.56276448e-01, 9.13284391e-01, 4.15715464e-01, 7.13597697e-01, 1.23641270e-02, 5.10031271e-01, 4.74601930e-02, 2.55731159e-01, 3.22090006e-01, 1.91165703e-01, 4.51170940e-01, 7.50843157e-01, 4.42420576e-01, 4.25380660e-01, 4.50667257e-01, 6.55689206e-01, 9.68257670e-02, 1.96528793e-01, 8.97343028e-01, 4.99940904e-01, 6.65504083e-01, 9.41828079e-01, 4.54397338e-01, 5.61893331e-01, 5.09839880e-01, 4.53117514e-01, 8.96804127e-02, 1.74888861e-01, 6.65641378e-01, 2.81668336e-01, 1.89532742e-01, 5.61668382e-01, 8.68330157e-02, 8.25092797e-01, 5.18106324e-01, 1.71904024e-01, 3.68385523e-01, 1.62005436e-01, 7.48507399e-01, 9.30274827e-01, 2.38198517e-01, 9.52222901e-01, 5.23587800e-01, 6.94384557e-01, 1.09338652e-01, 4.83356794e-01, 2.73050402e-01, 3.68027050e-01, 5.92366466e-01, 1.83192289e-01, 8.60376029e-01, 7.13926203e-01, 8.16750052e-01, 1.57890291e-01, 6.25691951e-01, 5.24831646e-01, 1.73873797e-01, 1.02429784e-01, 9.17488471e-01, 4.03584434e-01, 9.31170884e-01, 2.79386137e-01, 8.77745206e-01, 2.45200576e-01, 1.28896951e-01, 3.15713052e-01, 5.27874291e-01, 2.16444335e-01, 7.03883817e-01, 7.74738919e-02, 8.42422142e-01, 3.75598924e-01, 3.51002411e-01, 6.22752776e-01, 4.82407943e-01, 7.43107867e-01, 9.46182666e-01, 9.44344819e-01, 3.28124763e-01, 1.06147431e-01, 1.65102684e-01, 3.84060507e-01, 2.91057722e-01, 7.68173662e-02, 1.03543651e-01, 6.76698940e-01, 1.43141994e-01, 7.21342202e-01, 6.69471294e-03, 9.07298311e-01, 5.57080171e-01, 8.10954489e-01, 4.11120526e-01, 2.06407453e-01, 2.59590556e-01, 7.58512718e-01, 5.79873897e-01, 2.92875650e-01, 2.83686529e-01, 2.42829343e-01, 9.19323719e-01, 3.46832864e-01, 3.58238858e-01, 7.42827585e-01, 2.05760059e-01, 9.58438860e-01, 5.66326411e-01, 6.60292846e-01, 5.61095078e-02, 6.79465531e-01, 7.05118513e-01, 4.44713264e-01, 2.09732933e-01, 5.22732436e-01, 1.74396512e-01, 5.29356748e-01, 4.38475687e-01, 4.94036404e-01, 4.09785794e-01, 6.40025507e-01, 5.79371821e-01, 1.57726118e-01, 6.04572263e-01, 5.41072639e-01, 5.18847173e-01, 1.97093284e-01, 8.91767002e-01, 4.29050835e-01, 8.25490570e-01, 3.87699807e-01, 4.50705808e-01, 2.49371643e-01, 3.36074898e-01, 9.29925118e-01, 6.65393649e-01, 9.07275994e-01, 3.73075859e-01, 4.14044139e-03, 2.37463702e-01, 2.25893784e-01, 2.46900245e-01, 4.50350196e-01, 3.48618117e-01, 5.07193932e-01, 5.23435142e-01, 8.13611417e-01, 8.92715622e-01, 1.02623450e-01, 3.06088345e-01, 7.80461650e-01, 2.21453645e-01, 2.01419652e-01, 
2.84254457e-01, 3.68286735e-01, 7.39358243e-01, 8.97879394e-01, 9.81599566e-01, 7.56526442e-01, 7.37645545e-01, 4.23976657e-02, 8.25922012e-01, 2.60956996e-01, 2.90702065e-01, 8.98388344e-01, 3.03733299e-01, 8.49071471e-01, 3.45835425e-01, 7.65458276e-01, 5.68094872e-01, 8.93770930e-01, 9.93161641e-01, 5.63368667e-02, 4.26548945e-01, 5.46745780e-01, 5.75674571e-01, 7.94599487e-01, 7.18935553e-02, 4.46492976e-01, 6.40240123e-01, 2.73246969e-01, 2.00465968e-01, 1.30718835e-01, 1.92492005e-01, 1.96617189e-01, 6.61271644e-01, 8.12687657e-01, 8.66342445e-01 }, -4}}; typedef ConnectComponentsTest<int, float> ConnectComponentsTestF_Int; TEST_P(ConnectComponentsTestF_Int, Result) { /** * Verify the src & dst vertices on each edge have different colors */ EXPECT_TRUE(final_edges == params.n_row - 1); } INSTANTIATE_TEST_CASE_P(ConnectComponentsTest, ConnectComponentsTestF_Int, ::testing::ValuesIn(fix_conn_inputsf2)); template <typename value_idx, typename value_t> struct MutualReachabilityFixConnectivitiesRedOp { value_t* core_dists; value_idx m; DI MutualReachabilityFixConnectivitiesRedOp() : m(0) {} MutualReachabilityFixConnectivitiesRedOp(value_t* core_dists_, value_idx m_) : core_dists(core_dists_), m(m_){}; typedef typename raft::KeyValuePair<value_idx, value_t> KVP; DI void operator()(value_idx rit, KVP* out, const KVP& other) const { if (rit < m && other.value < std::numeric_limits<value_t>::max()) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_other = max(core_dist_rit, max(core_dists[other.key], other.value)); value_t core_dist_out; if (out->key > -1) { core_dist_out = max(core_dist_rit, max(core_dists[out->key], out->value)); } else { core_dist_out = out->value; } bool smaller = core_dist_other < core_dist_out; out->key = smaller ? other.key : out->key; out->value = smaller ? core_dist_other : core_dist_out; } } DI KVP operator()(value_idx rit, const KVP& a, const KVP& b) const { if (rit < m && a.key > -1) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_a = max(core_dist_rit, max(core_dists[a.key], a.value)); value_t core_dist_b; if (b.key > -1) { core_dist_b = max(core_dist_rit, max(core_dists[b.key], b.value)); } else { core_dist_b = b.value; } return core_dist_a < core_dist_b ? 
               KVP(a.key, core_dist_a) : KVP(b.key, core_dist_b);
    }
    return b;
  }

  DI void init(value_t* out, value_t maxVal) const { *out = maxVal; }
  DI void init(KVP* out, value_t maxVal) const
  {
    out->key   = -1;
    out->value = maxVal;
  }

  DI void init_key(value_t& out, value_idx idx) const { return; }
  DI void init_key(KVP& out, value_idx idx) const { out.key = idx; }

  DI value_t get_value(KVP& out) const { return out.value; }
  DI value_t get_value(value_t& out) const { return out; }

  void gather(const raft::resources& handle, value_idx* map)
  {
    auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m);
    thrust::gather(raft::resource::get_thrust_policy(handle),
                   map,
                   map + m,
                   core_dists,
                   tmp_core_dists.data_handle());
    raft::copy_async(
      core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle));
  }

  void scatter(const raft::resources& handle, value_idx* map)
  {
    auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m);
    thrust::scatter(raft::resource::get_thrust_policy(handle),
                    core_dists,
                    core_dists + m,
                    map,
                    tmp_core_dists.data_handle());
    raft::copy_async(
      core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle));
  }
};

template <typename value_t, typename value_idx>
struct ConnectComponentsMutualReachabilityInputs {
  value_idx n_row;
  value_idx n_col;
  std::vector<value_t> data;
  std::vector<value_t> core_dists;
  std::vector<value_idx> colors;
  std::vector<value_idx> expected_rows;
  std::vector<value_idx> expected_cols;
  std::vector<value_t> expected_vals;
};

template <typename value_idx, typename value_t>
class ConnectComponentsEdgesTest
  : public ::testing::TestWithParam<
      ConnectComponentsMutualReachabilityInputs<value_t, value_idx>> {
 protected:
  void basicTest()
  {
    raft::resources handle;
    auto stream = resource::get_cuda_stream(handle);

    params = ::testing::TestWithParam<
      ConnectComponentsMutualReachabilityInputs<value_t, value_idx>>::GetParam();

    raft::sparse::COO<value_t, value_idx> out_edges_unbatched(resource::get_cuda_stream(handle));
    raft::sparse::COO<value_t, value_idx> out_edges_batched(resource::get_cuda_stream(handle));

    rmm::device_uvector<value_t> data(params.n_row * params.n_col,
                                      resource::get_cuda_stream(handle));
    rmm::device_uvector<value_t> core_dists(params.n_row, resource::get_cuda_stream(handle));
    rmm::device_uvector<value_idx> colors(params.n_row, resource::get_cuda_stream(handle));

    raft::copy(data.data(), params.data.data(), data.size(), resource::get_cuda_stream(handle));
    raft::copy(core_dists.data(),
               params.core_dists.data(),
               core_dists.size(),
               resource::get_cuda_stream(handle));
    raft::copy(
      colors.data(), params.colors.data(), colors.size(), resource::get_cuda_stream(handle));

    /**
     * 3.
cross_component_nn to fix connectivities */ MutualReachabilityFixConnectivitiesRedOp<value_idx, value_t> red_op(core_dists.data(), params.n_row); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_unbatched, data.data(), colors.data(), params.n_row, params.n_col, red_op, params.n_row, params.n_col); raft::linkage::cross_component_nn<value_idx, value_t>(handle, out_edges_batched, data.data(), colors.data(), params.n_row, params.n_col, red_op, 11, 1); ASSERT_TRUE(out_edges_unbatched.nnz == out_edges_batched.nnz && out_edges_unbatched.nnz == params.expected_rows.size()); ASSERT_TRUE(devArrMatch(out_edges_unbatched.rows(), params.expected_rows.data(), out_edges_unbatched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_unbatched.cols(), params.expected_cols.data(), out_edges_unbatched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_unbatched.vals(), params.expected_vals.data(), out_edges_unbatched.nnz, CompareApprox<float>(1e-4))); ASSERT_TRUE(devArrMatch(out_edges_batched.rows(), params.expected_rows.data(), out_edges_batched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_batched.cols(), params.expected_cols.data(), out_edges_batched.nnz, Compare<int>())); ASSERT_TRUE(devArrMatch(out_edges_batched.vals(), params.expected_vals.data(), out_edges_batched.nnz, CompareApprox<float>(1e-4))); } void SetUp() override { basicTest(); } void TearDown() override {} protected: ConnectComponentsMutualReachabilityInputs<value_t, value_idx> params; }; const std::vector<ConnectComponentsMutualReachabilityInputs<float, int>> mr_fix_conn_inputsf2 = { {100, 2, {-7.72642, -8.39496, 5.4534, 0.742305, -2.97867, 9.55685, 6.04267, 0.571319, -6.52184, -6.31932, 3.64934, 1.40687, -2.17793, 9.98983, 4.42021, 2.33028, 4.73696, 2.94181, -3.66019, 9.38998, -3.05358, 9.12521, -6.65217, -5.57297, -6.35769, -6.58313, -3.61553, 7.81808, -1.77073, 9.18565, -7.95052, -6.39764, -6.60294, -6.05293, -2.58121, 10.0178, -7.76348, -6.72638, -6.40639, -6.95294, -2.97262, 8.54856, -6.95673, -6.53896, -7.32614, -6.02371, -2.1478, 10.5523, -2.54502, 10.5789, -2.96984, 10.0714, 3.22451, 1.55252, -6.25396, -7.73727, -7.85431, -6.09303, -8.11658, -8.20057, -7.55965, -6.64786, 4.936, 2.23423, 4.44752, 2.27472, -5.72103, -7.70079, -0.929985, 9.78172, -3.10984, 8.72259, -2.44167, 7.58954, -2.18511, 8.6292, 5.55528, 2.30192, 4.73164, -0.0143992, -8.2573, -7.81793, -2.98837, 8.82863, 4.60517, 0.804492, -3.83738, 9.21115, -2.62485, 8.71318, 3.57758, 2.44676, -8.48711, -6.69548, -6.70645, -6.49479, -6.86663, -5.42658, 3.83139, 1.47141, 2.02013, 2.79507, 4.64499, 1.73858, -1.69667, 10.3705, -6.61974, -6.09829, -6.05757, -4.98332, -7.10309, -6.16611, -3.52203, 9.32853, -2.26724, 7.10101, 6.11777, 1.4549, -4.23412, 8.452, -6.58655, -7.59446, 3.93783, 1.64551, -7.12502, -7.63385, 2.72111, 1.94666, -7.14428, -4.15994, -6.66553, -8.12585, 4.70011, 4.43641, -7.76914, -7.69592, 4.11012, 2.48644, 4.89743, 1.89872, 4.29716, 1.17089, -6.62913, -6.53366, -8.07093, -6.22356, -2.16558, 7.25125, 4.73953, 1.46969, -5.91625, -6.46733, 5.43091, 1.06378, -6.82142, -8.02308, 6.52606, 2.14775, 3.08922, 2.04173, -2.14756, 8.36917, 3.85663, 1.65111, -1.68665, 7.79344, -5.01385, -6.40628, -2.52269, 7.95658, -2.30033, 7.05462, -1.04355, 8.78851, 3.72045, 3.5231, -3.98772, 8.29444, 4.24777, 0.509655, 4.72693, 1.67416, 5.7827, 2.7251, -3.41722, 7.60198, 5.22674, 4.16363, -3.1109, 10.8666, -3.18612, 9.62596, -1.4782, 9.94557, 4.47859, 2.37722, -5.79658, -5.82631, -3.34842, 8.70507}, {0.978428, 1.01917, 0.608673, 1.45629, 
0.310713, 0.689461, 0.701126, 0.63296, 0.774788, 0.701648, 0.513282, 0.757651, 0.45638, 0.973111, 0.901396, 0.613692, 0.482497, 0.688143, 0.72428, 0.666345, 0.58232, 0.554756, 0.710315, 0.903611, 0.694115, 0.796099, 0.639759, 0.798998, 0.639839, 1.30727, 0.663729, 0.57476, 0.571348, 1.14662, 1.26518, 0.485068, 0.78207, 0.791621, 1.01678, 1.28509, 1.14715, 0.381395, 0.850507, 0.788511, 0.588341, 0.878516, 0.928669, 0.405874, 0.776421, 0.612274, 1.84963, 0.57476, 0.95226, 0.488078, 1.24868, 0.515136, 0.589378, 0.903632, 1.01678, 1.09964, 0.666345, 0.713265, 0.877168, 1.10053, 1.96887, 1.03574, 2.03728, 0.969553, 0.774788, 0.586338, 0.65168, 0.435472, 0.664396, 0.790584, 0.678637, 0.715964, 0.865494, 0.978428, 1.59242, 0.861109, 0.833259, 0.65168, 0.903632, 1.49599, 0.76347, 0.960453, 1.1848, 1.37398, 0.928957, 1.07848, 0.661798, 1.21104, 1.04579, 1.89047, 1.24288, 0.529553, 0.903611, 0.620897, 0.882467, 0.647189}, {0, 1, 2, 1, 0, 1, 2, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 2, 0, 0, 2, 2, 2, 1, 0, 0, 0, 0, 1, 1, 0, 2, 2, 2, 2, 1, 1, 0, 2, 1, 2, 2, 1, 0, 0, 0, 1, 1, 1, 2, 0, 0, 0, 2, 2, 1, 2, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 2, 1, 0, 1, 0, 1, 1, 2, 1, 2, 0, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 1, 0, 2}, {50, 54, 57, 63, 82, 87}, {57, 63, 50, 54, 87, 82}, {6.0764, 11.1843, 6.0764, 11.1843, 6.89004, 6.89004}}, {1000, 2, {-6.59634, -7.13901, -6.13753, -6.58082, 5.19821, 2.04918, -2.96856, 8.16444, -2.76879, 7.51114, -6.82261, -6.61152, 5.02008, 2.58376, 5.55621, 2.31966, 4.86379, 3.33731, 5.84639, 1.15623, -2.17159, 8.60241, -4.97844, -6.94077, -2.31014, 8.41407, 5.5582, 0.402669, 5.25265, 0.919754, 5.85298, 2.11489, -3.29245, 8.69222, -1.9621, 8.81209, -1.53408, 8.86723, -2.18227, 8.79519, 4.60519, 2.20738, -6.4759, -6.9043, -7.18766, -6.10045, -9.00148, -7.48793, 4.01674, 1.41769, -2.45347, 10.1085, -3.20892, 9.22827, -3.18612, 9.62596, 4.81977, 3.36517, 4.90693, 2.8628, -6.44269, -5.68946, -8.30144, -5.37878, 4.61485, 2.79094, -1.98726, 9.31127, -3.66019, 9.38998, -6.58607, -8.23669, -7.46015, -6.29153, 4.08468, 3.85433, -6.36842, -5.50645, -6.83602, -5.18506, -0.627173, 10.3597, 3.98846, 1.48928, -2.9968, 8.58173, -7.2144, -7.28376, -0.660242, 10.1409, -4.23528, -8.38308, -3.15984, 8.52716, -2.40987, 9.76567, -8.7548, -6.76508, 4.56971, 0.312209, -7.5487, -5.8402, -1.6096, 9.32159, 5.04813, 0.270586, -7.6525, -6.47306, -1.79758, 7.88964, -9.0153, -3.74236, -3.5715, 9.48788, -1.65154, 8.85435, -3.47412, 9.70034, 6.31245, 2.39219, 4.03851, 2.29295, -3.17098, 9.86672, -6.90693, -7.81338, -6.22373, -6.68537, -3.22204, 9.12072, -0.365254, 9.6482, -7.76712, -7.31757, 4.15669, 3.54716, 4.1937, 0.083629, -3.03896, 9.52755, -6.29293, -7.35501, -2.95926, 9.63714, 4.02709, 1.58547, 4.56828, 1.93595, 5.6242, 1.75918, -7.36237, -7.83344, 5.32177, 3.81988, -2.43183, 8.153, -1.97939, 10.4559, -3.49492, 9.51833, 3.39602, 1.28026, -2.42215, 8.71528, -3.57682, 8.87191, -2.77385, 11.7345, 5.71351, 0.946654, -6.50253, -6.90937, 4.08239, 0.603367, -5.64134, -6.85884, -2.76177, 7.7665, -2.25165, 8.93984, -3.49071, 9.47639, -1.06792, 7.57842, 5.15754, 1.24743, 3.63574, 1.20537, -6.07969, -8.49642, 4.12227, 2.19696, -7.17144, -8.4433, -1.92234, 11.2047, 3.23237, 1.19535, 3.85389, 0.641937, 4.82665, 1.21779, -7.68923, -6.45605, -7.00816, -8.76196, -5.12894, 9.83619, -5.66247, -5.35879, 3.05598, 2.73358, 6.06038, 1.40242, -1.69568, 7.78342, 5.13391, 2.23384, -2.96984, 10.0714, -5.36618, -6.2493, 5.55896, 1.6829, 3.55882, 2.58911, 5.36155, 0.844118, -0.0634456, 9.14351, 4.88368, 1.40909, -7.04675, -6.59753, 
-7.78333, -6.55575, 5.39881, 2.25436, -2.85189, 8.64285, -2.22821, 8.39159, 3.88591, 1.69249, -7.55481, -7.02463, 4.60032, 2.65467, -6.90615, -7.76198, -6.76005, -7.85318, 4.15044, 3.01733, -7.18884, -7.63227, 4.68874, 2.01376, 3.51716, 2.35558, -3.81367, 9.68396, 4.42644, 3.4639, 4.81758, 0.637825, -6.20705, -4.98023, -1.68603, 9.0876, -4.99504, -5.33687, -1.77073, 9.18565, 4.86433, 3.02027, 4.20538, 1.664, 4.59042, 2.64799, -3.09856, 9.86389, -3.02306, 7.95507, -6.32402, -6.79053, -7.67205, -7.18807, -8.10918, -6.38341, -1.67979, 6.80315, 4.00249, 3.16219, -2.54391, 7.84561, -3.22764, 8.80084, -2.63712, 8.05875, -2.41744, 7.02672, -6.71117, -5.56251, 5.18348, 1.60256, -7.40824, -6.29375, -4.22233, 10.3682, 4.8509, 1.87646, -2.99456, 9.09616, 5.1332, 2.15801, -2.27358, 9.78515, -6.73874, -8.64855, 4.96124, 2.39509, -3.70949, 8.67978, -4.13674, 9.06237, 2.80367, 2.48116, -0.876786, 7.58414, -3.7005, 9.67084, 6.48652, 0.903085, 6.28189, 2.98299, -6.07922, -6.12582, -5.67921, -7.537, 4.55014, 3.41329, -1.63688, 9.19763, -4.02439, 10.3812, 5.23053, 3.08187, -2.2951, 7.76855, -6.24491, -5.77041, 6.02415, 2.53708, -6.91286, -7.08823, 4.83193, 1.66405, -7.07454, -5.74634, -2.09576, 10.8911, 3.29543, 1.05452, -3.49973, 8.44799, 5.2922, 0.396778, -2.54502, 10.5789, -6.38865, -6.14523, -1.75221, 8.09212, -9.30387, -5.99606, -2.98113, 10.1032, -6.2017, -7.36802, 4.63628, 0.814805, -1.81905, 8.61307, 4.88926, 3.55062, 3.08325, 2.57918, -2.51717, 10.4942, -5.75358, -6.9315, 6.36742, 2.40949, 5.74806, 0.933264, 4.74408, 1.91058, -7.41496, -6.97064, -2.98414, 8.36096, 6.72825, 1.83358, -2.95349, 9.39159, -3.35599, 7.49944, 6.18738, 3.76905, -3.17182, 9.58488, 5.17863, 1.0525, -3.0397, 8.43847, -2.23874, 8.96405, 3.04689, 2.41364, 6.14064, 2.82339, -6.33334, -6.87369, -7.92444, -8.84647, 3.65129, 0.86958, 5.29842, 3.98337, -2.06538, 9.78892, -6.89494, -6.30082, -2.52144, 8.11703, -8.11398, -7.47257, 5.3381, 2.36666, -6.93452, -6.59456, -7.50634, -6.01772, 6.23438, 1.12621, -2.15218, 8.32138, -7.04777, -7.3522, -2.52771, 8.72563, -2.77907, 8.03552, 4.29123, 1.62391, -8.07551, -6.43551, -3.28202, 8.77747, -2.21308, 9.27534, -8.25153, -8.49367, -3.54644, 8.82395, -8.05867, -5.69243, 4.46681, 1.98875, 3.8362, 3.61229, -6.96231, -7.00186, 5.18993, 1.00483, -5.35116, -6.37227, 5.23298, 1.66362, -5.68306, -7.03864, -9.03144, -7.59926, -6.10127, -7.4313, 4.83572, 0.994797, -7.32695, -5.59909, 0.569683, 10.1339, 3.35957, 2.84563, -2.4122, 9.60944, 5.00855, 1.57983, -2.57528, 7.80327, 3.96349, 3.77411, 4.59429, 2.21651, -6.54765, -6.68961, 4.76798, 1.29212, -1.67351, 7.88458, 5.63615, 1.47941, -2.5301, 9.13161, 4.26075, 1.76959, 4.67788, 2.0932, 4.39955, 1.59835, 3.91274, 1.72565, -4.1786, 9.55765, -7.34566, -8.47481, 4.8364, 2.68217, -7.36848, -7.99973, -5.84708, -5.7534, 5.37252, 1.89245, -2.1707, 8.599, -1.3299, 9.0818, -6.79122, -5.40258, 5.56391, 1.78827, -0.194539, 7.14702, 4.60489, 3.74397, 5.50995, 2.46885, -3.98772, 8.29444, -5.21837, -7.33721, -1.63959, 10.3699, -5.92932, -5.1695, -5.88358, -7.6369, 4.11716, 3.02218, -6.54114, -7.17551, 3.97179, 2.96521, -6.75325, -4.94118, 5.26169, 0.402945, 3.25031, 0.327771, -0.44845, 10.7696, -2.15141, 9.57507, 7.04329, 1.91555, -3.74615, 7.69383, -7.52318, -5.85015, -6.80419, -8.48208, -4.57664, 8.92517, 4.57574, 2.30193, 4.84098, 3.02382, -9.43355, -5.94579, -3.52203, 9.32853, 3.43018, 2.5731, -6.15725, -7.25294, -6.69861, -8.17694, -2.40955, 8.51081, -4.82342, -7.98332, -7.10611, -6.51274, 5.86755, 0.763529, -6.56045, -5.53966, -3.61553, 7.81808, 4.3825, 0.304586, 
-6.52818, -5.80996, 4.59972, 0.542395, -6.90603, -6.59995, -6.3585, -6.23489, -6.01915, -7.46319, -5.38694, -7.15123, -7.83475, -6.45651, 5.89564, 1.07856, -5.15266, -7.27975, -6.97978, -7.08378, 5.83493, 0.449983, -2.62374, 10.2521, -7.34494, -6.98606, -6.79719, -8.33766, 3.54757, 1.65676, -8.40528, -5.61753, -5.85556, -6.28758, 4.66862, 3.25162, -6.26047, -4.82261, 4.61552, 4.11544, -1.36637, 9.76622, 4.2517, 2.14359, -2.45099, 7.87132, -0.376164, 7.0622, 4.34493, 3.22091, 6.95921, 2.36649, -6.70319, -7.24714, -5.56932, -5.48443, -7.43149, -4.32191, -3.23956, 9.23074, -5.77255, -7.00049, 4.96601, 0.722056, -7.88617, -5.74023, 4.18757, -0.45071, -7.12569, -7.72336, 5.27366, 2.38697, 3.93487, 1.9174, 3.19186, -0.225636, -3.41722, 7.60198, -3.08286, 8.46743, -5.87905, -7.55073, -5.26425, -7.20243, -2.97867, 9.55685, -1.23153, 8.42272, -2.33602, 9.3996, -3.33819, 8.45411, -3.58009, 9.49676, 3.78152, 2.67348, -1.54582, 9.42707, -4.04331, 10.292, 3.3452, 3.134, -2.75494, 8.74156, -3.26555, 7.59203, -7.27139, -7.80252, 3.5293, 3.72544, 6.11642, 3.35326, 4.01611, 3.8872, 4.89591, 2.95586, -7.06677, -5.89438, 4.19438, 3.42655, -6.11355, -5.65318, -7.59645, -8.74665, -5.80362, -6.8588, 3.80453, 4.11832, 5.70655, 3.14247, -4.98084, 8.21739, -1.87642, 11.285, 4.39864, 2.32523, -3.48388, 9.80137, 4.02836, 0.566509, -2.41212, 9.98293, -5.40846, -7.08943, 4.01506, 1.99926, -3.43613, 8.95476, -7.24458, -7.71932, 6.02204, 2.62188, -6.29999, -6.55431, 6.19038, 0.974816, 3.55882, 3.02632, -7.06011, -3.687, -1.55877, 8.43738, -5.14711, -4.64881, 4.7167, 0.690177, -7.90381, -5.02602, 4.17218, 2.31967, -0.643423, 9.48812, -7.95237, -6.64086, -4.05986, 9.08285, -6.24158, -6.37927, -6.6105, -7.2233, -6.21675, -5.70664, -3.29967, 9.48575, 3.41775, 2.68617, -2.24948, 8.10997, -2.24931, 9.79611, -9.0523, -6.03269, -2.2587, 9.36073, 5.20965, 2.42088, -3.10159, 8.1503, -6.67906, -5.73147, 4.0687, 2.54575, -1.24229, 8.30662, -2.09627, 8.45056, -7.87801, -6.57832, 4.72216, 3.03865, -0.929985, 9.78172, -8.56307, -7.68598, -7.05257, -5.1684, -7.09076, -7.86729, 4.61432, 3.1459, -6.34133, -5.8076, -3.82943, 10.8457, -8.46082, -5.98507, 5.34763, 1.4107, -1.68714, 10.9111, -1.67886, 8.1582, -0.623012, 9.18886, -4.21258, 8.95874, -2.16744, 10.8905, -6.57158, -7.27176, 2.14047, 4.26411, -8.44217, -7.40916, 5.29008, 1.87399, 4.31824, 4.04992, -3.77008, 9.93215, -2.72688, 10.1131, -6.14278, -7.16144, -3.92457, 8.59364, -5.92649, -6.59299, 4.68369, 1.82617, -6.89905, -7.18329, 3.95173, 4.22561, -7.66453, -6.23183, -2.44167, 7.58954, -6.36603, -7.41281, -6.45081, -6.187, -6.6125, -6.37138, 5.46036, 2.48044, -2.14756, 8.36917, -2.3889, 9.52872, 3.80752, 2.44459, -3.98778, 10.158, -6.63887, -4.27843, -8.65266, -5.61819, -7.97003, -5.46918, -5.9604, -7.54825, -0.916011, 8.50307, -3.69246, 6.97505, -7.98533, -7.09503, -2.30033, 7.05462, 4.76218, 2.51647, -7.04981, -7.33334, 3.66401, 3.02681, -2.50408, 8.7797, 7.19996, 1.87711, 4.01291, 3.78562, -0.356015, 8.24694, -0.958046, 9.12996, 4.60675, 3.76773, 6.21945, 1.45031, 4.27744, 0.8535, -4.72232, -7.48582, 6.03923, 2.8978, -3.26833, 9.16468, -7.97059, -7.29092, -2.3998, 9.74005, -2.66721, 8.58741, -7.36269, -6.73332, -7.87893, -7.38488, 4.65023, 0.661333, -4.8171, -7.94764, -4.11564, 9.21775, 4.80633, 2.46562, -2.72887, 9.3714, -5.26735, -5.5652, 4.9826, 2.42992, -6.17018, -7.3156, 4.38084, 1.77682, 5.35084, 2.41743, -2.61796, 9.416, 5.27229, 2.94572, -7.52315, -5.95227, -1.45077, 7.25555, -3.79916, 7.71921, -2.23251, 9.84147, 3.70054, 1.82908, -1.93831, 10.1499, -6.18324, -5.9248, 
-3.33142, 9.25797, -6.08536, -8.1344, 5.95727, 2.17077, 4.87366, 0.417274, -6.529, -6.39092, -9.24256, -7.88984, -6.36652, -7.13966, -3.90777, 9.57726, -7.06252, -5.50523, -2.26423, 8.50734, -2.84498, 10.6833, 5.0391, 2.62037, -2.74815, 8.10672, 3.35945, 3.72796, -4.11668, 9.19892, 5.66903, 2.44577, -1.63807, 8.68826, -7.42587, -6.48831, 6.17063, 3.19193, -2.28511, 9.02688, -7.10088, -7.15692, 4.46293, 1.17487, -5.91017, -6.45292, -2.26724, 7.10101, -2.43339, 8.33712, -4.63309, 8.48853, -3.31769, 8.51253, -2.49078, 10.6907, -1.30798, 8.60621, 6.30535, 2.98754, -5.79384, -6.78213, -1.93213, 8.81124, 4.55773, 3.09047, 6.37584, 2.17108, 4.3927, 1.29119, -3.2245, 9.69388, -1.69634, 9.64392, 2.799, 0.693593, -2.1426, 8.07441, -8.4505, -8.00688, 4.736, 1.51089, -2.5863, 9.35544, -2.94924, 9.14503, 6.2054, 1.90742, 5.67172, 0.487609, -5.69071, -6.17181, -8.24651, -7.10488, -7.34424, -6.67895, -6.71977, -7.90778, -1.82294, 7.40157, -9.40991, -7.16611, -4.37999, 8.66277, -1.42615, 10.0681, -2.00828, 8.03673, -7.50228, -6.6855, -5.65859, -6.29801, -8.02335, -6.77155, -3.40761, 9.50621, -2.82447, 9.77326, -1.5938, 9.34304, -3.5213, 7.35943, -3.36961, 8.62973, -7.01708, -5.92724, 5.20886, 3.60157, -1.71817, 8.1049, -2.46363, 8.36269, -2.77809, 7.90776, -2.75459, 8.26055, -2.03596, 8.94146, -4.53434, 9.20074, -7.44387, -6.69556, -6.90099, -7.62732, 3.29169, 2.71643, 6.08686, 2.16972, -2.31111, 8.86993, -5.75046, 7.9899, 4.69951, 1.32623, 4.71851, -0.025031, -6.42374, -4.71511, -8.04974, -8.68209, -3.16103, 9.06168, -6.18267, -7.21393, -7.94202, -6.4518, -7.07697, -7.03138, 3.93554, 0.564708, -1.20372, 9.03529, -7.10611, -7.83955, -7.47529, -5.50567, -6.15453, -6.36393, -2.98024, 9.24634, -7.75761, -7.70699, -3.08597, 9.76968, -8.04954, -9.75237, 5.2534, 0.950377, 5.63789, -0.923086, -5.7065, -6.51047, -8.02132, -7.07377, -8.28594, -6.96322, -7.70722, -6.79397, -2.4962, 10.4678, 5.02846, 4.46617, 4.02648, 1.6707, -0.319395, 8.20599, 4.74525, 0.639144, -1.0313, 8.49602, 4.08766, 2.6061, 3.63826, 1.69207, 2.55795, 3.66963, 5.2826, 3.30232, -1.04355, 8.78851, -6.84762, -7.63353, -4.70868, -7.056, 3.53651, -0.179721, -3.38482, 7.63149, -5.9265, -6.36702, -0.986074, 9.5532, -2.42261, 8.85861, -7.42835, -6.78726, -4.02857, 8.53005, -8.22675, -7.85172, -5.57529, -8.5426, 6.03009, 2.53098, -7.10448, -7.53011, -3.4988, 8.8885, -2.62485, 8.71318, -6.39489, -7.72647, 3.93789, 1.31027, 4.27627, 1.91622, -0.923181, 7.77647, -5.16017, 10.1058, -6.44307, -5.97617, -7.24495, -6.69543, 6.27331, 0.826824, -6.55655, -7.13246, 5.66245, 4.41292, -2.13805, 8.4103, 5.23463, 2.82659, -4.86624, -6.74357, -6.14082, -6.26474, -2.67048, 9.41834, -1.26311, 6.9409, -7.20231, -7.13094, -1.35109, 9.80595, 3.9906, 0.749229, -6.75696, -5.25543, 4.84826, -0.0685652, -7.4914, -6.91715, 4.46725, 2.85683, -2.95571, 9.87068, 6.32381, 1.51429, -6.81177, -6.02734, -2.57188, 9.96943, -4.28792, 10.5103, 3.65025, 2.91394, -7.11856, -7.24693, -6.98693, -6.43239, 4.7651, 1.54376, 4.00092, 0.65008, -7.14816, -7.7713, -7.58803, -8.39382, 4.3321, 2.19232, -7.89545, -6.81843, -2.11475, 8.5933, -0.743743, 9.41927, 3.64849, -0.18022, -1.68665, 7.79344, 4.00214, 1.44217, -6.96799, -7.25012, -1.58302, 10.9237, -6.68524, -7.23328, 4.65831, 2.32075, 4.62024, 2.52566, -4.23412, 8.452, -0.822056, 9.89593, -7.19868, -7.67614, -3.32742, 11.1067, 5.27861, 0.830165, 4.48982, 2.09875, -6.58087, -7.6319, -0.880582, 7.63418, -7.01088, -6.80326, -7.31601, -6.98972, -6.85883, -7.60811, 6.14328, 2.85053, -7.49206, -6.51861, -2.28174, 10.3214, 4.81074, 1.78919, -5.58987, 
-6.20693, 4.08096, 2.35038, -1.5029, 8.43739, 4.11536, 2.46254, -3.28299, 7.76963, 4.31953, 2.39734, 4.91146, 0.696421, -1.4782, 9.94557, -3.34842, 8.70507, -6.97822, -6.86126, 4.10012, 1.19486, -2.50395, 9.06127, 4.41891, 2.00006, -2.73266, 9.72829, 3.5436, 0.533119, 5.78864, 0.233456, -6.62589, -6.41242, -2.21942, 11.0897, -6.76636, -8.31839, -2.71732, 8.52129, -5.20972, -6.48544, 3.26056, 1.24224, 3.45228, 2.28299, 4.72171, 1.87428, -7.52585, -5.1048, 5.0695, 2.18086, -6.55646, -7.02771, 3.23727, 3.72275, 3.41411, 0.508795, -7.80698, -6.64174, -5.90443, -6.37902, -0.387041, 10.0468, -1.3506, 8.1936, -6.08614, -8.62864, -5.91478, -5.26453, -2.61623, 7.97904, 4.45459, 1.84335, -6.66643, -7.63208, 3.6729, 1.92546, -1.32976, 8.54511, 6.31758, 1.41958, 4.63381, 2.81166, -7.01394, -6.0693, -2.7786, 9.73183, -2.90131, 7.55077, -7.13842, -5.28146, 6.71514, 1.28398, -6.98408, -7.04893, -3.03946, 8.22141, -2.76417, 10.5183, -7.35347, -6.89456, 4.19345, 2.16726, -2.02819, 9.23817, 4.97076, 2.8067, -0.544473, 9.04955, 4.90727, 2.29487, -6.31871, -7.17559, 3.71665, 0.621485, 4.7903, 2.33813, -6.47994, -7.53147, -6.80958, -5.71823, -8.07326, -5.96096, 4.77342, 1.8207, 5.71856, 1.93466, -2.70156, 9.31583, -2.1478, 10.5523, 4.78855, 1.63608, 5.53507, 2.60834, -7.00058, -6.46058, 5.4738, 2.43235, -1.34603, 9.02452, -7.5337, -8.71074, -7.30893, -7.57253, -5.33752, -4.87402, -7.01364, -6.86542, -7.93331, -7.94791, -5.69392, -6.16116, -7.32291, -7.76491, -6.41965, -7.55783, -7.87996, -7.55785, -6.69005, -5.87906, 3.92147, 2.86809, -1.5552, 9.66568, 5.07989, 1.47112, -7.48524, -5.0541, -1.82724, 8.70402, -2.00421, 9.88004, -2.62153, 8.79332, -7.52111, -6.44819, 4.06424, 2.09518, -6.65494, -5.94752, 6.93878, 1.61033, -3.95728, 7.60682, 5.67016, 2.21196, -7.81507, -5.79413, -2.41152, 8.24128, -3.83738, 9.21115, 4.5516, 4.55288, -5.75551, -5.93258, 4.56545, 2.59384, -7.45614, -9.47115, -2.39568, 9.67642, 5.57816, 1.45712, -7.48184, -6.41134, -1.99415, 12.867, -8.35854, -6.69675, -7.52559, -7.6793, 5.7454, 3.1602, 2.94692, 1.87483, -8.77324, -6.66682, -3.21125, 8.68662, -6.25806, -7.24972, 5.17639, 1.0747, -2.44897, 11.4775, -3.30172, 8.89955, -2.85191, 8.21201, -8.85893, -6.1322, 4.08957, 1.30155, -5.88132, -7.31173, -7.10309, -7.22943, -2.46068, 8.18334, -7.01226, -7.85464, 4.75411, 2.12347, -3.42862, 10.5642, 7.16681, 1.4423, 5.42568, 2.39863, -6.00833, -8.22609, -1.7619, 9.62466, -2.49527, 8.99016, -2.98837, 8.82863, -2.97262, 8.54856, -1.34142, 9.26871, -5.99652, -6.95795, -1.87061, 7.35277, -8.68277, -8.46425, -7.01808, -8.10441, -7.04269, -7.62501, -7.69783, -6.88348, -2.19829, 10.4896, 4.67396, 1.2032, -5.58263, -6.90298, -5.69224, -4.29055, 4.77285, 1.27305, -3.33469, 8.6929, -2.54195, 8.47086, 4.46492, 1.21742, 5.41158, -0.875373, -8.68069, -7.42278, -3.88687, 8.07646, 4.6682, 2.00293, -8.29799, -8.64092, -1.86382, 10.3829, -6.51234, -5.04193, 4.54458, 2.25219, -1.93264, 9.32554, -3.06285, 7.81641, -6.90714, -5.10786, 4.69653, 2.50286, 6.43757, 2.61401, -1.85483, 8.9587, 4.60224, 3.07647, 4.4492, 2.1906, 5.02181, 2.40321, -2.22923, 7.8888, 5.68943, 1.43793, -6.71097, -6.43817, -5.00633, -5.80006, -2.43763, 8.53663, 5.72577, 2.44787, -6.57079, -5.17789, -5.77867, -4.92176, -6.57222, -6.06437, 3.96639, 2.25216, -7.95177, -9.80146, 4.92574, 2.30763, -7.6221, -8.20013, -6.4132, -6.91575, 4.01432, 2.36897, 3.0833, 1.54505, -1.99416, 9.52807, -7.85128, -8.25973, -0.86423, 8.76525, -6.31412, -8.64087, -8.07355, -6.73717, -2.52821, 8.01176, -5.82357, -6.65687, -7.08865, -7.73063, -5.56251, -6.99818, -2.12513, 
8.98159, -6.89834, -7.26863, -7.92654, -6.34346, 4.86201, 1.49442, 4.92905, 4.42847, -5.57789, -5.3186, 4.34232, 3.34888, 2.64614, 2.34723, -4.10363, 8.41491, -2.18648, 8.18706, -3.39871, 8.19848, -2.66098, 9.6026, -6.95927, -6.42774, -5.61392, -7.74628, 5.60376, 4.18369, 5.28536, 4.13642, 4.8428, 0.457426, -6.33816, -6.12095, -2.4394, 8.62897, 4.56938, 2.45967, 4.0582, 0.958413, 5.62164, 1.64834, 5.73119, 2.58231, 4.66806, 1.96405, -6.71905, -6.87706, -2.18503, 8.88414, -6.03901, -6.33338, -8.38435, -6.12005, 0.0641622, 9.0735, 5.19967, 3.05395, -5.48716, -7.13016, -6.85541, -5.46789, -1.88353, 8.15713, 4.27891, 3.1325, -2.75816, 9.98586, -2.03022, 9.34795, -7.66741, -7.50096, -3.39305, 9.16801, -8.49476, -5.71537, -1.68378, 9.8278, -7.41559, -6.07205, -3.15577, 7.93274, 5.22381, 1.61388, 3.65739, 1.74854, 4.94251, 1.21889, -7.12832, -5.27276, -9.58286, -6.20223, -2.21613, 8.29993, 5.34799, 2.92987, 4.09496, 2.37231, -7.25183, -5.79136, -6.46981, -7.12137, -6.28607, -9.8205, 4.52865, 1.06926, -3.10984, 8.72259, 3.61865, 2.68153, -5.96604, -7.68329, 3.11435, 1.28126, -1.1064, 7.61243, -2.17688, 8.2658, -3.27246, 7.2094, -5.55143, -6.32388, -1.69667, 10.3705, -2.16558, 7.25125, -6.36572, -6.70053, 4.12259, 3.38252, -4.80554, -7.79949, -5.23966, -6.13798, 4.21969, 1.69139, -1.98985, 10.547, -2.52269, 7.95658, -6.75642, -6.32862, -3.51521, 7.8001, 4.70435, -0.00229688, 6.25359, 2.4267, 5.82935, 0.745562, 5.24778, 2.15978, 5.48052, 1.32055, -3.05358, 9.12521, -3.18922, 9.24654, 4.47276, 2.11988, 5.36751, 2.02512, -2.18511, 8.6292, -2.48469, 9.51228, 5.57556, 3.24472, -2.58121, 10.0178, -6.12629, -6.49895, -4.54732, 8.0062, -4.20166, 10.5438, -7.61422, -7.69036, -4.42797, 8.98777, 4.45301, 1.53344, 4.59296, 2.45021, -6.81264, -6.36417, 4.62346, 3.16156, -5.93007, -8.36501, -2.78425, 6.71237, -6.17141, -6.64689, -5.20608, 8.95999, -7.30598, -5.73166, 4.39572, 2.93726, -1.89503, 9.77179, -5.683, -7.48989, 4.80924, 0.559455, -2.17793, 9.98983, 5.23728, 2.67434, -7.03976, -6.20877, 3.90435, 3.20926, -7.78536, -7.53388, -1.00684, 9.08838, -5.26741, -5.98327, 3.28002, 2.71942, -1.47166, 8.50427, -2.32733, 9.26251, 5.16271, 1.39947, -6.59093, -6.61979, -2.44492, 7.93654, -1.05805, 9.97356, -3.1109, 10.8666, 3.38834, 3.41693, 4.83098, 2.01961, -2.74013, 9.71049, -3.34892, 8.41489, 4.94768, 0.263001, 3.57477, 1.66795, 5.78915, 1.26999, -4.81812, -5.67174, -1.88508, 9.64263, 3.69048, 4.60555, 4.03037, 1.7862, -7.4418, -7.08933}, {0.127717, 0.211407, 0.195547, 0.21633, 0.39671, 0.229008, 0.20839, 0.169236, 0.314314, 0.322473, 0.169506, 0.45499, 0.147819, 0.296502, 0.15198, 0.356444, 0.0992833, 0.220833, 0.296206, 0.178067, 0.135359, 0.189725, 0.243099, 0.519986, 0.168105, 0.273465, 0.126033, 0.18045, 0.282832, 0.193901, 0.213704, 0.425046, 0.203191, 0.228674, 0.209267, 0.355039, 0.212918, 0.315495, 0.294112, 0.257576, 0.5786, 0.186019, 0.171919, 0.171919, 0.449151, 1.34947, 0.171919, 0.16341, 0.641387, 0.342115, 0.267343, 0.246125, 0.277612, 0.181462, 0.22944, 1.95598, 0.164897, 0.235803, 0.228273, 0.314629, 0.127403, 0.241241, 0.189362, 0.151691, 0.130085, 0.526707, 0.217069, 0.282306, 0.531523, 0.177035, 0.169776, 0.20395, 0.177165, 0.146628, 0.280013, 0.223033, 0.50947, 0.184133, 0.295329, 0.183219, 0.28166, 0.179348, 0.276462, 1.00283, 0.248147, 0.214453, 0.231732, 0.170672, 0.256893, 0.133271, 0.151137, 0.500823, 0.23678, 0.376983, 0.362061, 0.140013, 0.388863, 0.398552, 0.38015, 0.190081, 0.167115, 0.206884, 0.473849, 1.05117, 0.435665, 0.323618, 0.326201, 0.32226, 0.201787, 0.246496, 0.28325, 
0.226596, 0.238153, 0.277268, 0.674629, 0.179433, 0.175651, 0.154778, 0.178195, 0.192796, 0.103571, 0.227621, 0.201124, 0.160525, 0.160964, 0.240099, 0.258027, 0.134127, 0.127717, 0.341378, 0.311595, 0.282306, 0.168988, 0.40775, 0.246125, 0.583131, 0.236804, 0.238633, 0.194824, 0.169315, 0.244227, 0.249511, 0.189725, 0.305662, 0.301415, 0.658641, 0.250944, 0.151792, 0.141383, 0.143843, 0.563347, 0.184216, 0.204155, 0.221764, 0.314908, 0.144518, 0.228808, 0.255785, 0.163457, 0.424705, 0.170202, 0.312598, 0.300629, 0.532614, 0.661392, 0.228273, 0.543432, 0.257175, 0.258994, 0.281413, 0.273897, 0.246837, 0.293489, 0.25533, 0.260492, 0.213704, 0.3091, 0.17103, 0.172285, 0.241399, 0.35999, 0.372243, 0.269191, 0.390239, 0.31761, 0.200593, 0.22197, 0.752914, 0.266571, 0.13102, 0.268659, 0.293723, 0.356294, 0.296258, 0.264531, 0.15468, 0.358535, 0.243711, 0.112147, 0.121659, 0.197101, 0.515292, 0.245628, 0.279863, 0.789807, 0.195156, 0.196073, 0.149564, 0.118675, 0.389373, 0.233821, 0.176128, 0.481088, 0.360027, 0.553152, 0.208207, 0.171608, 0.160489, 0.334298, 0.139426, 0.168603, 0.266199, 0.326458, 0.103571, 0.171208, 0.130961, 0.190887, 0.177229, 0.241651, 0.115152, 0.196753, 0.481088, 0.230965, 0.354631, 0.14591, 0.328543, 0.141544, 0.195888, 0.290379, 0.245954, 0.184547, 0.575214, 0.186929, 0.28527, 0.292213, 1.20033, 0.281528, 0.15625, 0.211524, 0.186398, 0.298061, 0.147393, 0.245349, 0.164527, 0.224771, 0.222382, 0.251643, 0.148835, 0.135359, 0.204967, 0.193024, 0.486309, 0.389686, 0.211921, 0.307405, 0.38666, 0.26802, 0.16605, 0.323134, 0.268397, 0.217894, 0.974118, 0.371618, 0.156201, 0.305787, 0.339305, 0.371032, 0.381765, 0.22747, 0.24906, 0.100884, 0.253192, 0.314253, 0.388289, 0.580947, 1.00267, 0.241998, 0.489101, 0.341501, 0.247423, 0.328311, 0.440281, 0.14927, 0.244469, 0.846828, 0.191725, 0.217429, 0.123403, 0.322875, 0.145373, 0.757259, 0.190086, 0.316286, 0.268397, 0.296721, 0.440472, 0.186848, 0.232134, 0.180239, 0.219724, 0.205886, 0.250975, 0.145636, 0.312476, 0.366418, 0.128135, 0.315235, 0.264531, 0.161815, 0.31631, 0.296489, 0.37171, 0.197217, 0.195625, 0.479579, 0.443037, 0.323347, 0.193616, 0.160251, 0.8952, 0.256291, 0.593345, 0.177165, 0.409514, 0.847863, 0.111448, 0.210031, 0.251347, 0.351953, 0.705204, 0.117901, 0.182343, 0.230179, 0.83632, 0.22104, 0.145163, 0.200326, 0.23431, 0.21868, 0.253575, 0.186562, 0.192757, 0.172716, 0.27396, 0.258581, 0.327892, 0.376138, 0.223477, 0.302375, 0.145845, 0.436902, 0.421794, 0.328543, 0.19246, 0.238889, 0.254866, 0.284674, 0.457849, 0.202937, 0.392568, 0.453083, 0.782713, 0.465401, 0.178623, 0.304863, 0.190081, 0.228641, 0.255135, 0.245037, 0.217526, 0.109584, 0.276462, 0.182301, 0.38582, 0.349942, 1.3889, 0.30235, 0.796353, 0.160168, 0.643204, 0.153752, 0.410268, 0.186439, 0.256834, 0.185783, 0.0957629, 0.226596, 0.197951, 0.17123, 0.192836, 0.18405, 0.575784, 0.228874, 0.201787, 0.241209, 0.217386, 0.195751, 0.291585, 0.144531, 0.14176, 0.157635, 0.410268, 0.476338, 0.308148, 0.148077, 0.152093, 0.196791, 0.568087, 0.414026, 0.250587, 0.473463, 0.293645, 0.396768, 0.2766, 0.38664, 0.135034, 1.50827, 0.472527, 0.268418, 0.40383, 0.375914, 0.246496, 0.176474, 0.340405, 0.220833, 0.138782, 0.159009, 0.444219, 0.259582, 0.33638, 0.195586, 0.210974, 0.200288, 0.148129, 0.0974216, 0.211588, 0.280081, 0.44113, 0.773921, 0.553848, 0.448079, 0.183136, 0.380854, 0.685021, 0.308767, 0.553276, 0.181578, 0.164759, 0.313889, 0.137886, 0.545387, 0.278449, 0.736895, 0.360054, 0.358929, 0.457315, 0.343278, 0.507662, 0.280829, 0.113886, 
0.23146, 0.160584, 0.192796, 0.147561, 0.241272, 0.168988, 0.730511, 0.27836, 0.179847, 0.22555, 0.418069, 0.158348, 0.128965, 0.179454, 0.126366, 0.164434, 0.273633, 0.309556, 0.500823, 0.367852, 0.192875, 0.230262, 0.32724, 0.249969, 0.142618, 0.494229, 0.36108, 0.227931, 0.23113, 0.742825, 0.190126, 0.33741, 0.280598, 0.145268, 0.378423, 0.211921, 0.183594, 0.59201, 0.279563, 0.195683, 0.248101, 0.199754, 0.342494, 0.174343, 0.14149, 0.28085, 0.175781, 0.518738, 0.17223, 0.489904, 0.181167, 0.354286, 0.297824, 0.280829, 0.219412, 0.22814, 0.195625, 0.313949, 0.294708, 0.211551, 0.236255, 0.666933, 0.204808, 0.52591, 0.180725, 0.186889, 0.246589, 0.410575, 0.338348, 0.206219, 0.361766, 0.158143, 0.280816, 0.4149, 0.773082, 0.340046, 0.369672, 0.256923, 0.167195, 0.197217, 0.252339, 0.172716, 0.191526, 0.263085, 0.345698, 0.168286, 0.243099, 0.434631, 0.22944, 0.161862, 0.206589, 0.23457, 0.181924, 0.419063, 0.183427, 0.186152, 0.236352, 0.306336, 0.149002, 1.50086, 0.188231, 0.442757, 0.485602, 0.466662, 0.17329, 0.141329, 0.180619, 0.160061, 0.192569, 0.270999, 0.117901, 0.362693, 0.217561, 0.208975, 0.233658, 0.175173, 1.10307, 0.14625, 1.31124, 0.237608, 0.286784, 0.325112, 0.2485, 0.259641, 0.553152, 0.179039, 0.780781, 0.174758, 0.297824, 0.2558, 0.235949, 0.952186, 0.356744, 0.312646, 0.189362, 0.574524, 0.705204, 0.213168, 0.225956, 0.424165, 0.169506, 0.137109, 0.352451, 0.454554, 0.653302, 0.31261, 0.194412, 0.23719, 0.137886, 0.31498, 0.199085, 0.203875, 0.597248, 1.10036, 0.196869, 0.22104, 0.451345, 0.105613, 0.683928, 0.135204, 0.25533, 0.607871, 0.219724, 0.184464, 0.725001, 0.160061, 0.333407, 0.192569, 0.234147, 0.47178, 0.161815, 0.242455, 0.215305, 0.410575, 0.242376, 0.211335, 0.462804, 0.275065, 0.126878, 0.170404, 0.179433, 0.147244, 0.109584, 0.352905, 0.158215, 0.197604, 0.172407, 0.407506, 0.645446, 0.313061, 0.165602, 0.136663, 0.55444, 0.15527, 0.133128, 0.125912, 0.340405, 0.44521, 0.122783, 0.814526, 0.243773, 0.15743, 0.266743, 0.684458, 0.22221, 0.181294, 0.193901, 0.258802, 0.167195, 0.292056, 0.132309, 0.227671, 0.117334, 0.271758, 0.146185, 0.225042, 0.225964, 0.194863, 0.290274, 0.138438, 0.196714, 0.266012, 0.267771, 0.162544, 0.244258, 0.358038, 0.522617, 0.192875, 0.45066, 0.330396, 0.223477, 0.42967, 0.350884, 0.404655, 0.123155, 0.431583, 0.191675, 0.147354, 0.609034, 0.459487, 0.187337, 0.215128, 0.604169, 0.330165, 0.494229, 0.40775, 0.167377, 0.192648, 0.234635, 0.275578, 0.253094, 0.420063, 0.228299, 0.206478, 0.20395, 0.377656, 0.317393, 0.478623, 0.159009, 0.217034, 0.300933, 0.139754, 0.153901, 0.261077, 0.22834, 0.449609, 0.157672, 0.176474, 0.285704, 0.180186, 0.212738, 0.266428, 0.388313, 0.0954637, 0.298093, 0.251643, 0.330696, 0.159572, 0.210666, 0.149411, 0.139618, 0.338472, 0.450304, 0.208793, 0.583609, 0.185865, 0.400576, 0.21626, 0.174867, 0.239144, 0.249113, 0.200402, 0.275065, 0.238793, 0.205784, 0.4475, 0.231262, 0.259082, 0.20934, 0.16806, 0.193616, 0.213811, 0.395632, 0.482465, 0.274649, 0.307405, 0.165866, 0.334275, 0.683337, 0.368825, 0.14625, 0.780742, 0.163457, 0.226596, 0.138713, 1.79155, 0.400443, 0.233658, 0.426399, 0.623024, 0.670955, 0.123588, 0.110899, 0.173751, 0.651068, 0.199983, 0.190887, 0.541435, 0.21324, 0.266571, 0.134638, 0.179348, 0.145636, 0.170929, 0.623252, 0.587738, 0.109688, 0.515314, 0.217666, 0.213311, 0.249144, 0.187947, 0.270999, 0.268311, 0.469782, 0.763609, 0.32124, 0.146315, 0.265223, 0.298694, 0.197623, 0.21349, 0.845778, 0.175466, 0.123588, 0.17223, 0.258603, 1.17119, 0.538142, 0.407675, 
0.120288, 0.587238, 0.244664, 0.333956, 0.132812, 0.21399, 0.302375, 0.275882, 0.134284, 0.377555, 0.228541, 0.187307, 0.143804, 0.180545, 0.222451, 0.239638, 0.188028, 0.46334, 0.175868, 0.242392, 0.314762, 0.44473, 0.21962, 0.175966, 1.12364, 0.138837, 0.400576, 0.18184, 0.137706, 0.409763, 0.216894, 0.466662, 0.376604, 0.487155, 0.283143, 0.118547, 0.221591, 0.122783, 0.179007, 0.16628, 0.180999, 0.239845, 0.169607, 0.578402, 0.396537, 0.222288, 0.563237, 0.371238, 0.138658, 0.324336, 0.191526, 0.168603, 0.357715, 0.640905, 0.460706, 0.220902, 0.240797, 0.164062, 0.157853, 0.34457, 0.196092, 0.289353, 0.104597, 0.259641, 0.126878, 0.175781, 0.441458, 0.820108, 0.261864, 0.23431, 0.254506, 0.271955, 0.227529, 0.22834, 0.196753, 0.224906, 0.193783, 0.419481, 0.236933, 0.229706, 0.29785, 0.222947, 0.177606, 0.216911, 0.305188, 0.933438, 0.116666, 0.278483, 0.0973824, 0.271224, 0.127717, 1.28139, 0.276283, 0.180704, 0.234554, 0.285984, 0.290172, 0.49594, 0.135879, 0.436784, 0.206219, 0.342215, 0.374165, 0.182217, 0.274864, 0.625, 0.356925, 0.194324, 0.342215, 0.113012, 0.155123, 0.254207, 0.438919, 0.262548, 0.302299, 0.179528, 0.312744, 0.168513, 0.142618, 0.150543, 0.231361, 0.166004, 0.186725, 0.38848, 0.179857, 0.182301, 0.629476, 0.44113, 0.289669, 0.328543, 0.279938, 0.14625, 0.187174, 0.157635, 0.396749, 0.798931, 0.201541, 0.778619, 0.265883, 0.258027, 0.218576, 0.266571, 0.160168, 0.230303, 0.273633, 0.233298, 0.30175, 0.217069, 0.345145, 0.397901, 0.224499, 0.248101, 0.241335, 0.222947, 0.237094, 0.176518, 0.380032, 0.634775, 0.426193, 0.16362, 0.231097, 0.219898, 0.343789, 0.275578, 0.282022, 0.628542, 0.232184, 0.848367, 0.200754, 0.179177}, {0, 0, 2, 3, 3, 0, 2, 2, 2, 2, 3, 0, 3, 2, 2, 2, 3, 3, 3, 3, 2, 0, 0, 0, 2, 3, 3, 3, 2, 2, 0, 0, 2, 3, 3, 0, 0, 2, 0, 0, 3, 2, 3, 0, 3, 0, 3, 3, 0, 2, 0, 3, 2, 0, 3, 0, 3, 3, 3, 2, 2, 3, 0, 0, 3, 3, 0, 2, 2, 3, 0, 3, 2, 2, 2, 0, 2, 3, 3, 3, 2, 3, 3, 3, 2, 0, 2, 0, 3, 3, 3, 3, 2, 2, 0, 2, 0, 3, 2, 2, 2, 0, 0, 3, 0, 2, 2, 3, 2, 3, 0, 2, 2, 2, 3, 2, 0, 0, 2, 3, 3, 2, 0, 2, 0, 0, 2, 0, 2, 2, 3, 2, 2, 0, 3, 0, 3, 2, 2, 2, 3, 3, 0, 0, 0, 3, 2, 3, 3, 3, 3, 0, 2, 0, 3, 2, 3, 2, 3, 0, 2, 3, 3, 2, 3, 3, 2, 2, 0, 0, 2, 3, 3, 2, 3, 0, 2, 0, 2, 0, 3, 2, 3, 2, 3, 0, 3, 0, 3, 0, 2, 3, 2, 2, 3, 0, 2, 2, 2, 0, 3, 2, 3, 3, 2, 3, 2, 3, 3, 2, 2, 0, 0, 2, 2, 3, 0, 3, 0, 2, 0, 0, 2, 3, 0, 3, 3, 2, 0, 3, 3, 0, 3, 0, 2, 2, 0, 2, 0, 2, 0, 0, 0, 2, 0, 3, 2, 3, 2, 3, 2, 2, 0, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 2, 0, 0, 2, 3, 3, 0, 2, 3, 2, 2, 3, 0, 3, 0, 0, 2, 0, 2, 0, 2, 2, 3, 3, 2, 3, 0, 0, 3, 2, 2, 0, 3, 2, 0, 0, 3, 0, 0, 2, 0, 3, 2, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 2, 0, 0, 2, 0, 2, 3, 2, 3, 3, 2, 2, 0, 0, 0, 3, 0, 2, 0, 2, 0, 2, 2, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 2, 3, 3, 2, 3, 3, 0, 2, 2, 2, 2, 0, 2, 0, 0, 0, 2, 2, 3, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 2, 2, 0, 3, 0, 2, 0, 2, 3, 0, 3, 0, 0, 0, 3, 2, 3, 3, 0, 3, 2, 3, 0, 2, 3, 3, 0, 2, 3, 0, 0, 0, 2, 0, 3, 0, 2, 3, 3, 3, 3, 3, 0, 2, 0, 2, 2, 3, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 0, 0, 2, 3, 3, 2, 3, 0, 0, 0, 0, 3, 3, 0, 3, 2, 0, 2, 3, 2, 2, 3, 3, 2, 2, 2, 0, 2, 3, 0, 3, 3, 0, 0, 2, 0, 3, 2, 3, 0, 2, 0, 2, 2, 3, 2, 0, 3, 3, 3, 2, 3, 0, 3, 0, 2, 2, 0, 0, 0, 3, 0, 3, 3, 2, 3, 2, 3, 2, 3, 0, 2, 3, 0, 2, 0, 3, 3, 3, 3, 3, 3, 2, 0, 3, 2, 2, 2, 3, 3, 2, 3, 0, 2, 3, 3, 2, 2, 0, 0, 0, 0, 3, 0, 3, 3, 3, 0, 0, 0, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 3, 3, 0, 0, 2, 2, 3, 3, 2, 2, 0, 0, 3, 0, 0, 0, 2, 3, 0, 0, 0, 3, 0, 3, 0, 2, 2, 0, 0, 0, 0, 3, 2, 2, 3, 2, 3, 2, 2, 2, 2, 3, 0, 0, 2, 3, 0, 3, 3, 0, 3, 0, 0, 2, 0, 3, 3, 0, 2, 2, 3, 3, 
0, 0, 2, 0, 2, 3, 2, 0, 0, 3, 3, 0, 3, 2, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 0, 2, 2, 0, 0, 2, 0, 3, 3, 2, 3, 2, 0, 3, 0, 2, 2, 3, 3, 0, 3, 2, 2, 0, 3, 0, 0, 0, 2, 0, 3, 2, 0, 2, 3, 2, 3, 2, 2, 3, 3, 0, 2, 3, 2, 3, 2, 2, 0, 3, 0, 3, 0, 2, 2, 2, 0, 2, 0, 2, 2, 0, 0, 3, 3, 0, 0, 3, 2, 0, 2, 3, 2, 2, 0, 3, 3, 0, 2, 0, 3, 3, 0, 2, 3, 2, 3, 2, 0, 2, 2, 0, 0, 0, 2, 2, 3, 3, 2, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 0, 3, 3, 3, 0, 2, 0, 2, 3, 2, 0, 3, 3, 2, 0, 2, 0, 3, 2, 0, 3, 0, 0, 2, 2, 0, 3, 0, 2, 3, 3, 3, 0, 2, 0, 0, 3, 0, 2, 3, 2, 2, 0, 3, 3, 3, 3, 3, 0, 3, 0, 0, 0, 0, 3, 2, 0, 0, 2, 3, 3, 2, 2, 0, 3, 2, 0, 3, 0, 2, 3, 3, 0, 2, 2, 3, 2, 2, 2, 3, 2, 0, 0, 3, 2, 0, 0, 0, 2, 0, 2, 0, 0, 2, 2, 3, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0, 0, 2, 2, 0, 2, 2, 3, 3, 3, 3, 0, 0, 2, 2, 2, 0, 3, 2, 2, 2, 2, 2, 0, 3, 0, 0, 3, 2, 0, 0, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 2, 2, 2, 0, 0, 3, 2, 2, 0, 0, 0, 2, 3, 2, 0, 2, 3, 3, 3, 0, 3, 3, 0, 2, 0, 0, 2, 3, 3, 0, 3, 2, 2, 2, 2, 2, 3, 3, 2, 2, 3, 3, 2, 3, 0, 3, 3, 0, 3, 2, 2, 0, 2, 0, 3, 0, 3, 0, 2, 3, 0, 2, 3, 2, 0, 2, 0, 3, 0, 2, 3, 3, 2, 0, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 0, 3, 2, 2, 0}, {271, 271, 329, 343, 387, 426, 426, 601}, {426, 601, 426, 387, 343, 271, 329, 271}, {3.70991, 4.43491, 3.76334, 9.43944, 9.43944, 3.70991, 3.76334, 4.43491}}}; typedef ConnectComponentsEdgesTest<int, float> ConnectComponentsEdgesTestF_Int; TEST_P(ConnectComponentsEdgesTestF_Int, Result) { EXPECT_TRUE(true); } INSTANTIATE_TEST_CASE_P(ConnectComponentsEdgesTest, ConnectComponentsEdgesTestF_Int, ::testing::ValuesIn(mr_fix_conn_inputsf2)); }; // namespace sparse }; // end namespace raft
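The block above closes a value-parameterized GoogleTest fixture driven by ::testing::ValuesIn over the mr_fix_conn_inputsf2 inputs. For readers unfamiliar with that machinery, here is a minimal, self-contained sketch of the same TEST_P / INSTANTIATE_TEST_CASE_P pattern with a trivial int parameter; the fixture name, suite name, and parameter values are illustrative only, not taken from the original test.

#include <gtest/gtest.h>
#include <vector>

// Fixture parameterized over int, mirroring the shape of ConnectComponentsEdgesTest<int, float>.
class SquareTest : public ::testing::TestWithParam<int> {};

TEST_P(SquareTest, NonNegative) {
    int v = GetParam();
    EXPECT_GE(v * v, 0);   // the body runs once per instantiated parameter value
}

// One test instance per value, analogous to ::testing::ValuesIn(mr_fix_conn_inputsf2) above.
INSTANTIATE_TEST_CASE_P(SquareTestSuite, SquareTest,
                        ::testing::ValuesIn(std::vector<int>{-2, 0, 3}));

int main(int argc, char **argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}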
99ec548080bfbc7301b7e9dd95e41d9edd96918f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ********************************************** * CS314 Principles of Programming Languages * * Spring 2020 * ********************************************** */ #include <stdio.h> #include <stdlib.h> __global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) { /*YOUR CODE HERE*/ int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index while( tid < numElements ) { if( distance == 0 )//since it's exclusive prefix sum, when distance is 0 shift all values over to the right once { if( tid == 0 ) { newSum[tid] = 0; } else { newSum[tid] = oldSum[tid - 1]; } } else { if( tid - distance >= 0 )//if index - stride is still in the array, add those old sums together { newSum[tid] = oldSum[tid] + oldSum[tid - distance]; } else//otherwise just move the element to the same index { newSum[tid] = oldSum[tid]; } } tid += ( blockDim.x * gridDim.x ); } return; }
99ec548080bfbc7301b7e9dd95e41d9edd96918f.cu
/* ********************************************** * CS314 Principles of Programming Languages * * Spring 2020 * ********************************************** */ #include <stdio.h> #include <stdlib.h> __global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) { /*YOUR CODE HERE*/ int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index while( tid < numElements ) { if( distance == 0 )//since it's exclusive prefix sum, when distance is 0 shift all values over to the right once { if( tid == 0 ) { newSum[tid] = 0; } else { newSum[tid] = oldSum[tid - 1]; } } else { if( tid - distance >= 0 )//if index - stride is still in the array, add those old sums together { newSum[tid] = oldSum[tid] + oldSum[tid - distance]; } else//otherwise just move the element to the same index { newSum[tid] = oldSum[tid]; } } tid += ( blockDim.x * gridDim.x ); } return; }
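The kernel above performs a single pass of a Hillis–Steele style exclusive scan: a distance of 0 shifts the input one slot to the right, and each later pass adds the element distance positions back. The host is expected to invoke it repeatedly with doubling distances, swapping the two buffers between passes. Below is a minimal host driver sketch under that assumption; the element count, block size, and ping-pong buffer names are illustrative, and it presumes being compiled together with the kernel file above.

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Prototype of the kernel defined in the file above.
__global__ void exclusive_prefix_sum_gpu(int *oldSum, int *newSum, int distance, int numElements);

int main(void) {
    const int numElements = 1 << 20;
    const int threads = 256;
    const int blocks = (numElements + threads - 1) / threads;

    // Host input: all ones, so the exclusive scan of element i should equal i.
    int *h = (int *)malloc(numElements * sizeof(int));
    for (int i = 0; i < numElements; ++i) h[i] = 1;

    int *d_old = NULL, *d_new = NULL;
    cudaMalloc(&d_old, numElements * sizeof(int));
    cudaMalloc(&d_new, numElements * sizeof(int));
    cudaMemcpy(d_old, h, numElements * sizeof(int), cudaMemcpyHostToDevice);

    // Pass 0: distance == 0 shifts everything one slot to the right (makes the scan exclusive).
    exclusive_prefix_sum_gpu<<<blocks, threads>>>(d_old, d_new, 0, numElements);
    int *tmp = d_old; d_old = d_new; d_new = tmp;

    // Remaining passes: double the stride until it covers the whole array.
    for (int distance = 1; distance < numElements; distance *= 2) {
        exclusive_prefix_sum_gpu<<<blocks, threads>>>(d_old, d_new, distance, numElements);
        tmp = d_old; d_old = d_new; d_new = tmp;
    }

    // After the final swap, d_old holds the most recently written result.
    cudaMemcpy(h, d_old, numElements * sizeof(int), cudaMemcpyDeviceToHost);
    printf("scan[0]=%d scan[1]=%d scan[last]=%d (expected 0, 1, %d)\n",
           h[0], h[1], h[numElements - 1], numElements - 1);

    cudaFree(d_old); cudaFree(d_new); free(h);
    return 0;
}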
4052f4c7c4f4b323034d7d525291ca4fa66e0a6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //******************************************************************************************************************// // Copyright (c) 2021, University of North Carolina at Charlotte // and Lawrence Livermore National Security, LLC. // SPDX-License-Identifier: (BSD-3-Clause) //*****************************************************************************************************************// #include "matadd_2D.h" #include"cuda_runtime.h" #include"device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 16 texture<float,2>texMatrixA; texture<float,2>texMatrixB; //constant memory __constant__ int cons_M; __constant__ int cons_N; __global__ void add_warmingup(float * d_matrixA, float * d_matrixB, float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { d_Result[tidx * d_N + tidy] = d_matrixA[tidx * d_N + tidy] + d_matrixB[tidx * d_N + tidy]; } } __global__ void add(float * d_matrixA, float * d_matrixB, float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { d_Result[tidx * d_N + tidy] = d_matrixA[tidx * d_N + tidy] + d_matrixB[tidx * d_N + tidy]; } } __global__ void add_const(float * d_matrixA, float * d_matrixB, float *d_Result) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<cons_M && tidy<cons_N) { d_Result[tidx * cons_N + tidy] = d_matrixA[tidx * cons_N + tidy] + d_matrixB[tidx * cons_N + tidy]; } } __global__ static void add_texture(float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { float u = tex2D(texMatrixA,tidx,tidy); float v = tex2D(texMatrixB,tidx,tidy); d_Result[tidx * d_N + tidy] = u + v; } } __global__ static void add_texture_constant(float *d_Result) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<cons_M && tidy<cons_N) { float u = tex2D(texMatrixA,tidx,tidy); float v = tex2D(texMatrixB,tidx,tidy); d_Result[tidx * cons_N+ tidy] = u + v; } } void matadd(float * h_matrixA, float * h_matrixB, int M, int N, float * h_result) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); float *d_matrixA = NULL, *d_matrixB = NULL, *d_result = NULL; hipMalloc(&d_matrixA, M * N * sizeof(float)); hipMalloc(&d_matrixB, M * N * sizeof(float)); hipMalloc(&d_result, M * N * sizeof(float)); hipMemcpy(d_matrixA, h_matrixA, M * N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_matrixB, h_matrixB, M * N * sizeof(float), hipMemcpyHostToDevice); hipBindTexture2D(0, texMatrixA, d_matrixA, channelDesc, N, M, M * sizeof(float)); hipBindTexture2D(0, texMatrixB, d_matrixB, channelDesc, N, M, M * sizeof(float)); hipMemcpyToSymbol(cons_M,&M,sizeof(float),0); hipMemcpyToSymbol(cons_N,&N,sizeof(float),0); dim3 blocks(1,1,1); dim3 threadsperblock(BLOCK_SIZE,BLOCK_SIZE,1); blocks.x=((M/BLOCK_SIZE) + (((M)%BLOCK_SIZE)==0?0:1)); blocks.y=((N/BLOCK_SIZE) + (((N)%BLOCK_SIZE)==0?0:1)); hipLaunchKernelGGL(( add_warmingup), dim3(blocks),dim3(threadsperblock), 0, 0, d_matrixA,d_matrixB,d_result,M,N); hipDeviceSynchronize(); hipLaunchKernelGGL(( add), dim3(blocks),dim3(threadsperblock), 0, 0, 
d_matrixA,d_matrixB,d_result,M,N); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_const), dim3(blocks),dim3(threadsperblock), 0, 0, d_matrixA,d_matrixB,d_result); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_texture), dim3(blocks),dim3(threadsperblock), 0, 0, d_result,M,N); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_texture_constant), dim3(blocks),dim3(threadsperblock), 0, 0, d_result); hipDeviceSynchronize(); hipDeviceSynchronize(); hipMemcpy(h_result,d_result,M * N * sizeof(float), hipMemcpyDeviceToHost); hipUnbindTexture(texMatrixA); hipUnbindTexture(texMatrixB); hipFree(d_matrixA); hipFree(d_matrixB); hipFree(d_result); }
4052f4c7c4f4b323034d7d525291ca4fa66e0a6d.cu
//******************************************************************************************************************// // Copyright (c) 2021, University of North Carolina at Charlotte // and Lawrence Livermore National Security, LLC. // SPDX-License-Identifier: (BSD-3-Clause) //*****************************************************************************************************************// #include "matadd_2D.h" #include"cuda_runtime.h" #include"device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 16 texture<float,2>texMatrixA; texture<float,2>texMatrixB; //constant memory __constant__ int cons_M; __constant__ int cons_N; __global__ void add_warmingup(float * d_matrixA, float * d_matrixB, float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { d_Result[tidx * d_N + tidy] = d_matrixA[tidx * d_N + tidy] + d_matrixB[tidx * d_N + tidy]; } } __global__ void add(float * d_matrixA, float * d_matrixB, float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { d_Result[tidx * d_N + tidy] = d_matrixA[tidx * d_N + tidy] + d_matrixB[tidx * d_N + tidy]; } } __global__ void add_const(float * d_matrixA, float * d_matrixB, float *d_Result) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<cons_M && tidy<cons_N) { d_Result[tidx * cons_N + tidy] = d_matrixA[tidx * cons_N + tidy] + d_matrixB[tidx * cons_N + tidy]; } } __global__ static void add_texture(float *d_Result, int d_M, int d_N) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<d_M && tidy<d_N) { float u = tex2D(texMatrixA,tidx,tidy); float v = tex2D(texMatrixB,tidx,tidy); d_Result[tidx * d_N + tidy] = u + v; } } __global__ static void add_texture_constant(float *d_Result) { const int tidx = blockDim.x * blockIdx.x + threadIdx.x; const int tidy = blockDim.y * blockIdx.y + threadIdx.y; if(tidx<cons_M && tidy<cons_N) { float u = tex2D(texMatrixA,tidx,tidy); float v = tex2D(texMatrixB,tidx,tidy); d_Result[tidx * cons_N+ tidy] = u + v; } } void matadd(float * h_matrixA, float * h_matrixB, int M, int N, float * h_result) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); float *d_matrixA = NULL, *d_matrixB = NULL, *d_result = NULL; cudaMalloc(&d_matrixA, M * N * sizeof(float)); cudaMalloc(&d_matrixB, M * N * sizeof(float)); cudaMalloc(&d_result, M * N * sizeof(float)); cudaMemcpy(d_matrixA, h_matrixA, M * N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_matrixB, h_matrixB, M * N * sizeof(float), cudaMemcpyHostToDevice); cudaBindTexture2D(0, texMatrixA, d_matrixA, channelDesc, N, M, M * sizeof(float)); cudaBindTexture2D(0, texMatrixB, d_matrixB, channelDesc, N, M, M * sizeof(float)); cudaMemcpyToSymbol(cons_M,&M,sizeof(float),0); cudaMemcpyToSymbol(cons_N,&N,sizeof(float),0); dim3 blocks(1,1,1); dim3 threadsperblock(BLOCK_SIZE,BLOCK_SIZE,1); blocks.x=((M/BLOCK_SIZE) + (((M)%BLOCK_SIZE)==0?0:1)); blocks.y=((N/BLOCK_SIZE) + (((N)%BLOCK_SIZE)==0?0:1)); add_warmingup<<<blocks,threadsperblock>>>(d_matrixA,d_matrixB,d_result,M,N); cudaDeviceSynchronize(); add<<<blocks,threadsperblock>>>(d_matrixA,d_matrixB,d_result,M,N); cudaDeviceSynchronize(); add_const<<<blocks,threadsperblock>>>(d_matrixA,d_matrixB,d_result); cudaDeviceSynchronize(); 
add_texture<<<blocks,threadsperblock>>>(d_result,M,N); cudaDeviceSynchronize(); add_texture_constant<<<blocks,threadsperblock>>>(d_result); cudaDeviceSynchronize(); cudaDeviceSynchronize(); cudaMemcpy(h_result,d_result,M * N * sizeof(float), cudaMemcpyDeviceToHost); cudaUnbindTexture(texMatrixA); cudaUnbindTexture(texMatrixB); cudaFree(d_matrixA); cudaFree(d_matrixB); cudaFree(d_result); }
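A small host-side usage sketch for the matadd entry point defined above, assuming matadd_2D.h declares that signature. Symmetric input matrices and a square size are used so the verification does not depend on the row/column ordering or texture pitch used inside the individual kernel variants; the fill values are arbitrary.

#include <cstdio>
#include <cstdlib>
#include "matadd_2D.h"   // assumed to declare matadd(float*, float*, int, int, float*)

int main(void) {
    const int M = 64, N = 64;            // square size so width and pitch coincide
    float *A = (float *)malloc(M * N * sizeof(float));
    float *B = (float *)malloc(M * N * sizeof(float));
    float *C = (float *)malloc(M * N * sizeof(float));

    for (int r = 0; r < M; ++r)
        for (int c = 0; c < N; ++c) {
            A[r * N + c] = (float)(r + c);          // symmetric fill: A == A^T
            B[r * N + c] = 2.0f * (float)(r + c);   // symmetric fill: B == B^T
        }

    matadd(A, B, M, N, C);               // runs all five kernel variants; C should hold A + B

    int errors = 0;
    for (int r = 0; r < M; ++r)
        for (int c = 0; c < N; ++c)
            if (C[r * N + c] != 3.0f * (float)(r + c)) ++errors;
    printf("matadd: %d mismatches out of %d elements\n", errors, M * N);

    free(A); free(B); free(C);
    return errors == 0 ? 0 : 1;
}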
33dfe1376af289b767825646fa4260515450dc8d.hip
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMasked.cuh>
#include "THHTensor.hpp"
#include "../generic/THCTensorMasked.cu"
#include <THH/THHGenerateDoubleType.h>
33dfe1376af289b767825646fa4260515450dc8d.cu
#include <THC/THCTensorMasked.cuh>
#include "THCTensor.hpp"
#include "../generic/THCTensorMasked.cu"
#include <THC/THCGenerateDoubleType.h>
0258b5de110058a0e77c4f660e2f7c24da2966b7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include <math.h> #include "hip/hip_fp16.h" int PrintCu(const int N,int *a); __global__ void MetNormal(int *a_d,const int N ); __global__ void diagonal(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec); __global__ void rec(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec,const int N_CORTE, const int BlockSize){ int x=blockIdx.x*blockDim.x+threadIdx.x+xi; int x0=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y+yi; int y0=blockIdx.y*blockDim.y+threadIdx.y; int z=blockIdx.z*blockDim.z+threadIdx.z+zi; int z0=blockIdx.z*blockDim.z+threadIdx.z; int tid1=z*N*N+y*N+x; int tid=z0*N*N+y0*N+x0; int NmF= (int)ceil((float)((Nm)/2.0f)); //printf("pspace x y z (%i %i %i) dominio x y z (%i %i %i) tid1 dominio %i tid pspace %i \n",x0, y0, z0, x, y, z,tid1,tid); //if(Nm>=BlockSize ){ if(x < N && y < N && z < N){ //a_d[tid1]=n_rec; a_d[tid1]=1; if(x+y >z){ a_d[tid1]=9; } } //} if(Nm<=N_CORTE && tid==0){ // AQUI NCORTE(pensar en calcular N_corte en base al basico optimo <7>) int Nbloques= (Nm+BlockSize-1)/BlockSize; dim3 b(BlockSize,BlockSize,BlockSize); dim3 g(Nbloques,Nbloques,Nbloques); hipStream_t s1; hipStream_t s2; hipStream_t s3; hipStreamCreateWithFlags(&s1,hipStreamNonBlocking); hipStreamCreateWithFlags(&s2,hipStreamNonBlocking); hipStreamCreateWithFlags(&s3,hipStreamNonBlocking); hipLaunchKernelGGL(( diagonal), dim3(g),dim3(b),0,s1, a_d,N,Nm,xi+Nm,yi,zi,n_rec); hipLaunchKernelGGL(( diagonal), dim3(g),dim3(b),0,s2, a_d,N,Nm,xi,yi+Nm,zi,n_rec); hipLaunchKernelGGL(( diagonal), dim3(g),dim3(b),0,s3, a_d,N,Nm,xi,yi,zi-Nm,n_rec); //diagonal kernel nuevo return; } //busca los nuevos puntos a considerar para el siguiente mapeo if( tid==0 && Nm!=0){ //int Nbloques=(((int) ceil((float)(Nm)/2.0f))+BlockSize-1)/BlockSize; int Nbloques=(NmF+BlockSize-1)/BlockSize; //printf("nbloques= %i %i \n", Nbloques, BlockSize); dim3 b(BlockSize,BlockSize,BlockSize); dim3 g(Nbloques,Nbloques,Nbloques); n_rec=n_rec+1; hipStream_t s1; hipStream_t s2; hipStream_t s3; hipStreamCreateWithFlags(&s1,hipStreamNonBlocking); hipStreamCreateWithFlags(&s2,hipStreamNonBlocking); hipStreamCreateWithFlags(&s3,hipStreamNonBlocking); // //int NmF= (int)ceil((float)((Nm)/2.0f)); //printf("kernel Nm = %i NmF= %i \n",Nm, NmF); hipLaunchKernelGGL(( rec), dim3(g),dim3(b),0,s1, a_d,N,NmF,xi+Nm,yi,zi+NmF,n_rec,N_CORTE,BlockSize); hipLaunchKernelGGL(( rec), dim3(g),dim3(b),0,s2, a_d,N,NmF,xi,yi+Nm,zi+NmF,n_rec,N_CORTE,BlockSize); hipLaunchKernelGGL(( rec), dim3(g),dim3(b),0,s3, a_d,N,NmF,xi,yi,zi-NmF,n_rec,N_CORTE,BlockSize); } } __global__ void diagonal(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec){ int x= blockIdx.x*blockDim.x+threadIdx.x+xi; int y= blockIdx.y*blockDim.y+threadIdx.y+yi; int z= blockIdx.z*blockDim.z+threadIdx.z+zi; int tid1= z*N*N+y*N+x; if(x < N && y < N && z < N){ if(x+y<= z){ a_d[tid1]=1; } } } __global__ void MetNormal(int *a_d,const int N){ int x= blockIdx.x*blockDim.x+threadIdx.x; int y= blockIdx.y*blockDim.y+threadIdx.y; int z= blockIdx.z*blockDim.z+threadIdx.z; int ind=z*N*N+y*N+x; if( x+y<=z){ a_d[ind]=1; } } int PrintCu(const int N, int *a){ for(int k=0;k<N;k++){ printf("z= %i \n",k); for (int i=0;i<N;i++){ for(int j=0;j<N;j++){ if(a[N*N*k+i*N+j] != 9){ printf("%i ",a[N*N*k+i*N+j]); } else{ printf("* "); } } printf("\n"); } printf("\n"); } //printf("Ok\n"); return 0; } int Ver_Resultado(const 
int N, int *a){ for(int i=0;i<N;i++){ for (int j=0;j<N;j++){ for(int k=0;k<N;k++){ if( !((i+j<=k && a[N*N*k+i*N+j]==1) || (i+j>k && a[N*N*k+i*N+j]==9) ) ){ printf("Error en Matriz \n" ); exit(1); } } } } //printf("Matriz correcta \n"); return (0); } int main(int argc ,char **argv){ if (argc !=6){ fprintf(stderr,"error, ejecutar programa como ./prog N met rep ncorte BlockSize\n"); exit(EXIT_FAILURE); } unsigned long N=atoi(argv[1]); unsigned long met=atoi(argv[2]); unsigned long rep=atoi(argv[3]); unsigned long ncort=atoi(argv[4]); //int nt=atoi(argv[5]);//numero de threads por bloque(eliminar ) int BSize=atoi(argv[5]); int *a,*a_d, xi=0,yi=0,zi=(int) ceil((float)(N/2.0f)); //double *datos; //printf("malloc ..."); fflush(stdout); a=(int*)malloc(sizeof(int)*N*N*N); //datos=(double*)malloc(sizeof(double)*rep); //printf("ok ...\ncuda malloc..."); fflush(stdout); hipMalloc((void ** ) &a_d,N*N*N*sizeof(int)); //printf("ok ...\n"); fflush(stdout); dim3 Bloque(BSize,BSize,BSize);//un bloquede nt float NB=(float)N/(float)(2*BSize); int B=(int) ceil(NB); dim3 Grid(B,B,B);//bgrid de B*b*b bloque dim3 GridBruto((N+BSize-1)/BSize,(N+BSize-1)/BSize,(N+BSize-1)/BSize); //printf("inicializando con N= %i ...",N); fflush(stdout); for(int i=0;i<N;i++){ for (int j=0;j<N;j++){ for(int k=0;k<N;k++){ a[N*N*k+i*N+j]=9; } } } //printf(" ok..\n"); fflush(stdout); int n_rec=0; double t1=omp_get_wtime(); hipMemcpy(a_d,a,N*N*N*sizeof(int),hipMemcpyHostToDevice); //printf("calculo GPU...\n"); fflush(stdout); double t2; double t3; if(ncort >= BSize && (met==1 || met==2 )){ if(met==1){// aqui se supone que viene un while o for para las iteraciones //printf("Metodo recursivo......\n"); fflush(stdout); for(int i=0;i<150;i++){ hipLaunchKernelGGL(( rec), dim3(Grid),dim3(Bloque), 0, 0, a_d,N,(int) ceil((float)(N)/2.0f),xi,yi,zi,n_rec,ncort,BSize); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); exit(-1); } } t2=omp_get_wtime(); for(int i=0;i<rep;i++){ hipLaunchKernelGGL(( rec), dim3(Grid),dim3(Bloque), 0, 0, a_d,N,(int) ceil((float)(N)/2.0f),xi,yi,zi,n_rec,ncort,BSize); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); exit(-1); } } t3=omp_get_wtime(); //printf("ok\n"); fflush(stdout); } if(met==2){ //printf("Metodo bruto...\n"); for(int i =0;i<150;i++){ hipLaunchKernelGGL(( MetNormal), dim3(GridBruto),dim3(Bloque), 0, 0, a_d,N); hipDeviceSynchronize(); } t2=omp_get_wtime(); for(int i =0;i<rep;i++){ hipLaunchKernelGGL(( MetNormal), dim3(GridBruto),dim3(Bloque), 0, 0, a_d,N); hipDeviceSynchronize(); } t3=omp_get_wtime(); fflush(stdout); } } else{ printf("Error, N de corte menor a tamao de bloque o metodo invalido\n"); return; } //aqui calculo el promedio de los tiempos double media=(t3-t2)/rep; //printf("Tiempo promedio con %i iteraciones: %f \n",rep,media); fflush(stdout); hipDeviceSynchronize(); //printf("ok..\n"); hipMemcpy(a,a_d,N*N*N*sizeof(int),hipMemcpyDeviceToHost); double t4=omp_get_wtime(); /*printf("calculo cpu..."); fflush(stdout); double t5=omp_get_wtime(); printf("ok..\n"); printf("verificando...\n"); */ //if(N < 128){ //PrintCu(N,a);//imprime cubo //} Ver_Resultado(N,a); //printf("grid : %i %i %i,Bloque: %i %i %i \n",Bloque.x,Bloque.y,Bloque.z ,Grid.x,Grid.y,Grid.z); //printf("gridBruto : %i %i %i,Bloque: %i %i %i 
\n",GridBruto.x,GridBruto.y,GridBruto.z ,Grid.x,Grid.y,Grid.z); //printf("tiempo copy a gpu : %f\ntiempo kernel: %f\ntiempo copy to host: %f tiempo total: %f\n",t2-t1,media,t4-t3,t4-t1); //printf("tiempo cpu %f\n",t5-t4); printf("%f\n",1000*media); return 0; }
0258b5de110058a0e77c4f660e2f7c24da2966b7.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include <math.h> #include "cuda_fp16.h" int PrintCu(const int N,int *a); __global__ void MetNormal(int *a_d,const int N ); __global__ void diagonal(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec); __global__ void rec(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec,const int N_CORTE, const int BlockSize){ int x=blockIdx.x*blockDim.x+threadIdx.x+xi; int x0=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y+yi; int y0=blockIdx.y*blockDim.y+threadIdx.y; int z=blockIdx.z*blockDim.z+threadIdx.z+zi; int z0=blockIdx.z*blockDim.z+threadIdx.z; int tid1=z*N*N+y*N+x; int tid=z0*N*N+y0*N+x0; int NmF= (int)ceil((float)((Nm)/2.0f)); //printf("pspace x y z (%i %i %i) dominio x y z (%i %i %i) tid1 dominio %i tid pspace %i \n",x0, y0, z0, x, y, z,tid1,tid); //if(Nm>=BlockSize ){ if(x < N && y < N && z < N){ //a_d[tid1]=n_rec; a_d[tid1]=1; if(x+y >z){ a_d[tid1]=9; } } //} if(Nm<=N_CORTE && tid==0){ // AQUI NCORTE(pensar en calcular N_corte en base al basico optimo <7>) int Nbloques= (Nm+BlockSize-1)/BlockSize; dim3 b(BlockSize,BlockSize,BlockSize); dim3 g(Nbloques,Nbloques,Nbloques); cudaStream_t s1; cudaStream_t s2; cudaStream_t s3; cudaStreamCreateWithFlags(&s1,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&s2,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&s3,cudaStreamNonBlocking); diagonal<<<g,b,0,s1>>>(a_d,N,Nm,xi+Nm,yi,zi,n_rec); diagonal<<<g,b,0,s2>>>(a_d,N,Nm,xi,yi+Nm,zi,n_rec); diagonal<<<g,b,0,s3>>>(a_d,N,Nm,xi,yi,zi-Nm,n_rec); //diagonal kernel nuevo return; } //busca los nuevos puntos a considerar para el siguiente mapeo if( tid==0 && Nm!=0){ //int Nbloques=(((int) ceil((float)(Nm)/2.0f))+BlockSize-1)/BlockSize; int Nbloques=(NmF+BlockSize-1)/BlockSize; //printf("nbloques= %i %i \n", Nbloques, BlockSize); dim3 b(BlockSize,BlockSize,BlockSize); dim3 g(Nbloques,Nbloques,Nbloques); n_rec=n_rec+1; cudaStream_t s1; cudaStream_t s2; cudaStream_t s3; cudaStreamCreateWithFlags(&s1,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&s2,cudaStreamNonBlocking); cudaStreamCreateWithFlags(&s3,cudaStreamNonBlocking); // //int NmF= (int)ceil((float)((Nm)/2.0f)); //printf("kernel Nm = %i NmF= %i \n",Nm, NmF); rec<<<g,b,0,s1>>>(a_d,N,NmF,xi+Nm,yi,zi+NmF,n_rec,N_CORTE,BlockSize); rec<<<g,b,0,s2>>>(a_d,N,NmF,xi,yi+Nm,zi+NmF,n_rec,N_CORTE,BlockSize); rec<<<g,b,0,s3>>>(a_d,N,NmF,xi,yi,zi-NmF,n_rec,N_CORTE,BlockSize); } } __global__ void diagonal(int *a_d ,const int N ,int Nm ,int xi ,int yi,int zi,int n_rec){ int x= blockIdx.x*blockDim.x+threadIdx.x+xi; int y= blockIdx.y*blockDim.y+threadIdx.y+yi; int z= blockIdx.z*blockDim.z+threadIdx.z+zi; int tid1= z*N*N+y*N+x; if(x < N && y < N && z < N){ if(x+y<= z){ a_d[tid1]=1; } } } __global__ void MetNormal(int *a_d,const int N){ int x= blockIdx.x*blockDim.x+threadIdx.x; int y= blockIdx.y*blockDim.y+threadIdx.y; int z= blockIdx.z*blockDim.z+threadIdx.z; int ind=z*N*N+y*N+x; if( x+y<=z){ a_d[ind]=1; } } int PrintCu(const int N, int *a){ for(int k=0;k<N;k++){ printf("z= %i \n",k); for (int i=0;i<N;i++){ for(int j=0;j<N;j++){ if(a[N*N*k+i*N+j] != 9){ printf("%i ",a[N*N*k+i*N+j]); } else{ printf("* "); } } printf("\n"); } printf("\n"); } //printf("Ok\n"); return 0; } int Ver_Resultado(const int N, int *a){ for(int i=0;i<N;i++){ for (int j=0;j<N;j++){ for(int k=0;k<N;k++){ if( !((i+j<=k && a[N*N*k+i*N+j]==1) || (i+j>k && a[N*N*k+i*N+j]==9) ) ){ printf("Error en Matriz \n" ); exit(1); } } } } //printf("Matriz correcta \n"); 
return (0); } int main(int argc ,char **argv){ if (argc !=6){ fprintf(stderr,"error, ejecutar programa como ./prog N met rep ncorte BlockSize\n"); exit(EXIT_FAILURE); } unsigned long N=atoi(argv[1]); unsigned long met=atoi(argv[2]); unsigned long rep=atoi(argv[3]); unsigned long ncort=atoi(argv[4]); //int nt=atoi(argv[5]);//numero de threads por bloque(eliminar ) int BSize=atoi(argv[5]); int *a,*a_d, xi=0,yi=0,zi=(int) ceil((float)(N/2.0f)); //double *datos; //printf("malloc ..."); fflush(stdout); a=(int*)malloc(sizeof(int)*N*N*N); //datos=(double*)malloc(sizeof(double)*rep); //printf("ok ...\ncuda malloc..."); fflush(stdout); cudaMalloc((void ** ) &a_d,N*N*N*sizeof(int)); //printf("ok ...\n"); fflush(stdout); dim3 Bloque(BSize,BSize,BSize);//un bloquede nt float NB=(float)N/(float)(2*BSize); int B=(int) ceil(NB); dim3 Grid(B,B,B);//bgrid de B*b*b bloque dim3 GridBruto((N+BSize-1)/BSize,(N+BSize-1)/BSize,(N+BSize-1)/BSize); //printf("inicializando con N= %i ...",N); fflush(stdout); for(int i=0;i<N;i++){ for (int j=0;j<N;j++){ for(int k=0;k<N;k++){ a[N*N*k+i*N+j]=9; } } } //printf(" ok..\n"); fflush(stdout); int n_rec=0; double t1=omp_get_wtime(); cudaMemcpy(a_d,a,N*N*N*sizeof(int),cudaMemcpyHostToDevice); //printf("calculo GPU...\n"); fflush(stdout); double t2; double t3; if(ncort >= BSize && (met==1 || met==2 )){ if(met==1){// aqui se supone que viene un while o for para las iteraciones //printf("Metodo recursivo......\n"); fflush(stdout); for(int i=0;i<150;i++){ rec<<<Grid,Bloque>>>(a_d,N,(int) ceil((float)(N)/2.0f),xi,yi,zi,n_rec,ncort,BSize); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } } t2=omp_get_wtime(); for(int i=0;i<rep;i++){ rec<<<Grid,Bloque>>>(a_d,N,(int) ceil((float)(N)/2.0f),xi,yi,zi,n_rec,ncort,BSize); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } } t3=omp_get_wtime(); //printf("ok\n"); fflush(stdout); } if(met==2){ //printf("Metodo bruto...\n"); for(int i =0;i<150;i++){ MetNormal<<<GridBruto,Bloque>>>(a_d,N); cudaDeviceSynchronize(); } t2=omp_get_wtime(); for(int i =0;i<rep;i++){ MetNormal<<<GridBruto,Bloque>>>(a_d,N); cudaDeviceSynchronize(); } t3=omp_get_wtime(); fflush(stdout); } } else{ printf("Error, N de corte menor a tamaño de bloque o metodo invalido\n"); return; } //aqui calculo el promedio de los tiempos double media=(t3-t2)/rep; //printf("Tiempo promedio con %i iteraciones: %f \n",rep,media); fflush(stdout); cudaDeviceSynchronize(); //printf("ok..\n"); cudaMemcpy(a,a_d,N*N*N*sizeof(int),cudaMemcpyDeviceToHost); double t4=omp_get_wtime(); /*printf("calculo cpu..."); fflush(stdout); double t5=omp_get_wtime(); printf("ok..\n"); printf("verificando...\n"); */ //if(N < 128){ //PrintCu(N,a);//imprime cubo //} Ver_Resultado(N,a); //printf("grid : %i %i %i,Bloque: %i %i %i \n",Bloque.x,Bloque.y,Bloque.z ,Grid.x,Grid.y,Grid.z); //printf("gridBruto : %i %i %i,Bloque: %i %i %i \n",GridBruto.x,GridBruto.y,GridBruto.z ,Grid.x,Grid.y,Grid.z); //printf("tiempo copy a gpu : %f\ntiempo kernel: %f\ntiempo copy to host: %f tiempo total: %f\n",t2-t1,media,t4-t3,t4-t1); //printf("tiempo cpu %f\n",t5-t4); printf("%f\n",1000*media); return 0; }
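Both versions of rec launch further kernels from device code (CUDA dynamic parallelism), so the translation unit has to be built with relocatable device code and linked against the device runtime. The following self-contained sketch shows the same parent-launches-child pattern in miniature; the kernel names, sizes, and the nvcc flags in the comment are illustrative assumptions, not taken from the original build.

// Build (illustrative): nvcc -arch=sm_60 -rdc=true dp_demo.cu -lcudadevrt -o dp_demo
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void child(int *out, int offset) {
    out[offset + threadIdx.x] = offset + threadIdx.x;   // each child fills its own slice
}

__global__ void parent(int *out, int sliceSize) {
    // One parent thread per block launches a child grid, as rec() does for its sub-blocks.
    if (threadIdx.x == 0) {
        child<<<1, sliceSize>>>(out, blockIdx.x * sliceSize);
    }
}

int main(void) {
    const int slices = 4, sliceSize = 32, n = slices * sliceSize;
    int *d = NULL, h[4 * 32];
    cudaMalloc(&d, n * sizeof(int));
    parent<<<slices, 32>>>(d, sliceSize);
    cudaDeviceSynchronize();                      // waits for parents and their children
    cudaMemcpy(h, d, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("h[0]=%d h[%d]=%d\n", h[0], n - 1, h[n - 1]);   // expect 0 and n-1
    cudaFree(d);
    return 0;
}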
5abf6a573c22a996e199c8cb2702b77a909e69b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void oesTadKernel(void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { auto x = static_cast<T*>(vx); __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; __shared__ T *shmem; __shared__ T *dx; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); numTads = xLength / xTadLength; extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { if (threadIdx.x == 0) { dx = x + tadOffsets[r]; } __syncthreads(); // this is general loop, we go uncached int rem = xTadLength % 2; if (xTadLength > 1024) { for (int i = 0; i < (xTadLength / 2) + rem; i++) { // since we can have TAD larger then blockDim, we'll have this loop here for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((!(tid & 1)) && tid < xTadLength - 1) { int t0 = getDevicePosition(tadShapeInfo, tid); int t1 = getDevicePosition(tadShapeInfo, tid+1); if(!descending == (dx[t0] > dx[t1])) { T temp = dx[t1]; dx[t1] = dx[t0]; dx[t0] = temp; } } } __syncthreads(); for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((tid & 1) && tid < xTadLength - 1) { int t0 = getDevicePosition(tadShapeInfo, tid); int t1 = getDevicePosition(tadShapeInfo, tid+1); if(!descending == (dx[t0] > dx[t1])) { T temp = dx[t1]; dx[t1] = dx[t0]; dx[t0] = temp; } } } __syncthreads(); } } else { // we just load up to 1024 elements into shared memory, and sort will be applied there for (int e = threadIdx.x; e < xTadLength; e += blockDim.x) shmem[e] = dx[getDevicePosition(tadShapeInfo, e)]; __syncthreads(); for(int i=0; i < (xTadLength / 2) + rem; i++) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((!(tid & 1)) && tid < xTadLength - 1) { int t0 = tid; int t1 = tid+1; if(!descending == (shmem[t0] > shmem[t1])) { T temp = shmem[t1]; shmem[t1] = shmem[t0]; shmem[t0] = temp; } } __syncthreads(); if((tid & 1) && tid < xTadLength - 1) { int t0 = tid; int t1 = tid+1; if(!descending == (shmem[t0] > shmem[t1])) { T temp = shmem[t1]; shmem[t1] = shmem[t0]; shmem[t0] = temp; } } __syncthreads(); } } // we're dumping our shared memory back to device memory for (int e = threadIdx.x; e < xTadLength; e += blockDim.x) dx[getDevicePosition(tadShapeInfo, e)] = shmem[e]; __syncthreads(); } __syncthreads(); } } 
////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execOesTadKernel(void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { oesTadKernel<T>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void oesTadGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { hipLaunchKernelGGL(( execOesTadKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending), LIBND4J_TYPES);
5abf6a573c22a996e199c8cb2702b77a909e69b9.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void oesTadKernel(void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { auto x = static_cast<T*>(vx); __shared__ int xLength; __shared__ int xTadLength; __shared__ int numTads; __shared__ T *shmem; __shared__ T *dx; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); numTads = xLength / xTadLength; extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { if (threadIdx.x == 0) { dx = x + tadOffsets[r]; } __syncthreads(); // this is general loop, we go uncached int rem = xTadLength % 2; if (xTadLength > 1024) { for (int i = 0; i < (xTadLength / 2) + rem; i++) { // since we can have TAD larger then blockDim, we'll have this loop here for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((!(tid & 1)) && tid < xTadLength - 1) { int t0 = getDevicePosition(tadShapeInfo, tid); int t1 = getDevicePosition(tadShapeInfo, tid+1); if(!descending == (dx[t0] > dx[t1])) { T temp = dx[t1]; dx[t1] = dx[t0]; dx[t0] = temp; } } } __syncthreads(); for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((tid & 1) && tid < xTadLength - 1) { int t0 = getDevicePosition(tadShapeInfo, tid); int t1 = getDevicePosition(tadShapeInfo, tid+1); if(!descending == (dx[t0] > dx[t1])) { T temp = dx[t1]; dx[t1] = dx[t0]; dx[t0] = temp; } } } __syncthreads(); } } else { // we just load up to 1024 elements into shared memory, and sort will be applied there for (int e = threadIdx.x; e < xTadLength; e += blockDim.x) shmem[e] = dx[getDevicePosition(tadShapeInfo, e)]; __syncthreads(); for(int i=0; i < (xTadLength / 2) + rem; i++) { for (int tid = threadIdx.x; tid < xTadLength; tid += blockDim.x ) { if((!(tid & 1)) && tid < xTadLength - 1) { int t0 = tid; int t1 = tid+1; if(!descending == (shmem[t0] > shmem[t1])) { T temp = shmem[t1]; shmem[t1] = shmem[t0]; shmem[t0] = temp; } } __syncthreads(); if((tid & 1) && tid < xTadLength - 1) { int t0 = tid; int t1 = tid+1; if(!descending == (shmem[t0] > shmem[t1])) { T temp = shmem[t1]; shmem[t1] = shmem[t0]; shmem[t0] = temp; } } __syncthreads(); } } // we're dumping our shared memory back to device memory for (int e = threadIdx.x; e < xTadLength; e += blockDim.x) dx[getDevicePosition(tadShapeInfo, e)] = shmem[e]; __syncthreads(); } __syncthreads(); } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void 
execOesTadKernel(void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { oesTadKernel<T>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void oesTadGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { execOesTadKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT oesTadGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending), LIBND4J_TYPES);
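oesTadKernel applies odd-even transposition sort independently to each TAD, staging segments of up to 1024 elements in shared memory before writing them back. A stripped-down, single-segment sketch of that shared-memory path is given below; the float element type, segment length, and launch configuration are assumptions for illustration.

#include <cuda_runtime.h>
#include <stdio.h>

// Sorts one segment of n <= blockDim.x elements in shared memory, ascending.
__global__ void oddEvenSortBlock(float *data, int n) {
    extern __shared__ float s[];
    int tid = threadIdx.x;
    if (tid < n) s[tid] = data[tid];
    __syncthreads();

    for (int pass = 0; pass < n; ++pass) {
        int start = pass & 1;           // even pass compares (0,1),(2,3)...; odd pass (1,2),(3,4)...
        int i = 2 * tid + start;
        if (i + 1 < n && s[i] > s[i + 1]) {
            float t = s[i]; s[i] = s[i + 1]; s[i + 1] = t;
        }
        __syncthreads();                // reached by every thread, inside or outside the swap
    }
    if (tid < n) data[tid] = s[tid];
}

int main(void) {
    const int n = 256;
    float h[n];
    for (int i = 0; i < n; ++i) h[i] = (float)((i * 37) % n);   // scrambled but distinct input

    float *d = NULL;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    oddEvenSortBlock<<<1, n, n * sizeof(float)>>>(d, n);
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);

    int sorted = 1;
    for (int i = 1; i < n; ++i) if (h[i - 1] > h[i]) sorted = 0;
    printf("sorted: %s\n", sorted ? "yes" : "no");
    cudaFree(d);
    return 0;
}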
619e4772b64199c1ed2046607fd02e424532ffda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; __global__ void Kernel( unsigned char* FrameA, unsigned char* FrameB,unsigned char* Frame,unsigned char* FrameF,int width,int height,int colorWidthStep, int grayWidthStep) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if((xIndex>0) && (yIndex>0) && (xIndex<width) && (yIndex<height)) { const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const int gray_tid = yIndex * grayWidthStep + xIndex; Frame[color_tid]=FrameB[color_tid]-FrameA[color_tid]; Frame[color_tid+1]=FrameB[color_tid+1]-FrameA[color_tid+1]; Frame[color_tid+2]=FrameB[color_tid+2]-FrameA[color_tid+2]; __syncthreads(); const unsigned char blue = Frame[color_tid]; const unsigned char green = Frame[color_tid + 1]; const unsigned char red = Frame[color_tid + 2]; const float gray = red * 0.3f + green * 0.59f + blue * 0.11f; FrameF[gray_tid] = static_cast<unsigned char>(gray); } } void ImageGrayConverter(const cv::Mat& FrameA, const cv::Mat& FrameB, cv::Mat& FrameFinal) { const int colorBytes = FrameA.step * FrameA.rows; const int grayBytes = FrameFinal.step * FrameFinal.rows; unsigned char *D_FrameA, *D_FrameB, *D_Frame, *D_FrameFinal; hipMalloc<unsigned char>(&D_FrameA,colorBytes); hipMalloc<unsigned char>(&D_FrameB,colorBytes); hipMalloc<unsigned char>(&D_Frame,colorBytes); hipMalloc<unsigned char>(&D_FrameFinal,grayBytes); hipMemcpy(D_FrameA, FrameA.ptr(),colorBytes,hipMemcpyHostToDevice); hipMemcpy(D_FrameB, FrameB.ptr(),colorBytes,hipMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid((FrameA.cols + block.x - 1)/block.x, (FrameA.rows + block.y - 1)/block.y); hipLaunchKernelGGL(( Kernel), dim3(grid),dim3(block), 0, 0, D_FrameA,D_FrameB,D_Frame,D_FrameFinal,FrameA.cols,FrameA.rows,FrameA.step,FrameFinal.step); hipDeviceSynchronize(); hipMemcpy(FrameFinal.ptr(),D_FrameFinal,grayBytes,hipMemcpyDeviceToHost); hipFree(D_FrameA); hipFree(D_FrameB); hipFree(D_Frame); hipFree(D_FrameFinal); } using namespace std; using namespace cv; int main(int argc, char** argv) { Mat InputA; Mat InputB; cv::VideoCapture cap; cap.open(string(argv[1])); while(1) { cap>>InputA; cap>>InputB; cv::Mat FrameFinal(InputA.rows,InputA.cols,CV_8U); ImageGrayConverter(InputA,InputB,FrameFinal); cv::imshow("Input",InputA); cv::imshow("Output",FrameFinal); if(cv::waitKey(33)>=0) break; } return 0; }
619e4772b64199c1ed2046607fd02e424532ffda.cu
#include<iostream> #include<cstdio> #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<opencv2/imgproc/imgproc.hpp> #include<cuda_runtime.h> using std::cout; using std::endl; __global__ void Kernel( unsigned char* FrameA, unsigned char* FrameB,unsigned char* Frame,unsigned char* FrameF,int width,int height,int colorWidthStep, int grayWidthStep) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if((xIndex>0) && (yIndex>0) && (xIndex<width) && (yIndex<height)) { const int color_tid = yIndex * colorWidthStep + (3 * xIndex); const int gray_tid = yIndex * grayWidthStep + xIndex; Frame[color_tid]=FrameB[color_tid]-FrameA[color_tid]; Frame[color_tid+1]=FrameB[color_tid+1]-FrameA[color_tid+1]; Frame[color_tid+2]=FrameB[color_tid+2]-FrameA[color_tid+2]; __syncthreads(); const unsigned char blue = Frame[color_tid]; const unsigned char green = Frame[color_tid + 1]; const unsigned char red = Frame[color_tid + 2]; const float gray = red * 0.3f + green * 0.59f + blue * 0.11f; FrameF[gray_tid] = static_cast<unsigned char>(gray); } } void ImageGrayConverter(const cv::Mat& FrameA, const cv::Mat& FrameB, cv::Mat& FrameFinal) { const int colorBytes = FrameA.step * FrameA.rows; const int grayBytes = FrameFinal.step * FrameFinal.rows; unsigned char *D_FrameA, *D_FrameB, *D_Frame, *D_FrameFinal; cudaMalloc<unsigned char>(&D_FrameA,colorBytes); cudaMalloc<unsigned char>(&D_FrameB,colorBytes); cudaMalloc<unsigned char>(&D_Frame,colorBytes); cudaMalloc<unsigned char>(&D_FrameFinal,grayBytes); cudaMemcpy(D_FrameA, FrameA.ptr(),colorBytes,cudaMemcpyHostToDevice); cudaMemcpy(D_FrameB, FrameB.ptr(),colorBytes,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid((FrameA.cols + block.x - 1)/block.x, (FrameA.rows + block.y - 1)/block.y); Kernel<<<grid,block>>>(D_FrameA,D_FrameB,D_Frame,D_FrameFinal,FrameA.cols,FrameA.rows,FrameA.step,FrameFinal.step); cudaDeviceSynchronize(); cudaMemcpy(FrameFinal.ptr(),D_FrameFinal,grayBytes,cudaMemcpyDeviceToHost); cudaFree(D_FrameA); cudaFree(D_FrameB); cudaFree(D_Frame); cudaFree(D_FrameFinal); } using namespace std; using namespace cv; int main(int argc, char** argv) { Mat InputA; Mat InputB; cv::VideoCapture cap; cap.open(string(argv[1])); while(1) { cap>>InputA; cap>>InputB; cv::Mat FrameFinal(InputA.rows,InputA.cols,CV_8U); ImageGrayConverter(InputA,InputB,FrameFinal); cv::imshow("Input",InputA); cv::imshow("Output",FrameFinal); if(cv::waitKey(33)>=0) break; } return 0; }
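Neither version of ImageGrayConverter checks the return codes of its cudaMalloc, cudaMemcpy, or kernel launches. A common pattern is a small checking macro such as the hedged sketch below; the macro name and the example calls are illustrative, not part of the original program.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Wrap any runtime call; after a kernel launch, pass cudaGetLastError() to it.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                       \
                    cudaGetErrorString(err__), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int main(void) {
    unsigned char *buf = NULL;
    CUDA_CHECK(cudaMalloc(&buf, 1024));            // allocation checked
    // Kernel<<<grid, block>>>(...);               // after a launch:
    CUDA_CHECK(cudaGetLastError());                // catches launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());           // catches asynchronous execution errors
    CUDA_CHECK(cudaFree(buf));
    return 0;
}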
259d50e69359efb2d28b4b974b0edd899e30da74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020-2023 by XGBoost Contributors */ #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <algorithm> #include <cstdint> // uint32_t #include <limits> #include "../../collective/aggregator.h" #include "../../common/deterministic.cuh" #include "../../common/device_helpers.cuh" #include "../../data/ellpack_page.cuh" #include "histogram.cuh" #include "row_partitioner_hip.cuh" #include "xgboost/base.h" namespace xgboost { namespace tree { namespace { struct Pair { GradientPair first; GradientPair second; }; __host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) { return {lhs.first + rhs.first, lhs.second + rhs.second}; } } // anonymous namespace struct Clip : public thrust::unary_function<GradientPair, Pair> { static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? v : 0; } static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? abs(v) : 0; } XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const { auto pg = Pclip(x.GetGrad()); auto ph = Pclip(x.GetHess()); auto ng = Nclip(x.GetGrad()); auto nh = Nclip(x.GetHess()); return {GradientPair{pg, ph}, GradientPair{ng, nh}}; } }; /** * In algorithm 5 (see common::CreateRoundingFactor) the bound is calculated as * $max(|v_i|) * n$. Here we use the bound: * * \begin{equation} * max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) ) * \end{equation} * * to avoid outliers, as the full reduction is reproducible on GPU with reduction tree. */ GradientQuantiser::GradientQuantiser(common::Span<GradientPair const> gpair, MetaInfo const& info) { using GradientSumT = GradientPairPrecise; using T = typename GradientSumT::ValueT; dh::XGBCachingDeviceAllocator<char> alloc; thrust::device_ptr<GradientPair const> gpair_beg{gpair.data()}; auto beg = thrust::make_transform_iterator(gpair_beg, Clip()); Pair p = dh::Reduce(thrust::hip::par(alloc), beg, beg + gpair.size(), Pair{}, thrust::plus<Pair>{}); // Treat pair as array of 4 primitive types to allreduce using ReduceT = typename decltype(p.first)::ValueT; static_assert(sizeof(Pair) == sizeof(ReduceT) * 4, "Expected to reduce four elements."); collective::GlobalSum(info, reinterpret_cast<ReduceT*>(&p), 4); GradientPair positive_sum{p.first}, negative_sum{p.second}; std::size_t total_rows = gpair.size(); collective::GlobalSum(info, &total_rows, 1); auto histogram_rounding = GradientSumT{common::CreateRoundingFactor<T>( ::max(positive_sum.GetGrad(), negative_sum.GetGrad()), total_rows), common::CreateRoundingFactor<T>( ::max(positive_sum.GetHess(), negative_sum.GetHess()), total_rows)}; using IntT = typename GradientPairInt64::ValueT; /** * Factor for converting gradients from fixed-point to floating-point. */ to_floating_point_ = histogram_rounding / static_cast<T>(static_cast<IntT>(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit /** * Factor for converting gradients from floating-point to fixed-point. For * f64: * * Precision = 64 - 1 - log2(rounding) * * rounding is calcuated as exp(m), see the rounding factor calcuation for * details. 
*/ to_fixed_point_ = GradientSumT(static_cast<T>(1) / to_floating_point_.GetGrad(), static_cast<T>(1) / to_floating_point_.GetHess()); } XGBOOST_DEV_INLINE void AtomicAddGpairShared(xgboost::GradientPairInt64 *dest, xgboost::GradientPairInt64 const &gpair) { auto dst_ptr = reinterpret_cast<int64_t *>(dest); auto g = gpair.GetQuantisedGrad(); auto h = gpair.GetQuantisedHess(); AtomicAdd64As32(dst_ptr, g); AtomicAdd64As32(dst_ptr + 1, h); } // Global 64 bit integer atomics at the time of writing do not benefit from being separated into two // 32 bit atomics XGBOOST_DEV_INLINE void AtomicAddGpairGlobal(xgboost::GradientPairInt64* dest, xgboost::GradientPairInt64 const& gpair) { auto dst_ptr = reinterpret_cast<uint64_t*>(dest); auto g = gpair.GetQuantisedGrad(); auto h = gpair.GetQuantisedHess(); atomicAdd(dst_ptr, *reinterpret_cast<uint64_t*>(&g)); atomicAdd(dst_ptr + 1, *reinterpret_cast<uint64_t*>(&h)); } template <int kBlockThreads, int kItemsPerThread, int kItemsPerTile = kBlockThreads* kItemsPerThread> class HistogramAgent { GradientPairInt64* smem_arr_; GradientPairInt64* d_node_hist_; dh::LDGIterator<const RowPartitioner::RowIndexT> d_ridx_; const GradientPair* d_gpair_; const FeatureGroup group_; const EllpackDeviceAccessor& matrix_; const int feature_stride_; const std::size_t n_elements_; const GradientQuantiser& rounding_; public: __device__ HistogramAgent(GradientPairInt64* smem_arr, GradientPairInt64* __restrict__ d_node_hist, const FeatureGroup& group, const EllpackDeviceAccessor& matrix, common::Span<const RowPartitioner::RowIndexT> d_ridx, const GradientQuantiser& rounding, const GradientPair* d_gpair) : smem_arr_(smem_arr), d_node_hist_(d_node_hist), d_ridx_(d_ridx.data()), group_(group), matrix_(matrix), feature_stride_(matrix.is_dense ? 
group.num_features : matrix.row_stride), n_elements_(feature_stride_ * d_ridx.size()), rounding_(rounding), d_gpair_(d_gpair) {} __device__ void ProcessPartialTileShared(std::size_t offset) { for (std::size_t idx = offset + threadIdx.x; idx < ::min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) { int ridx = d_ridx_[idx / feature_stride_]; int gidx = matrix_ .gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_] - group_.start_bin; if (matrix_.is_dense || gidx != matrix_.NumBins()) { auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]); AtomicAddGpairShared(smem_arr_ + gidx, adjusted); } } } // Instruction level parallelism by loop unrolling // Allows the kernel to pipeline many operations while waiting for global memory // Increases the throughput of this kernel significantly __device__ void ProcessFullTileShared(std::size_t offset) { std::size_t idx[kItemsPerThread]; int ridx[kItemsPerThread]; int gidx[kItemsPerThread]; GradientPair gpair[kItemsPerThread]; #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { idx[i] = offset + i * kBlockThreads + threadIdx.x; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { ridx[i] = d_ridx_[idx[i] / feature_stride_]; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { gpair[i] = d_gpair_[ridx[i]]; gidx[i] = matrix_.gidx_iter[ridx[i] * matrix_.row_stride + group_.start_feature + idx[i] % feature_stride_]; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { if ((matrix_.is_dense || gidx[i] != matrix_.NumBins())) { auto adjusted = rounding_.ToFixedPoint(gpair[i]); AtomicAddGpairShared(smem_arr_ + gidx[i] - group_.start_bin, adjusted); } } } __device__ void BuildHistogramWithShared() { dh::BlockFill(smem_arr_, group_.num_bins, GradientPairInt64()); __syncthreads(); std::size_t offset = blockIdx.x * kItemsPerTile; while (offset + kItemsPerTile <= n_elements_) { ProcessFullTileShared(offset); offset += kItemsPerTile * gridDim.x; } ProcessPartialTileShared(offset); // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(0, group_.num_bins)) { AtomicAddGpairGlobal(d_node_hist_ + group_.start_bin + i, smem_arr_[i]); } } __device__ void BuildHistogramWithGlobal() { for (auto idx : dh::GridStrideRange(static_cast<std::size_t>(0), n_elements_)) { int ridx = d_ridx_[idx / feature_stride_]; int gidx = matrix_ .gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_]; if (matrix_.is_dense || gidx != matrix_.NumBins()) { auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]); AtomicAddGpairGlobal(d_node_hist_ + gidx, adjusted); } } } }; template <bool use_shared_memory_histograms, int kBlockThreads, int kItemsPerThread> __global__ void __launch_bounds__(kBlockThreads) SharedMemHistKernel(const EllpackDeviceAccessor matrix, const FeatureGroupsAccessor feature_groups, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientPairInt64* __restrict__ d_node_hist, const GradientPair* __restrict__ d_gpair, GradientQuantiser const rounding) { extern __shared__ char smem[]; const FeatureGroup group = feature_groups[blockIdx.y]; auto smem_arr = reinterpret_cast<GradientPairInt64*>(smem); auto agent = HistogramAgent<kBlockThreads, kItemsPerThread>( smem_arr, d_node_hist, group, matrix, d_ridx, rounding, d_gpair); if (use_shared_memory_histograms) { agent.BuildHistogramWithShared(); } else { agent.BuildHistogramWithGlobal(); } } void BuildGradientHistogram(CUDAContext const* ctx, EllpackDeviceAccessor const& matrix, 
FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> d_ridx, common::Span<GradientPairInt64> histogram, GradientQuantiser rounding, bool force_global_memory) { // decide whether to use shared memory int device = 0; dh::safe_cuda(hipGetDevice(&device)); // opt into maximum shared memory for the kernel if necessary size_t max_shared_memory = dh::MaxSharedMemoryOptin(device); size_t smem_size = sizeof(GradientPairInt64) * feature_groups.max_group_bins; bool shared = !force_global_memory && smem_size <= max_shared_memory; smem_size = shared ? smem_size : 0; constexpr int kBlockThreads = 1024; constexpr int kItemsPerThread = 8; constexpr int kItemsPerTile = kBlockThreads * kItemsPerThread; auto runit = [&, kMinItemsPerBlock = kItemsPerTile](auto kernel) { if (shared) { dh::safe_cuda(hipFuncSetAttribute(kernel, hipFuncAttributeMaxDynamicSharedMemorySize, max_shared_memory)); } // determine the launch configuration int num_groups = feature_groups.NumGroups(); int n_mps = 0; dh::safe_cuda(hipDeviceGetAttribute(&n_mps, hipDeviceAttributeMultiprocessorCount, device)); int n_blocks_per_mp = 0; dh::safe_cuda(hipOccupancyMaxActiveBlocksPerMultiprocessor(&n_blocks_per_mp, kernel, kBlockThreads, smem_size)); // This gives the number of blocks to keep the device occupied // Use this as the maximum number of blocks unsigned grid_size = n_blocks_per_mp * n_mps; // Otherwise launch blocks such that each block has a minimum amount of work to do // There are fixed costs to launching each block, e.g. zeroing shared memory // The below amount of minimum work was found by experimentation int columns_per_group = common::DivRoundUp(matrix.row_stride, feature_groups.NumGroups()); // Average number of matrix elements processed by each group std::size_t items_per_group = d_ridx.size() * columns_per_group; // Allocate number of blocks such that each block has about kMinItemsPerBlock work // Up to a maximum where the device is saturated grid_size = ::min(grid_size, static_cast<std::uint32_t>( common::DivRoundUp(items_per_group, kMinItemsPerBlock))); dh::LaunchKernel {dim3(grid_size, num_groups), static_cast<uint32_t>(kBlockThreads), smem_size, ctx->Stream()} (kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding); }; if (shared) { runit(SharedMemHistKernel<true, kBlockThreads, kItemsPerThread>); } else { runit(SharedMemHistKernel<false, kBlockThreads, kItemsPerThread>); } dh::safe_cuda(hipGetLastError()); } } // namespace tree } // namespace xgboost
259d50e69359efb2d28b4b974b0edd899e30da74.cu
/** * Copyright 2020-2023 by XGBoost Contributors */ #include <thrust/iterator/transform_iterator.h> #include <thrust/reduce.h> #include <algorithm> #include <cstdint> // uint32_t #include <limits> #include "../../collective/aggregator.h" #include "../../common/deterministic.cuh" #include "../../common/device_helpers.cuh" #include "../../data/ellpack_page.cuh" #include "histogram.cuh" #include "row_partitioner.cuh" #include "xgboost/base.h" namespace xgboost { namespace tree { namespace { struct Pair { GradientPair first; GradientPair second; }; __host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) { return {lhs.first + rhs.first, lhs.second + rhs.second}; } } // anonymous namespace struct Clip : public thrust::unary_function<GradientPair, Pair> { static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? v : 0; } static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? abs(v) : 0; } XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const { auto pg = Pclip(x.GetGrad()); auto ph = Pclip(x.GetHess()); auto ng = Nclip(x.GetGrad()); auto nh = Nclip(x.GetHess()); return {GradientPair{pg, ph}, GradientPair{ng, nh}}; } }; /** * In algorithm 5 (see common::CreateRoundingFactor) the bound is calculated as * $max(|v_i|) * n$. Here we use the bound: * * \begin{equation} * max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) ) * \end{equation} * * to avoid outliers, as the full reduction is reproducible on GPU with reduction tree. */ GradientQuantiser::GradientQuantiser(common::Span<GradientPair const> gpair, MetaInfo const& info) { using GradientSumT = GradientPairPrecise; using T = typename GradientSumT::ValueT; dh::XGBCachingDeviceAllocator<char> alloc; thrust::device_ptr<GradientPair const> gpair_beg{gpair.data()}; auto beg = thrust::make_transform_iterator(gpair_beg, Clip()); Pair p = dh::Reduce(thrust::cuda::par(alloc), beg, beg + gpair.size(), Pair{}, thrust::plus<Pair>{}); // Treat pair as array of 4 primitive types to allreduce using ReduceT = typename decltype(p.first)::ValueT; static_assert(sizeof(Pair) == sizeof(ReduceT) * 4, "Expected to reduce four elements."); collective::GlobalSum(info, reinterpret_cast<ReduceT*>(&p), 4); GradientPair positive_sum{p.first}, negative_sum{p.second}; std::size_t total_rows = gpair.size(); collective::GlobalSum(info, &total_rows, 1); auto histogram_rounding = GradientSumT{common::CreateRoundingFactor<T>( std::max(positive_sum.GetGrad(), negative_sum.GetGrad()), total_rows), common::CreateRoundingFactor<T>( std::max(positive_sum.GetHess(), negative_sum.GetHess()), total_rows)}; using IntT = typename GradientPairInt64::ValueT; /** * Factor for converting gradients from fixed-point to floating-point. */ to_floating_point_ = histogram_rounding / static_cast<T>(static_cast<IntT>(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit /** * Factor for converting gradients from floating-point to fixed-point. For * f64: * * Precision = 64 - 1 - log2(rounding) * * rounding is calcuated as exp(m), see the rounding factor calcuation for * details. 
*/ to_fixed_point_ = GradientSumT(static_cast<T>(1) / to_floating_point_.GetGrad(), static_cast<T>(1) / to_floating_point_.GetHess()); } XGBOOST_DEV_INLINE void AtomicAddGpairShared(xgboost::GradientPairInt64 *dest, xgboost::GradientPairInt64 const &gpair) { auto dst_ptr = reinterpret_cast<int64_t *>(dest); auto g = gpair.GetQuantisedGrad(); auto h = gpair.GetQuantisedHess(); AtomicAdd64As32(dst_ptr, g); AtomicAdd64As32(dst_ptr + 1, h); } // Global 64 bit integer atomics at the time of writing do not benefit from being separated into two // 32 bit atomics XGBOOST_DEV_INLINE void AtomicAddGpairGlobal(xgboost::GradientPairInt64* dest, xgboost::GradientPairInt64 const& gpair) { auto dst_ptr = reinterpret_cast<uint64_t*>(dest); auto g = gpair.GetQuantisedGrad(); auto h = gpair.GetQuantisedHess(); atomicAdd(dst_ptr, *reinterpret_cast<uint64_t*>(&g)); atomicAdd(dst_ptr + 1, *reinterpret_cast<uint64_t*>(&h)); } template <int kBlockThreads, int kItemsPerThread, int kItemsPerTile = kBlockThreads* kItemsPerThread> class HistogramAgent { GradientPairInt64* smem_arr_; GradientPairInt64* d_node_hist_; dh::LDGIterator<const RowPartitioner::RowIndexT> d_ridx_; const GradientPair* d_gpair_; const FeatureGroup group_; const EllpackDeviceAccessor& matrix_; const int feature_stride_; const std::size_t n_elements_; const GradientQuantiser& rounding_; public: __device__ HistogramAgent(GradientPairInt64* smem_arr, GradientPairInt64* __restrict__ d_node_hist, const FeatureGroup& group, const EllpackDeviceAccessor& matrix, common::Span<const RowPartitioner::RowIndexT> d_ridx, const GradientQuantiser& rounding, const GradientPair* d_gpair) : smem_arr_(smem_arr), d_node_hist_(d_node_hist), d_ridx_(d_ridx.data()), group_(group), matrix_(matrix), feature_stride_(matrix.is_dense ? 
group.num_features : matrix.row_stride), n_elements_(feature_stride_ * d_ridx.size()), rounding_(rounding), d_gpair_(d_gpair) {} __device__ void ProcessPartialTileShared(std::size_t offset) { for (std::size_t idx = offset + threadIdx.x; idx < std::min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) { int ridx = d_ridx_[idx / feature_stride_]; int gidx = matrix_ .gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_] - group_.start_bin; if (matrix_.is_dense || gidx != matrix_.NumBins()) { auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]); AtomicAddGpairShared(smem_arr_ + gidx, adjusted); } } } // Instruction level parallelism by loop unrolling // Allows the kernel to pipeline many operations while waiting for global memory // Increases the throughput of this kernel significantly __device__ void ProcessFullTileShared(std::size_t offset) { std::size_t idx[kItemsPerThread]; int ridx[kItemsPerThread]; int gidx[kItemsPerThread]; GradientPair gpair[kItemsPerThread]; #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { idx[i] = offset + i * kBlockThreads + threadIdx.x; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { ridx[i] = d_ridx_[idx[i] / feature_stride_]; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { gpair[i] = d_gpair_[ridx[i]]; gidx[i] = matrix_.gidx_iter[ridx[i] * matrix_.row_stride + group_.start_feature + idx[i] % feature_stride_]; } #pragma unroll for (int i = 0; i < kItemsPerThread; i++) { if ((matrix_.is_dense || gidx[i] != matrix_.NumBins())) { auto adjusted = rounding_.ToFixedPoint(gpair[i]); AtomicAddGpairShared(smem_arr_ + gidx[i] - group_.start_bin, adjusted); } } } __device__ void BuildHistogramWithShared() { dh::BlockFill(smem_arr_, group_.num_bins, GradientPairInt64()); __syncthreads(); std::size_t offset = blockIdx.x * kItemsPerTile; while (offset + kItemsPerTile <= n_elements_) { ProcessFullTileShared(offset); offset += kItemsPerTile * gridDim.x; } ProcessPartialTileShared(offset); // Write shared memory back to global memory __syncthreads(); for (auto i : dh::BlockStrideRange(0, group_.num_bins)) { AtomicAddGpairGlobal(d_node_hist_ + group_.start_bin + i, smem_arr_[i]); } } __device__ void BuildHistogramWithGlobal() { for (auto idx : dh::GridStrideRange(static_cast<std::size_t>(0), n_elements_)) { int ridx = d_ridx_[idx / feature_stride_]; int gidx = matrix_ .gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_]; if (matrix_.is_dense || gidx != matrix_.NumBins()) { auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]); AtomicAddGpairGlobal(d_node_hist_ + gidx, adjusted); } } } }; template <bool use_shared_memory_histograms, int kBlockThreads, int kItemsPerThread> __global__ void __launch_bounds__(kBlockThreads) SharedMemHistKernel(const EllpackDeviceAccessor matrix, const FeatureGroupsAccessor feature_groups, common::Span<const RowPartitioner::RowIndexT> d_ridx, GradientPairInt64* __restrict__ d_node_hist, const GradientPair* __restrict__ d_gpair, GradientQuantiser const rounding) { extern __shared__ char smem[]; const FeatureGroup group = feature_groups[blockIdx.y]; auto smem_arr = reinterpret_cast<GradientPairInt64*>(smem); auto agent = HistogramAgent<kBlockThreads, kItemsPerThread>( smem_arr, d_node_hist, group, matrix, d_ridx, rounding, d_gpair); if (use_shared_memory_histograms) { agent.BuildHistogramWithShared(); } else { agent.BuildHistogramWithGlobal(); } } void BuildGradientHistogram(CUDAContext const* ctx, EllpackDeviceAccessor const& 
matrix, FeatureGroupsAccessor const& feature_groups, common::Span<GradientPair const> gpair, common::Span<const uint32_t> d_ridx, common::Span<GradientPairInt64> histogram, GradientQuantiser rounding, bool force_global_memory) { // decide whether to use shared memory int device = 0; dh::safe_cuda(cudaGetDevice(&device)); // opt into maximum shared memory for the kernel if necessary size_t max_shared_memory = dh::MaxSharedMemoryOptin(device); size_t smem_size = sizeof(GradientPairInt64) * feature_groups.max_group_bins; bool shared = !force_global_memory && smem_size <= max_shared_memory; smem_size = shared ? smem_size : 0; constexpr int kBlockThreads = 1024; constexpr int kItemsPerThread = 8; constexpr int kItemsPerTile = kBlockThreads * kItemsPerThread; auto runit = [&, kMinItemsPerBlock = kItemsPerTile](auto kernel) { if (shared) { dh::safe_cuda(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_memory)); } // determine the launch configuration int num_groups = feature_groups.NumGroups(); int n_mps = 0; dh::safe_cuda(cudaDeviceGetAttribute(&n_mps, cudaDevAttrMultiProcessorCount, device)); int n_blocks_per_mp = 0; dh::safe_cuda(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&n_blocks_per_mp, kernel, kBlockThreads, smem_size)); // This gives the number of blocks to keep the device occupied // Use this as the maximum number of blocks unsigned grid_size = n_blocks_per_mp * n_mps; // Otherwise launch blocks such that each block has a minimum amount of work to do // There are fixed costs to launching each block, e.g. zeroing shared memory // The below amount of minimum work was found by experimentation int columns_per_group = common::DivRoundUp(matrix.row_stride, feature_groups.NumGroups()); // Average number of matrix elements processed by each group std::size_t items_per_group = d_ridx.size() * columns_per_group; // Allocate number of blocks such that each block has about kMinItemsPerBlock work // Up to a maximum where the device is saturated grid_size = std::min(grid_size, static_cast<std::uint32_t>( common::DivRoundUp(items_per_group, kMinItemsPerBlock))); dh::LaunchKernel {dim3(grid_size, num_groups), static_cast<uint32_t>(kBlockThreads), smem_size, ctx->Stream()} (kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding); }; if (shared) { runit(SharedMemHistKernel<true, kBlockThreads, kItemsPerThread>); } else { runit(SharedMemHistKernel<false, kBlockThreads, kItemsPerThread>); } dh::safe_cuda(cudaGetLastError()); } } // namespace tree } // namespace xgboost
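The BuildGradientHistogram launcher above sizes its grid from two inputs: the occupancy reported by cudaOccupancyMaxActiveBlocksPerMultiprocessor and a minimum amount of work per block. A minimal standalone sketch of that heuristic is shown below; DummyHistKernel and PickGridSize are illustrative placeholders, not XGBoost code.

#include <cuda_runtime.h>
#include <algorithm>
#include <cstddef>

__global__ void DummyHistKernel(const float* in, float* out, std::size_t n) {
  // stand-in kernel so the occupancy query has something to inspect
  for (std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += static_cast<std::size_t>(blockDim.x) * gridDim.x) {
    out[i] = in[i];
  }
}

unsigned PickGridSize(std::size_t items, int block_threads, int min_items_per_block) {
  int device = 0, n_mps = 0, blocks_per_mp = 0;
  cudaGetDevice(&device);
  cudaDeviceGetAttribute(&n_mps, cudaDevAttrMultiProcessorCount, device);
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_mp, DummyHistKernel,
                                                block_threads, /*dynamic smem=*/0);
  unsigned saturating = blocks_per_mp * n_mps;                       // enough blocks to fill the device
  unsigned by_work = (items + min_items_per_block - 1) / min_items_per_block;  // ceil(items / min_items_per_block)
  return std::min(saturating, by_work);                              // never launch more blocks than the work justifies
}

Capping the grid at the work-derived bound avoids paying the fixed per-block costs (such as zeroing shared memory) when the input is small, which is exactly the rationale given in the comments above.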
vectorSum.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

//Alfred Shaker
//10-13-2015

// CUDA kernel
__global__ void vectorSum(int *a, int *b, int *c, int n)
{
    //get the id of global thread
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    //checks to make sure we're not out of bounds
    if(id < n)
        c[id] = a[id] + b[id];
}
vectorSum.cu
#include "includes.h" //Alfred Shaker //10-13-2015 // CUDA kernel __global__ void vectorSum(int *a, int *b, int *c, int n) { //get the id of global thread int id = blockIdx.x*blockDim.x+threadIdx.x; //checks to make sure we're not out of bounds if(id < n) c[id] = a[id] + b[id]; }
9be9acdf0c62a0acb482ab821402b039192fd709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<stdio.h> #include<chrono> using namespace std; using namespace std::chrono; //standard deviation formula: (sqrt(summation(x - mean)^2)/n) __global__ void standard_deviation(int *a, float *b, float mean, int n) { int large_id = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; for(int i = large_id; i < min(large_id + 256, n); i++) { sum += (a[i] - mean) * (a[i] - mean); //printf("Sum: %f\t a[%d]: %d\n", sum, i, a[i]); } b[large_id] = sum/n; } void standard_deviation_cpu(int *a, float *b, float mean, int n) { for(int i = 0; i < n; i++) { b[0] += (a[i] - mean) * (a[i] - mean); } b[0] /=n; } int main(void) { int *a, *dev_a, n; float *b, *dev_b, mean; cout<<"Enter number of elements in array: "<<endl; cin>>n; a = (int *)malloc(n * sizeof(int)); //a = new int[n]; b = (float *) malloc (sizeof(int)); // b = new float[n]; //cout<<"The input numbers are: "<<endl; for(int i = 0; i < n; i++) { a[i] = i + 1; //cout<<a[i]<<"\t"; } cout<<endl; mean = (n + 1)/2; cout<<"Mean: "<<mean<<endl; hipMalloc(&dev_a, n * sizeof(int)); hipMalloc(&dev_b, sizeof(float)); int blocks, threads; blocks = threads = ceil(n * 1.0f/256.0f); hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice); auto start = high_resolution_clock::now(); hipLaunchKernelGGL(( standard_deviation) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, mean, n); auto stop = high_resolution_clock::now(); cout<<"For GPU: "<<endl; hipMemcpy(b, dev_b, sizeof(float), hipMemcpyDeviceToHost); cout<<"Standard deviation is: "<< sqrt(b[0]) <<"\nTime taken for parallel execution is: "<<duration_cast <microseconds> (stop - start).count() <<endl; b[0] = 0.0; cout<<"For CPU:" <<endl; start = high_resolution_clock::now(); standard_deviation_cpu(a, b, mean, n); stop = high_resolution_clock::now(); cout<<"Standard deviation is "<<sqrt(b[0]) <<"\nTime taken for serial execution is: "<< duration_cast <microseconds> (stop - start).count()<<endl; }
9be9acdf0c62a0acb482ab821402b039192fd709.cu
#include<iostream> #include<stdio.h> #include<chrono> using namespace std; using namespace std::chrono; //standard deviation formula: (sqrt(summation(x - mean)^2)/n) __global__ void standard_deviation(int *a, float *b, float mean, int n) { int large_id = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0.0; for(int i = large_id; i < min(large_id + 256, n); i++) { sum += (a[i] - mean) * (a[i] - mean); //printf("Sum: %f\t a[%d]: %d\n", sum, i, a[i]); } b[large_id] = sum/n; } void standard_deviation_cpu(int *a, float *b, float mean, int n) { for(int i = 0; i < n; i++) { b[0] += (a[i] - mean) * (a[i] - mean); } b[0] /=n; } int main(void) { int *a, *dev_a, n; float *b, *dev_b, mean; cout<<"Enter number of elements in array: "<<endl; cin>>n; a = (int *)malloc(n * sizeof(int)); //a = new int[n]; b = (float *) malloc (sizeof(int)); // b = new float[n]; //cout<<"The input numbers are: "<<endl; for(int i = 0; i < n; i++) { a[i] = i + 1; //cout<<a[i]<<"\t"; } cout<<endl; mean = (n + 1)/2; cout<<"Mean: "<<mean<<endl; cudaMalloc(&dev_a, n * sizeof(int)); cudaMalloc(&dev_b, sizeof(float)); int blocks, threads; blocks = threads = ceil(n * 1.0f/256.0f); cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice); auto start = high_resolution_clock::now(); standard_deviation <<<blocks, threads>>> (dev_a, dev_b, mean, n); auto stop = high_resolution_clock::now(); cout<<"For GPU: "<<endl; cudaMemcpy(b, dev_b, sizeof(float), cudaMemcpyDeviceToHost); cout<<"Standard deviation is: "<< sqrt(b[0]) <<"\nTime taken for parallel execution is: "<<duration_cast <microseconds> (stop - start).count() <<endl; b[0] = 0.0; cout<<"For CPU:" <<endl; start = high_resolution_clock::now(); standard_deviation_cpu(a, b, mean, n); stop = high_resolution_clock::now(); cout<<"Standard deviation is "<<sqrt(b[0]) <<"\nTime taken for serial execution is: "<< duration_cast <microseconds> (stop - start).count()<<endl; }
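In the kernel above every thread stores a partial result to b[large_id] even though dev_b is allocated as a single float, and adjacent threads re-scan overlapping 256-element windows, so only the single-block, single-thread case yields the intended sum. A minimal sketch of a shared-memory block reduction that keeps one atomic update per block is shown below; names are illustrative and blockDim.x is assumed to be a power of two.

// Sketch: per-block reduction of squared deviations, one atomicAdd per block.
// Launch as: squared_deviation_sum<<<blocks, threads, threads * sizeof(float)>>>(dev_a, dev_out, mean, n);
__global__ void squared_deviation_sum(const int *a, float *out, float mean, int n)
{
    extern __shared__ float partial[];            // blockDim.x floats of dynamic shared memory
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    float d = (i < n) ? (a[i] - mean) : 0.0f;
    partial[tid] = d * d;
    __syncthreads();

    // tree reduction within the block (blockDim.x assumed to be a power of two)
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }

    if (tid == 0) atomicAdd(out, partial[0]);     // host divides by n and takes sqrt
}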
085ba56c01fde799545905dc8ff09258b7e98997.hip
// !!! This is a file automatically generated by hipify!!!
#include "math_functions.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void compute_outputDW_kernel(double *weight_output, const double *output_fm_hidden,
    const int nHidden, const int T_size, const double *direction, const int nT,
    const int *Time, const int *index, const double decay1, const double decay2,
    const double V0_param)
{
    //determine the id of the thread
    int hidden_id = blockIdx.x*blockDim.x + threadIdx.x;
    if(hidden_id > nHidden - 1) { return; }

    int t_hiddenid = hidden_id*T_size;
    int curT = 0;
    int time_value = Time[curT];
    int curt = 1;
    double Mn = 0;
    double Sn = 0;
    double tmp = 0;

    while(curT < nT) {
        tmp = V0_param * output_fm_hidden[t_hiddenid + curt - 1];
        Mn = decay1*Mn + tmp;
        Sn = decay2*Sn + tmp;
        while(curt == time_value) {
            weight_output[hidden_id + (index[curT]-1)*nHidden] = (Mn - Sn)*direction[curT];
            curT = curT + 1;
            if(curT >= nT) { break; }
            time_value = Time[curT];
        }
        curt = curt + 1;
    }
}
085ba56c01fde799545905dc8ff09258b7e98997.cu
#include "math_functions.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void compute_outputDW_kernel( double *weight_output, const double *output_fm_hidden, const int nHidden,const int T_size,const double *direction, const int nT, const int *Time, const int * index, const double decay1, const double decay2,const double V0_param) { //determine the id of the thread int hidden_id = blockIdx.x*blockDim.x +threadIdx.x; if(hidden_id > nHidden - 1 ) {return;} int t_hiddenid = hidden_id*T_size; int curT = 0; int time_value = Time[curT]; int curt = 1; double Mn = 0; double Sn = 0; double tmp = 0; while(curT < nT) { tmp = V0_param * output_fm_hidden[t_hiddenid + curt - 1]; Mn = decay1*Mn + tmp; Sn = decay2*Sn + tmp; while(curt == time_value) { weight_output[hidden_id+(index[curT]-1)*nHidden] = (Mn - Sn)*direction[curT]; curT = curT + 1; if(curT >= nT) { break; } time_value = Time[curT]; } curt = curt + 1; } }
8d631476bb46c5ee4a3d8b55f75632cab5d14448.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeSphere.h" #include "ShapeConvexPolygon.h" #include "ShapePolyhedron.h" #include "ShapeConvexPolyhedron.h" #include "ShapeSpheropolyhedron.h" #include "ShapeSpheropolygon.h" #include "ShapeSimplePolygon.h" #include "ShapeEllipsoid.h" #include "ShapeFacetedSphere.h" #include "ShapeSphinx.h" #include "ShapeUnion.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeUnion<ShapeConvexPolyhedron> template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_free_volume_args_t &args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template hipError_t gpu_hpmc_update<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template hipError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_new_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_new_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); }; // end namespace detail } // end namespace hpmc
8d631476bb46c5ee4a3d8b55f75632cab5d14448.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeSphere.h" #include "ShapeConvexPolygon.h" #include "ShapePolyhedron.h" #include "ShapeConvexPolyhedron.h" #include "ShapeSpheropolyhedron.h" #include "ShapeSpheropolygon.h" #include "ShapeSimplePolygon.h" #include "ShapeEllipsoid.h" #include "ShapeFacetedSphere.h" #include "ShapeSphinx.h" #include "ShapeUnion.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeUnion<ShapeConvexPolyhedron> template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_free_volume_args_t &args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_new_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeConvexPolyhedron> >(const hpmc_implicit_args_new_t& args, const typename ShapeUnion<ShapeConvexPolyhedron> ::param_type *d_params); }; // end namespace detail } // end namespace hpmc
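The file above consists only of explicit template instantiations: the templated GPU drivers are defined in the included .cuh headers, and each .cu file forces code generation for one concrete shape type so other objects can link against it. A minimal generic sketch of that pattern is given below, with placeholder names unrelated to HOOMD-blue.

// Sketch of the explicit-instantiation pattern (placeholder names).
#include <cuda_runtime.h>

struct MyShape { float radius; };        // placeholder shape type

// Templated driver; in the real code this definition comes from the included .cuh headers.
template <class Shape>
cudaError_t run_update(const Shape* /*shapes*/, int /*n*/) {
    // ... launch shape-specific kernels here ...
    return cudaSuccess;
}

// Explicit instantiation: emits run_update<MyShape> in this translation unit.
template cudaError_t run_update<MyShape>(const MyShape*, int);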
98b3f883e3fb1173c4010bbfe1d17369494c9167.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by Yurii Shyrma on 07.12.2017. // #include "ResultSet.h" #include <ops/declarable/helpers/matrixSetDiag.h> namespace nd4j { namespace ops { namespace helpers { template <typename T> static __global__ void matrixSetDiagKernel(void* outputBuffer, Nd4jLong* outputShape, void const* diagonalBuffer, Nd4jLong* diagonalShape, Nd4jLong lastDimSize, Nd4jLong last2DimSize, Nd4jLong lastSmallDim, Nd4jLong batchSize) { __shared__ T* z; __shared__ T const* x; __shared__ Nd4jLong outLength, diagonalLen; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuffer); x = reinterpret_cast<T const*>(diagonalBuffer); outLength = shape::length(outputShape); diagonalLen = shape::length(diagonalShape); } for(int i = blockIdx.x; i < batchSize; i+= gridDim.x ) for(int j = threadIdx.x; j < lastSmallDim; j += blockDim.x) { // z[i * last2DimSize + j * (lastDimSize + 1)] = x[i * lastSmallDim + j]; z[shape::getIndexOffset(i * last2DimSize + j * (lastDimSize + 1), outputShape, outLength)] = x[shape::getIndexOffset(i * lastSmallDim + j, diagonalShape, diagonalLen)]; } } ////////////////////////////////////////////////////////////////////////// // Returns a batched matrix tensor with new batched diagonal values. 
// for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag template <typename T> static void _matrixSetDiag(nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output) { *output = *input; const int lastDimSize = input->sizeAt(-1); const int last2DimSize = input->sizeAt(-1) * input->sizeAt(-2); const int lastSmallDim = diagonal->sizeAt(-1); const int batchSize = input->lengthOf()/last2DimSize; auto stream = context->getCudaStream(); dim3 launchDims(256, 512, 8192); hipLaunchKernelGGL(( matrixSetDiagKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, output->specialBuffer(), output->specialShapeInfo(), diagonal->getSpecialBuffer(), diagonal->getSpecialShapeInfo(), lastDimSize, last2DimSize, lastSmallDim, batchSize); //// #pragma omp parallel for if(batchSize > Environment::getInstance()->elementwiseThreshold()) schedule(static) // for(int i = 0; i < batchSize; ++i ) // for(int j = 0; j < lastSmallDim; ++j) { // output->p(i*last2DimSize + j*(lastDimSize + 1), diagonal->e<T>(i*lastSmallDim + j)); // } } void matrixSetDiag(nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), _matrixSetDiag, (context, input, diagonal, output), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void _matrixSetDiag, (nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output), LIBND4J_TYPES); } } }
98b3f883e3fb1173c4010bbfe1d17369494c9167.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by Yurii Shyrma on 07.12.2017. // #include "ResultSet.h" #include <ops/declarable/helpers/matrixSetDiag.h> namespace nd4j { namespace ops { namespace helpers { template <typename T> static __global__ void matrixSetDiagKernel(void* outputBuffer, Nd4jLong* outputShape, void const* diagonalBuffer, Nd4jLong* diagonalShape, Nd4jLong lastDimSize, Nd4jLong last2DimSize, Nd4jLong lastSmallDim, Nd4jLong batchSize) { __shared__ T* z; __shared__ T const* x; __shared__ Nd4jLong outLength, diagonalLen; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuffer); x = reinterpret_cast<T const*>(diagonalBuffer); outLength = shape::length(outputShape); diagonalLen = shape::length(diagonalShape); } for(int i = blockIdx.x; i < batchSize; i+= gridDim.x ) for(int j = threadIdx.x; j < lastSmallDim; j += blockDim.x) { // z[i * last2DimSize + j * (lastDimSize + 1)] = x[i * lastSmallDim + j]; z[shape::getIndexOffset(i * last2DimSize + j * (lastDimSize + 1), outputShape, outLength)] = x[shape::getIndexOffset(i * lastSmallDim + j, diagonalShape, diagonalLen)]; } } ////////////////////////////////////////////////////////////////////////// // Returns a batched matrix tensor with new batched diagonal values. // for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag template <typename T> static void _matrixSetDiag(nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output) { *output = *input; const int lastDimSize = input->sizeAt(-1); const int last2DimSize = input->sizeAt(-1) * input->sizeAt(-2); const int lastSmallDim = diagonal->sizeAt(-1); const int batchSize = input->lengthOf()/last2DimSize; auto stream = context->getCudaStream(); dim3 launchDims(256, 512, 8192); matrixSetDiagKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), diagonal->getSpecialBuffer(), diagonal->getSpecialShapeInfo(), lastDimSize, last2DimSize, lastSmallDim, batchSize); //// #pragma omp parallel for if(batchSize > Environment::getInstance()->elementwiseThreshold()) schedule(static) // for(int i = 0; i < batchSize; ++i ) // for(int j = 0; j < lastSmallDim; ++j) { // output->p(i*last2DimSize + j*(lastDimSize + 1), diagonal->e<T>(i*lastSmallDim + j)); // } } void matrixSetDiag(nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), _matrixSetDiag, (context, input, diagonal, output), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void _matrixSetDiag, (nd4j::LaunchContext * context, const NDArray* input, const NDArray* diagonal, NDArray* output), LIBND4J_TYPES); } } }
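Apart from include paths, the substantive difference between the two versions of this file is the kernel launch: hipify rewrites the triple-chevron syntax into hipLaunchKernelGGL, with the grid, block, dynamic shared-memory size, and stream passed as leading arguments before the kernel arguments. A small sketch of that mapping with an illustrative kernel:

// Sketch of the launch-syntax mapping applied by hipify (kernel name is illustrative).
#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

void launchScale(float* d_data, float s, int n, cudaStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form (as in the .cu file above):
    scaleKernel<<<grid, block, 0, stream>>>(d_data, s, n);
    // HIP form produced by hipify (shown for comparison):
    //   hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, s, n);
}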
389b7835dd3a97ae6598b154f62a07050e25a874.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define INF 2147483647 extern "C" { } __global__ void oneReduction(int * tab, int len, int mod) { __shared__ int begin, end; __shared__ int tmp_T[1024]; if(threadIdx.x == 0) { begin = blockIdx.x*len; end = blockIdx.x*len + len; } __syncthreads(); if(blockIdx.x % mod < mod/2) { for(int k = len/2; k >= 1024; k /= 2) { for(int g = begin; g < end; g += 2*k) { for(int j = g; j < g + k; j += 512) { __syncthreads(); if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x]; else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k]; __syncthreads(); if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; } __syncthreads(); if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x]; else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x]; } } } for(int i = begin; i < begin+len; i += 1024) { __syncthreads(); tmp_T[threadIdx.x] = tab[i + threadIdx.x]; __syncthreads(); for(int jump = 512; jump >= 1; jump /= 2) { if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; } __syncthreads(); } tab[i + threadIdx.x] = tmp_T[threadIdx.x]; } } else { for(int k = len/2; k >= 1024; k /= 2) { for(int g = begin; g < end; g += 2*k) { for(int j = g; j < g + k; j += 512) { __syncthreads(); if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x]; else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k]; __syncthreads(); if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; } __syncthreads(); if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x]; else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x]; } } } for(int i = begin; i < begin + len; i += 1024) { __syncthreads(); tmp_T[threadIdx.x] = tab[i + threadIdx.x]; __syncthreads(); for(int jump = 512; jump >= 1; jump /= 2) { if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; } __syncthreads(); } tab[i + threadIdx.x] = tmp_T[threadIdx.x]; } } }
389b7835dd3a97ae6598b154f62a07050e25a874.cu
#include "includes.h" #define INF 2147483647 extern "C" { } __global__ void oneReduction(int * tab, int len, int mod) { __shared__ int begin, end; __shared__ int tmp_T[1024]; if(threadIdx.x == 0) { begin = blockIdx.x*len; end = blockIdx.x*len + len; } __syncthreads(); if(blockIdx.x % mod < mod/2) { for(int k = len/2; k >= 1024; k /= 2) { for(int g = begin; g < end; g += 2*k) { for(int j = g; j < g + k; j += 512) { __syncthreads(); if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x]; else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k]; __syncthreads(); if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; } __syncthreads(); if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x]; else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x]; } } } for(int i = begin; i < begin+len; i += 1024) { __syncthreads(); tmp_T[threadIdx.x] = tab[i + threadIdx.x]; __syncthreads(); for(int jump = 512; jump >= 1; jump /= 2) { if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; } __syncthreads(); } tab[i + threadIdx.x] = tmp_T[threadIdx.x]; } } else { for(int k = len/2; k >= 1024; k /= 2) { for(int g = begin; g < end; g += 2*k) { for(int j = g; j < g + k; j += 512) { __syncthreads(); if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x]; else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k]; __syncthreads(); if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512]; } __syncthreads(); if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x]; else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x]; } } } for(int i = begin; i < begin + len; i += 1024) { __syncthreads(); tmp_T[threadIdx.x] = tab[i + threadIdx.x]; __syncthreads(); for(int jump = 512; jump >= 1; jump /= 2) { if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump]) { tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x]; tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump]; } __syncthreads(); } tab[i + threadIdx.x] = tmp_T[threadIdx.x]; } } }
f2c3e00c6263233d237e125b40974ad261fd6e34.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>

namespace at {
namespace native {
namespace {

const char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";

void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
      return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
    });
  });
#endif
} // chebyshev_polynomial_t_kernel_cuda

} // namespace (anonymous)

REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);

} // namespace native
} // namespace at
f2c3e00c6263233d237e125b40974ad261fd6e34.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>

namespace at {
namespace native {
namespace {

const char chebyshev_polynomial_t_name[] = "chebyshev_polynomial_t_forward";

void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_t_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_t_cuda", [&]() {
    gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
      return chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
    });
  });
#endif
} // chebyshev_polynomial_t_kernel_cuda

} // namespace (anonymous)

REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);

} // namespace native
} // namespace at
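Both dispatch paths in this file evaluate Chebyshev polynomials of the first kind, which satisfy T_0(x) = 1, T_1(x) = x and T_n(x) = 2x T_{n-1}(x) - T_{n-2}(x). The sketch below shows only that three-term recurrence; ATen's chebyshev_polynomial_t_forward also covers edge cases (for example negative n) that are omitted here, so this is not the library implementation.

// Sketch of the three-term Chebyshev recurrence for integer n >= 0.
template <typename T>
__device__ T chebyshev_t_recurrence(T x, int n) {
    if (n == 0) return T(1);
    if (n == 1) return x;
    T t_prev = T(1), t_curr = x;
    for (int k = 2; k <= n; ++k) {
        T t_next = T(2) * x * t_curr - t_prev;  // T_k = 2x*T_{k-1} - T_{k-2}
        t_prev = t_curr;
        t_curr = t_next;
    }
    return t_curr;
}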
5dc36dfda7bf1ccb7bf7323571f1357b2fc24d51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHGeneral.h" #include "THHTensorMath.h" #include "THHTensorCopy.h" #include <algorithm> #ifdef USE_MAGMA #include <magma.h> #else #include "THHBlas.h" #endif #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)" void THCMagma_init(THCState *state) { #ifdef USE_MAGMA magma_init(); #endif } #ifdef USE_MAGMA static inline float* th_magma_smalloc_pinned(size_t n) { float* ptr; if (MAGMA_SUCCESS != magma_smalloc_pinned(&ptr, n)) THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456); return ptr; } static inline int* th_magma_imalloc_pinned(size_t n) { int* ptr; if (MAGMA_SUCCESS != magma_imalloc_pinned(&ptr, n)) THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456); return ptr; } static void THCudaTensor_copyArray1d(THCState *state, THCudaTensor *self, float *src, int k) { long size[1] = { k }; long stride[1] = { 1 }; THCudaTensor_rawResize(state, self, 1, size, stride); size_t len = k * sizeof(float); THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCudaTensor_copyArray2d(THCState *state, THCudaTensor *self, float *src, int m, int n) { long size[2] = { m, n }; long stride[2] = { 1, m }; THCudaTensor_rawResize(state, self, 2, size, stride); size_t len = m * n * sizeof(float); THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, len, hipMemcpyHostToDevice)); } static void THCudaTensor_copyTensor2d(THCState *state, float *dst, THCudaTensor *self) { THAssert(self->nDimension == 2); size_t len = THCudaTensor_nElement(state, self)*sizeof(float); THCudaTensor *temp = THCudaTensor_newTranspose(state, self, 0, 1); THCudaTensor *selfc = THCudaTensor_newContiguous(state, temp); THCudaCheck(hipMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, hipMemcpyDeviceToHost)); THCudaTensor_free(state, temp); THCudaTensor_free(state, selfc); } #endif static THCudaTensor* THCudaTensor_newColumnMajor(THCState *state, THCudaTensor *self, THCudaTensor *src) { THAssert(src->nDimension == 2); if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0]) { THCudaTensor_retain(state, self); return self; } if (self == src) self = THCudaTensor_new(state); else THCudaTensor_retain(state, self); long size[2] = { src->size[0], src->size[1] }; long stride[2] = { 1, src->size[0] }; THCudaTensor_rawResize(state, self, 2, size, stride); THCudaTensor_copy(state, self, src); return self; } void THCudaTensor_gesv(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square"); THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible"); int n = a_->size[0]; int nrhs = b_->size[1]; THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_); THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_); float *a_data = THCudaTensor_data(state, a); float *b_data = THCudaTensor_data(state, b); int *ipiv = th_magma_imalloc_pinned(n); int info; magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); 
else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCudaTensor_freeCopyTo(state, a, ra_); THCudaTensor_freeCopyTo(state, b, rb_); #else THError(NoMagma(gesv)); #endif } void THCudaTensor_gels(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional"); THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b"); THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n"); THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_); THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_); float *a_data = THCudaTensor_data(state, a); float *b_data = THCudaTensor_data(state, b); int m = a->size[0]; int n = a->size[1]; int nrhs = b->size[1]; float wkopt; int info; magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); float *hwork = th_magma_smalloc_pinned((size_t)wkopt); magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, a, ra_); THCudaTensor_freeCopyTo(state, b, rb_); #else THError(NoMagma(gels)); #endif } void THCudaTensor_syev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int n = a->size[0]; int lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCudaTensor *input = THCudaTensor_newColumnMajor(state, rv_, a); float *input_data = THCudaTensor_data(state, input); // eigen values and workspace float *w = th_magma_smalloc_pinned(n); float *wA = th_magma_smalloc_pinned(lda); // compute optimal size of work array int info; float lwork; int liwork; magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); float *work = th_magma_smalloc_pinned((size_t)lwork); int *iwork = th_magma_imalloc_pinned(liwork); // compute eigenvalues and, optionally, eigenvectors magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); // copy eigen values from w to re_ if (info == 0) THCudaTensor_copyArray1d(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, input, rv_); #else THError(NoMagma(syev)); #endif } void THCudaTensor_geev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int n = a_->size[0]; float *a_data = th_magma_smalloc_pinned(n * n); THCudaTensor_copyTensor2d(state, a_data, a_); float *wr = th_magma_smalloc_pinned(n); float *wi = th_magma_smalloc_pinned(n); float *vr_data = NULL; int ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_smalloc_pinned(n * n); ldvr = n; } float wkopt; int info; magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); int lwork = (int) wkopt; float *work_data = th_magma_smalloc_pinned(lwork); magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); if (info > 0) THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCudaTensor_resize2d(state, re_, 2, n); THCudaTensor *re = THCudaTensor_newContiguous(state, re_); THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(float), hipMemcpyHostToDevice)); THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(float), hipMemcpyHostToDevice)); THCudaTensor_freeCopyTo(state, re, re_); THCudaTensor_transpose(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCudaTensor_copyArray2d(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } void THCudaTensor_gesvd(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *a, const char *jobu) { #ifdef USE_MAGMA THCudaTensor *ra_ = THCudaTensor_new(state); THCudaTensor_gesvd2(state, ru_, rs_, rv_, ra_, a, jobu); THCudaTensor_free(state, ra_); #else THError(NoMagma(gesvd)); #endif } void THCudaTensor_gesvd2(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *ra_, THCudaTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; magma_vec_t jobvt = jobu; int m = a->size[0]; int n = a->size[1]; int k = m < n ? m : n; int j = (jobu == MagmaAllVec) ? 
m : k; float *a_data = th_magma_smalloc_pinned(m * n); THCudaTensor_copyTensor2d(state, a_data, a); float *rs_data = th_magma_smalloc_pinned(k); float *ru_data = th_magma_smalloc_pinned(m * j); float *rv_data = th_magma_smalloc_pinned(n * n); float wkopt; int info; magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info); int lwork = (int) wkopt; float *work_data = th_magma_smalloc_pinned(lwork); magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info); if (info > 0) THError("MAGMA gesvd : %d superdiagonals failed to converge", info); else if (info < 0) THError("MAGMA gesvd : Argument %d : illegal value", -info); THCudaTensor_copyArray2d(state, rv_, rv_data, n, n); THCudaTensor_transpose(state, rv_, NULL, 0, 1); THCudaTensor_copyArray2d(state, ru_, ru_data, m, j); THCudaTensor_copyArray1d(state, rs_, rs_data, k); THCudaTensor_copyArray2d(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } void THCudaTensor_getri(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int info; int n = a->size[0]; int lwork = n * magma_get_sgetri_nb(n); THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int *ipiv = th_magma_imalloc_pinned(n); THCudaTensor *work = THCudaTensor_newWithSize1d(state, lwork); float *work_data = THCudaTensor_data(state, work); // Run LU magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCudaTensor_free(state, work); magma_free_pinned(ipiv); THCudaTensor_freeCopyTo(state, input, ra_); #else THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; // input THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); // output THCudaTensor *output = THCudaTensor_newColumnMajor(state, ra_, a); size_t matrices_size = sizeof(float*); float **matrices1 = (float **)THAlloc(matrices_size); const float **matrices1_const = (const float **)THAlloc(matrices_size); float **matrices2 = (float **)THAlloc(matrices_size); matrices1[0] = THCudaTensor_data(state, input); matrices1_const[0] = THCudaTensor_data(state, input); matrices2[0] = THCudaTensor_data(state, output); // Copy pointers to device. 
float **d_matrices1, **d_matrices2; const float **d_matrices1_const; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, matrices_size, hipMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(hipMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size, hipMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, matrices_size, hipMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; int *info_gpu; THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int))); int *ipiv_gpu; THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int))); // Run LU THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse THCudaBlas_Sgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaCheck(THCudaFree(state, ipiv_gpu)); THCudaCheck(THCudaFree(state, info_gpu)); THCudaTensor_freeCopyTo(state, output, input); #endif } __global__ void THCudaTensor_copyUpperSymmetric(float *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } void THCudaTensor_potri(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int info; magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info); if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); magma_spotri_gpu(MagmaUpper, n, input_data, n, &info); if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); hipStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(::min(DIVUP(len, 128), 65535)); dim3 threads(128); hipLaunchKernelGGL(( THCudaTensor_copyUpperSymmetric), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); THCudaTensor_freeCopyTo(state, input, ra_); #else THError(NoMagma(potri)); #endif } void THCudaTensor_potrf(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int info; magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info); // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) 
THError("MAGMA potrf : Argument %d : illegal value", -info); THCudaTensor_triu(state, ra_, input, 0); THCudaTensor_free(state, input); #else THError(NoMagma(potrf)); #endif } void THCudaTensor_potrs(THCState *state, THCudaTensor *rb_, THCudaTensor *b, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; int nrhs = b->size[1]; THCudaTensor *b_ = THCudaTensor_newColumnMajor(state, rb_, b); float *b_data = THCudaTensor_data(state, b_); THCudaTensor *a_ = THCudaTensor_newColumnMajor(state, a, a); float *a_data = THCudaTensor_data(state, a_); int info; magma_spotrs_gpu(MagmaUpper, n, nrhs, a_data, n, b_data, n, &info); // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, b_, rb_); THCudaTensor_free(state, a_); #else THError(NoMagma(potrs)); #endif } void THCudaTensor_qr(THCState *state, THCudaTensor *rq_, THCudaTensor *rr_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional"); THCudaTensor *a = THCudaTensor_newColumnMajor(state, rr_, a_); int m = a->size[0]; int n = a->size[1]; int k = (m < n ? m : n); #ifdef MAGMA_V2 int nb = magma_get_sgeqrf_nb(m, n); #else int nb = magma_get_sgeqrf_nb(m); #endif float *a_data = THCudaTensor_data(state, a); float *tau_data = th_magma_smalloc_pinned(n*n); THCudaTensor *work = THCudaTensor_newWithSize1d(state, (2*k + ((n+31)/32)*32)*nb); float *work_data = THCudaTensor_data(state, work); int info; magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCudaTensor *q = THCudaTensor_newColumnMajor(state, rq_, a); float *q_data = THCudaTensor_data(state, q); THCudaTensor_narrow(state, a, a, 0, 0, k); THCudaTensor_triu(state, rr_, a, 0); THCudaTensor_free(state, a); magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info); if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCudaTensor_free(state, work); magma_free_pinned(tau_data); THCudaTensor_narrow(state, q, q, 1, 0, k); THCudaTensor_freeCopyTo(state, q, rq_); #else THError(NoMagma(qr)); #endif }
5dc36dfda7bf1ccb7bf7323571f1357b2fc24d51.cu
#include "THCGeneral.h" #include "THCTensorMath.h" #include "THCTensorCopy.h" #include <algorithm> #ifdef USE_MAGMA #include <magma.h> #else #include "THCBlas.h" #endif #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif #define NoMagma(name) "No CUDA implementation of '" #name "'. Install MAGMA and rebuild cutorch (http://icl.cs.utk.edu/magma/)" void THCMagma_init(THCState *state) { #ifdef USE_MAGMA magma_init(); #endif } #ifdef USE_MAGMA static inline float* th_magma_smalloc_pinned(size_t n) { float* ptr; if (MAGMA_SUCCESS != magma_smalloc_pinned(&ptr, n)) THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456); return ptr; } static inline int* th_magma_imalloc_pinned(size_t n) { int* ptr; if (MAGMA_SUCCESS != magma_imalloc_pinned(&ptr, n)) THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", n/268435456); return ptr; } static void THCudaTensor_copyArray1d(THCState *state, THCudaTensor *self, float *src, int k) { long size[1] = { k }; long stride[1] = { 1 }; THCudaTensor_rawResize(state, self, 1, size, stride); size_t len = k * sizeof(float); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCudaTensor_copyArray2d(THCState *state, THCudaTensor *self, float *src, int m, int n) { long size[2] = { m, n }; long stride[2] = { 1, m }; THCudaTensor_rawResize(state, self, 2, size, stride); size_t len = m * n * sizeof(float); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCudaTensor_copyTensor2d(THCState *state, float *dst, THCudaTensor *self) { THAssert(self->nDimension == 2); size_t len = THCudaTensor_nElement(state, self)*sizeof(float); THCudaTensor *temp = THCudaTensor_newTranspose(state, self, 0, 1); THCudaTensor *selfc = THCudaTensor_newContiguous(state, temp); THCudaCheck(cudaMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, cudaMemcpyDeviceToHost)); THCudaTensor_free(state, temp); THCudaTensor_free(state, selfc); } #endif static THCudaTensor* THCudaTensor_newColumnMajor(THCState *state, THCudaTensor *self, THCudaTensor *src) { THAssert(src->nDimension == 2); if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0]) { THCudaTensor_retain(state, self); return self; } if (self == src) self = THCudaTensor_new(state); else THCudaTensor_retain(state, self); long size[2] = { src->size[0], src->size[1] }; long stride[2] = { 1, src->size[0] }; THCudaTensor_rawResize(state, self, 2, size, stride); THCudaTensor_copy(state, self, src); return self; } void THCudaTensor_gesv(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square"); THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible"); int n = a_->size[0]; int nrhs = b_->size[1]; THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_); THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_); float *a_data = THCudaTensor_data(state, a); float *b_data = THCudaTensor_data(state, b); int *ipiv = th_magma_imalloc_pinned(n); int info; magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, 
info); magma_free_pinned(ipiv); THCudaTensor_freeCopyTo(state, a, ra_); THCudaTensor_freeCopyTo(state, b, rb_); #else THError(NoMagma(gesv)); #endif } void THCudaTensor_gels(THCState *state, THCudaTensor *rb_, THCudaTensor *ra_, THCudaTensor *b_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional"); THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b"); THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n"); THCudaTensor *a = THCudaTensor_newColumnMajor(state, ra_, a_); THCudaTensor *b = THCudaTensor_newColumnMajor(state, rb_, b_); float *a_data = THCudaTensor_data(state, a); float *b_data = THCudaTensor_data(state, b); int m = a->size[0]; int n = a->size[1]; int nrhs = b->size[1]; float wkopt; int info; magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); float *hwork = th_magma_smalloc_pinned((size_t)wkopt); magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, a, ra_); THCudaTensor_freeCopyTo(state, b, rb_); #else THError(NoMagma(gels)); #endif } void THCudaTensor_syev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int n = a->size[0]; int lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCudaTensor *input = THCudaTensor_newColumnMajor(state, rv_, a); float *input_data = THCudaTensor_data(state, input); // eigen values and workspace float *w = th_magma_smalloc_pinned(n); float *wA = th_magma_smalloc_pinned(lda); // compute optimal size of work array int info; float lwork; int liwork; magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); float *work = th_magma_smalloc_pinned((size_t)lwork); int *iwork = th_magma_imalloc_pinned(liwork); // compute eigenvalues and, optionally, eigenvectors magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); // copy eigen values from w to re_ if (info == 0) THCudaTensor_copyArray1d(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, input, rv_); #else THError(NoMagma(syev)); #endif } void THCudaTensor_geev(THCState *state, THCudaTensor *re_, THCudaTensor *rv_, THCudaTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int n = a_->size[0]; float *a_data = th_magma_smalloc_pinned(n * n); THCudaTensor_copyTensor2d(state, a_data, a_); float *wr = th_magma_smalloc_pinned(n); float *wi = th_magma_smalloc_pinned(n); float *vr_data = NULL; int ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_smalloc_pinned(n * n); ldvr = n; } float wkopt; int info; magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); int lwork = (int) wkopt; float *work_data = th_magma_smalloc_pinned(lwork); magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); if (info > 0) THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCudaTensor_resize2d(state, re_, 2, n); THCudaTensor *re = THCudaTensor_newContiguous(state, re_); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(float), cudaMemcpyHostToDevice)); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(float), cudaMemcpyHostToDevice)); THCudaTensor_freeCopyTo(state, re, re_); THCudaTensor_transpose(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCudaTensor_copyArray2d(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } void THCudaTensor_gesvd(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *a, const char *jobu) { #ifdef USE_MAGMA THCudaTensor *ra_ = THCudaTensor_new(state); THCudaTensor_gesvd2(state, ru_, rs_, rv_, ra_, a, jobu); THCudaTensor_free(state, ra_); #else THError(NoMagma(gesvd)); #endif } void THCudaTensor_gesvd2(THCState *state, THCudaTensor *ru_, THCudaTensor *rs_, THCudaTensor *rv_, THCudaTensor *ra_, THCudaTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); magma_vec_t jobu = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; magma_vec_t jobvt = jobu; int m = a->size[0]; int n = a->size[1]; int k = m < n ? m : n; int j = (jobu == MagmaAllVec) ? 
m : k; float *a_data = th_magma_smalloc_pinned(m * n); THCudaTensor_copyTensor2d(state, a_data, a); float *rs_data = th_magma_smalloc_pinned(k); float *ru_data = th_magma_smalloc_pinned(m * j); float *rv_data = th_magma_smalloc_pinned(n * n); float wkopt; int info; magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, &info); int lwork = (int) wkopt; float *work_data = th_magma_smalloc_pinned(lwork); magma_sgesvd(jobu, jobvt, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, &info); if (info > 0) THError("MAGMA gesvd : %d superdiagonals failed to converge", info); else if (info < 0) THError("MAGMA gesvd : Argument %d : illegal value", -info); THCudaTensor_copyArray2d(state, rv_, rv_data, n, n); THCudaTensor_transpose(state, rv_, NULL, 0, 1); THCudaTensor_copyArray2d(state, ru_, ru_data, m, j); THCudaTensor_copyArray1d(state, rs_, rs_data, k); THCudaTensor_copyArray2d(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } void THCudaTensor_getri(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int info; int n = a->size[0]; int lwork = n * magma_get_sgetri_nb(n); THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int *ipiv = th_magma_imalloc_pinned(n); THCudaTensor *work = THCudaTensor_newWithSize1d(state, lwork); float *work_data = THCudaTensor_data(state, work); // Run LU magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCudaTensor_free(state, work); magma_free_pinned(ipiv); THCudaTensor_freeCopyTo(state, input, ra_); #else THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; // input THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); // output THCudaTensor *output = THCudaTensor_newColumnMajor(state, ra_, a); size_t matrices_size = sizeof(float*); float **matrices1 = (float **)THAlloc(matrices_size); const float **matrices1_const = (const float **)THAlloc(matrices_size); float **matrices2 = (float **)THAlloc(matrices_size); matrices1[0] = THCudaTensor_data(state, input); matrices1_const[0] = THCudaTensor_data(state, input); matrices2[0] = THCudaTensor_data(state, output); // Copy pointers to device. 
float **d_matrices1, **d_matrices2; const float **d_matrices1_const; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1_const, matrices_size)); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size)); THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, matrices_size, cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices1_const, matrices1_const, matrices_size, cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, matrices_size, cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; int *info_gpu; THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int))); int *ipiv_gpu; THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int))); // Run LU THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse THCudaBlas_Sgetri(state, n, d_matrices1_const, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaCheck(THCudaFree(state, ipiv_gpu)); THCudaCheck(THCudaFree(state, info_gpu)); THCudaTensor_freeCopyTo(state, output, input); #endif } __global__ void THCudaTensor_copyUpperSymmetric(float *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } void THCudaTensor_potri(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int info; magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info); if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); magma_spotri_gpu(MagmaUpper, n, input_data, n, &info); if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); cudaStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(std::min(DIVUP(len, 128), 65535)); dim3 threads(128); THCudaTensor_copyUpperSymmetric<<<blocks, threads, 0, stream>>>(input_data, n, len); THCudaTensor_freeCopyTo(state, input, ra_); #else THError(NoMagma(potri)); #endif } void THCudaTensor_potrf(THCState *state, THCudaTensor *ra_, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; THCudaTensor *input = THCudaTensor_newColumnMajor(state, ra_, a); float *input_data = THCudaTensor_data(state, input); int info; magma_spotrf_gpu(MagmaUpper, n, input_data, n, &info); // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : 
Argument %d : illegal value", -info); THCudaTensor_triu(state, ra_, input, 0); THCudaTensor_free(state, input); #else THError(NoMagma(potrf)); #endif } void THCudaTensor_potrs(THCState *state, THCudaTensor *rb_, THCudaTensor *b, THCudaTensor *a) { #ifdef USE_MAGMA THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; int nrhs = b->size[1]; THCudaTensor *b_ = THCudaTensor_newColumnMajor(state, rb_, b); float *b_data = THCudaTensor_data(state, b_); THCudaTensor *a_ = THCudaTensor_newColumnMajor(state, a, a); float *a_data = THCudaTensor_data(state, a_); int info; magma_spotrs_gpu(MagmaUpper, n, nrhs, a_data, n, b_data, n, &info); // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCudaTensor_freeCopyTo(state, b_, rb_); THCudaTensor_free(state, a_); #else THError(NoMagma(potrs)); #endif } void THCudaTensor_qr(THCState *state, THCudaTensor *rq_, THCudaTensor *rr_, THCudaTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional"); THCudaTensor *a = THCudaTensor_newColumnMajor(state, rr_, a_); int m = a->size[0]; int n = a->size[1]; int k = (m < n ? m : n); #ifdef MAGMA_V2 int nb = magma_get_sgeqrf_nb(m, n); #else int nb = magma_get_sgeqrf_nb(m); #endif float *a_data = THCudaTensor_data(state, a); float *tau_data = th_magma_smalloc_pinned(n*n); THCudaTensor *work = THCudaTensor_newWithSize1d(state, (2*k + ((n+31)/32)*32)*nb); float *work_data = THCudaTensor_data(state, work); int info; magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCudaTensor *q = THCudaTensor_newColumnMajor(state, rq_, a); float *q_data = THCudaTensor_data(state, q); THCudaTensor_narrow(state, a, a, 0, 0, k); THCudaTensor_triu(state, rr_, a, 0); THCudaTensor_free(state, a); magma_sorgqr_gpu(m, n, k, q_data, m, tau_data, work_data, nb, &info); if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCudaTensor_free(state, work); magma_free_pinned(tau_data); THCudaTensor_narrow(state, q, q, 1, 0, k); THCudaTensor_freeCopyTo(state, q, rq_); #else THError(NoMagma(qr)); #endif }
f4d61bbb2120874f0f068fc412a05ddf8b3995e5.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include "stdafx.h" #include "mex.h" #include "MultiDASEnv.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project #include <hip/device_functions.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //define parameter #define pi 3.1415 #define MAXCUDADEVICES 1 #define threadNum 246 __device__ int numZ_Dev, numX_Dev, datalength_Dev, transNum_Dev, pickLength_Dev, lgLength_Dev, inhibitor_Dev, windowSize_Dev; __device__ float pitch_Dev, pSize_Dev, sampFreq_Dev, acoustVel_Dev, angleAperture_Dev, delayCoef_Dev; int numZ_Host, numX_Host, datalength_Host, transNum_Host, pickLength_Host, lgLength_Host, inhibitor_Host, windowSize_Host; float pitch_Host, pSize_Host, sampFreq_Host, acoustVel_Host, angleAperture_Host, delayCoef_Host; float *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag; int *krev; float *w_real, *w_imag; // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-windowSize // paraIntDev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-windowSize // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleApertureTan // paraFloatDev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleApertureTan extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host) { // imgSize_Host numZ_Host = MAXZ_host; numX_Host = MAXX_host; // paraInt_Host transNum_Host = *paraInt; inhibitor_Host = *(paraInt + 1); lgLength_Host = *(paraInt + 2); datalength_Host = *(paraInt + 3); windowSize_Host = *(paraInt + 4); pickLength_Host = 1; pickLength_Host <<= lgLength_Host; // paraFloat_Host delayCoef_Host = *paraFloat; acoustVel_Host = *(paraFloat + 1); pitch_Host = *(paraFloat + 2); pSize_Host = *(paraFloat + 3); sampFreq_Host = *(paraFloat + 4); angleAperture_Host = *(paraFloat + 5); // imgSize_Dev checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipMemcpyToSymbol(numZ_Dev, &numZ_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(numX_Dev, &numX_Host, sizeof(int))); // paraInt_Dev checkCudaErrors(hipMemcpyToSymbol(transNum_Dev, &transNum_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(inhibitor_Dev, &inhibitor_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(lgLength_Dev, &lgLength_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(datalength_Dev, &datalength_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(windowSize_Dev, &windowSize_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(pickLength_Dev, &pickLength_Host, sizeof(int))); // paraFloat_Dev checkCudaErrors(hipMemcpyToSymbol(delayCoef_Dev, &delayCoef_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(acoustVel_Dev, &acoustVel_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(pitch_Dev, &pitch_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(pSize_Dev, &pSize_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(sampFreq_Dev, &sampFreq_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(angleAperture_Dev, &angleAperture_Host, sizeof(float))); // float*_Dev malloc checkCudaErrors(hipMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(dataPick), pickLength_Host*threadNum*numX_Host*sizeof(float))); 
checkCudaErrors(hipMalloc((void**)&(y_real), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float))); // int*_Dev malloc checkCudaErrors(hipMalloc((void**)&(krev), pickLength_Host*sizeof(int))); //calculate parameter of fft int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int)); for (int k = 0; k < pickLength_Host; ++k) { int r = k; *(krev_Host + k) = (r & 0x1); for (int j = 1; j < lgLength_Host; ++j) { *(krev_Host + k) = (*(krev_Host + k)) << 1; r = r >> 1; if (r & 0x1) ++(*(krev_Host + k)); } } checkCudaErrors(hipMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), hipMemcpyHostToDevice)); free(krev_Host); float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)), *wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)); int m = 1; float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host; for (int s = 1; s <= lgLength_Host; ++s) { m *= 2; wm_real = cos(2 * pi * 1 / m); wm_imag = -sin(2 * pi * 1 / m); w_realRec = 1; w_imagRec = 0; for (int j = 0; j < (m / 2); ++j) { //w = w * wm = t * wm; *(wreal_now + j) = w_realRec; *(wimag_now + j) = w_imagRec; w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag; w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real; } wreal_now += m / 2; wimag_now += m / 2; } checkCudaErrors(hipMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice)); free(wreal_Host); free(wimag_Host); // copy host data to device checkCudaErrors(hipMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), hipMemcpyHostToDevice)); } extern "C" void clearcudas() { checkCudaErrors(hipFree(data_Dev)); checkCudaErrors(hipFree(imgRecons_Dev)); checkCudaErrors(hipFree(dataPick)); checkCudaErrors(hipFree(y_real)); checkCudaErrors(hipFree(y_imag)); checkCudaErrors(hipFree(w_real)); checkCudaErrors(hipFree(w_imag)); checkCudaErrors(hipFree(krev)); } __device__ void getEvelope(int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag) { // 2_DFT float *px = x; for (int k = 0; k < pickLength_Dev; ++k) { *(y_real + *(krev + k)) = *px; *(y_imag + *(krev + k)) = 0; ++px; } int m = 1; float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } // HilbertTran int count = 0; for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even { (*(y_real + count)) *= 2; (*(y_imag + count)) *= 2; } for (count += 1; count < pickLength_Dev; ++count) { (*(y_real + count)) *= 0; (*(y_imag + count)) *= 0; } for (int k = 0; k < pickLength_Dev; ++k) { 
count = *(krev + k); if (count == k) { *(y_imag + k) = -(*(y_imag + k)); } else if (k < count) { t_real = *(y_real + k); t_imag = *(y_imag + k); *(y_real + k) = *(y_real + count); *(y_imag + k) = -(*(y_imag + count)); *(y_real + count) = t_real; *(y_imag + count) = -t_imag; } } m = 1; wreal_now = w_real; wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } int div_len = pickLength_Dev*pickLength_Dev; for (int i = 0; i < pickLength_Dev; ++i) { *(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i)); *(x + i) /= div_len; } } __global__ void PArecon(float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart) { // access thread id const unsigned int tidx = threadIdx.x; // access block id const unsigned int bidx = blockIdx.x; if (bidx < zstart) { return; } float Distan; float Y, Z, y; int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx); float *pickBeg = dataPick + pointer; int pick_offset = pickLength_Dev / 2; Z = bidx * pSize_Dev; Y = tidx * pSize_Dev; int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_start < 0) { y_start = 0; } int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_end > transNum_Dev - 1) { y_end = transNum_Dev - 1; } for (int len = 0; len < pickLength_Dev; ++len) { *(pickBeg + len) = 0; } int lenMax; for (int bidy = y_start; bidy <= y_end; ++bidy) { y = (bidy + 0.5) * pitch_Dev; Distan = sqrt((Y - y)*(Y - y) + Z*Z); POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset; lenMax = pickLength_Dev; if (POINTER + lenMax >= datalength_Dev){ lenMax = datalength_Dev - 1 - POINTER; } if (POINTER >= 0 && POINTER < datalength_Dev) { POINTER = POINTER + bidy*datalength_Dev; for (int len = 0; len < lenMax; ++len) { *(pickBeg + len) += *(data_Dev + POINTER + len); } } } getEvelope(krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer); lenMax = pick_offset; for (int len = pick_offset - windowSize_Dev + 1; len < pick_offset + windowSize_Dev; ++len) { if (len >= 0 && len < pickLength_Dev && *(pickBeg + len) > *(pickBeg + lenMax)) { lenMax = len; } } if (*(pickBeg + lenMax) > 0) { *(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset); for (int i = 1; i <= inhibitor_Dev; ++i) { *(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset); *(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax); } } *(imgRecons_Dev + tidx*zdepth + bidx) = sqrt(*(imgRecons_Dev + tidx*zdepth + bidx)); __syncthreads(); } __host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s checkCudaErrors(hipSetDevice(cudadeviceindex)); // setup execution parameters dim3 grids(numZ_Host, 1, 1); dim3 threads(numX_Host, 1, 1); // execute the kernel // calcualte pixels which are posistioned at 
the same depth at the same time // so that the threads may spend similar time completing calculation PArecon << < grids, threads >> >(data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); // copy result from device to host checkCudaErrors(hipMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), hipMemcpyDeviceToHost)); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); } void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host) { int devID = 0; initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host); parecon(devID, MAXZ_host, 0, imgRecons); clearcudas(); }
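The krev table built in initcudas above is the bit-reversal permutation used to reorder the input of the in-place radix-2 FFT in getEvelope. A host-side sketch of the same table construction is shown below; the helper name build_bit_reversal_table and the printout in main() are hypothetical additions for illustration.

// Illustrative sketch: bit-reversal table for a radix-2 FFT of length
// 2^lgLength, matching the krev computation above. Names are assumptions.
#include <cstdio>

void build_bit_reversal_table(int *krev, int lgLength)
{
    int length = 1 << lgLength;
    for (int k = 0; k < length; ++k) {
        int r = k;
        int rev = r & 0x1;               // start from the lowest bit of k
        for (int j = 1; j < lgLength; ++j) {
            rev <<= 1;                   // shift what has been reversed so far
            r >>= 1;
            if (r & 0x1) ++rev;          // append the next bit of k
        }
        krev[k] = rev;
    }
}

int main()
{
    const int lgLength = 3;              // 8-point FFT
    int krev[1 << lgLength];
    build_bit_reversal_table(krev, lgLength);
    for (int k = 0; k < (1 << lgLength); ++k)
        printf("%d -> %d\n", k, krev[k]);    // 0->0, 1->4, 2->2, 3->6, 4->1, 5->5, 6->3, 7->7
    return 0;
}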
f4d61bbb2120874f0f068fc412a05ddf8b3995e5.cu
// includes, system #include "stdafx.h" #include "mex.h" #include "MultiDASEnv.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <cuda_runtime.h> // includes, project #include <device_functions.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //define parameter #define pi 3.1415 #define MAXCUDADEVICES 1 #define threadNum 246 __device__ int numZ_Dev, numX_Dev, datalength_Dev, transNum_Dev, pickLength_Dev, lgLength_Dev, inhibitor_Dev, windowSize_Dev; __device__ float pitch_Dev, pSize_Dev, sampFreq_Dev, acoustVel_Dev, angleAperture_Dev, delayCoef_Dev; int numZ_Host, numX_Host, datalength_Host, transNum_Host, pickLength_Host, lgLength_Host, inhibitor_Host, windowSize_Host; float pitch_Host, pSize_Host, sampFreq_Host, acoustVel_Host, angleAperture_Host, delayCoef_Host; float *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag; int *krev; float *w_real, *w_imag; // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-windowSize // paraIntDev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-windowSize // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleApertureTan // paraFloatDev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleApertureTan extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host) { // imgSize_Host numZ_Host = MAXZ_host; numX_Host = MAXX_host; // paraInt_Host transNum_Host = *paraInt; inhibitor_Host = *(paraInt + 1); lgLength_Host = *(paraInt + 2); datalength_Host = *(paraInt + 3); windowSize_Host = *(paraInt + 4); pickLength_Host = 1; pickLength_Host <<= lgLength_Host; // paraFloat_Host delayCoef_Host = *paraFloat; acoustVel_Host = *(paraFloat + 1); pitch_Host = *(paraFloat + 2); pSize_Host = *(paraFloat + 3); sampFreq_Host = *(paraFloat + 4); angleAperture_Host = *(paraFloat + 5); // imgSize_Dev checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaMemcpyToSymbol(numZ_Dev, &numZ_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(numX_Dev, &numX_Host, sizeof(int))); // paraInt_Dev checkCudaErrors(cudaMemcpyToSymbol(transNum_Dev, &transNum_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(inhibitor_Dev, &inhibitor_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(lgLength_Dev, &lgLength_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(datalength_Dev, &datalength_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(windowSize_Dev, &windowSize_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(pickLength_Dev, &pickLength_Host, sizeof(int))); // paraFloat_Dev checkCudaErrors(cudaMemcpyToSymbol(delayCoef_Dev, &delayCoef_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(acoustVel_Dev, &acoustVel_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(pitch_Dev, &pitch_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(pSize_Dev, &pSize_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(sampFreq_Dev, &sampFreq_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(angleAperture_Dev, &angleAperture_Host, sizeof(float))); // float*_Dev malloc checkCudaErrors(cudaMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(dataPick), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(y_real), 
pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float))); // int*_Dev malloc checkCudaErrors(cudaMalloc((void**)&(krev), pickLength_Host*sizeof(int))); //calculate parameter of fft int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int)); for (int k = 0; k < pickLength_Host; ++k) { int r = k; *(krev_Host + k) = (r & 0x1); for (int j = 1; j < lgLength_Host; ++j) { *(krev_Host + k) = (*(krev_Host + k)) << 1; r = r >> 1; if (r & 0x1) ++(*(krev_Host + k)); } } checkCudaErrors(cudaMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), cudaMemcpyHostToDevice)); free(krev_Host); float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)), *wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)); int m = 1; float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host; for (int s = 1; s <= lgLength_Host; ++s) { m *= 2; wm_real = cos(2 * pi * 1 / m); wm_imag = -sin(2 * pi * 1 / m); w_realRec = 1; w_imagRec = 0; for (int j = 0; j < (m / 2); ++j) { //w = w * wm = t * wm; *(wreal_now + j) = w_realRec; *(wimag_now + j) = w_imagRec; w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag; w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real; } wreal_now += m / 2; wimag_now += m / 2; } checkCudaErrors(cudaMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice)); free(wreal_Host); free(wimag_Host); // copy host data to device checkCudaErrors(cudaMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), cudaMemcpyHostToDevice)); } extern "C" void clearcudas() { checkCudaErrors(cudaFree(data_Dev)); checkCudaErrors(cudaFree(imgRecons_Dev)); checkCudaErrors(cudaFree(dataPick)); checkCudaErrors(cudaFree(y_real)); checkCudaErrors(cudaFree(y_imag)); checkCudaErrors(cudaFree(w_real)); checkCudaErrors(cudaFree(w_imag)); checkCudaErrors(cudaFree(krev)); } __device__ void getEvelope(int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag) { // 2_DFT float *px = x; for (int k = 0; k < pickLength_Dev; ++k) { *(y_real + *(krev + k)) = *px; *(y_imag + *(krev + k)) = 0; ++px; } int m = 1; float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } // HilbertTran int count = 0; for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even { (*(y_real + count)) *= 2; (*(y_imag + count)) *= 2; } for (count += 1; count < pickLength_Dev; ++count) { (*(y_real + count)) *= 0; (*(y_imag + count)) *= 0; } for (int k = 0; k < pickLength_Dev; ++k) { count = *(krev + k); if 
(count == k) { *(y_imag + k) = -(*(y_imag + k)); } else if (k < count) { t_real = *(y_real + k); t_imag = *(y_imag + k); *(y_real + k) = *(y_real + count); *(y_imag + k) = -(*(y_imag + count)); *(y_real + count) = t_real; *(y_imag + count) = -t_imag; } } m = 1; wreal_now = w_real; wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } int div_len = pickLength_Dev*pickLength_Dev; for (int i = 0; i < pickLength_Dev; ++i) { *(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i)); *(x + i) /= div_len; } } __global__ void PArecon(float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart) { // access thread id const unsigned int tidx = threadIdx.x; // access block id const unsigned int bidx = blockIdx.x; if (bidx < zstart) { return; } float Distan; float Y, Z, y; int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx); float *pickBeg = dataPick + pointer; int pick_offset = pickLength_Dev / 2; Z = bidx * pSize_Dev; Y = tidx * pSize_Dev; int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_start < 0) { y_start = 0; } int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_end > transNum_Dev - 1) { y_end = transNum_Dev - 1; } for (int len = 0; len < pickLength_Dev; ++len) { *(pickBeg + len) = 0; } int lenMax; for (int bidy = y_start; bidy <= y_end; ++bidy) { y = (bidy + 0.5) * pitch_Dev; Distan = sqrt((Y - y)*(Y - y) + Z*Z); POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset; lenMax = pickLength_Dev; if (POINTER + lenMax >= datalength_Dev){ lenMax = datalength_Dev - 1 - POINTER; } if (POINTER >= 0 && POINTER < datalength_Dev) { POINTER = POINTER + bidy*datalength_Dev; for (int len = 0; len < lenMax; ++len) { *(pickBeg + len) += *(data_Dev + POINTER + len); } } } getEvelope(krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer); lenMax = pick_offset; for (int len = pick_offset - windowSize_Dev + 1; len < pick_offset + windowSize_Dev; ++len) { if (len >= 0 && len < pickLength_Dev && *(pickBeg + len) > *(pickBeg + lenMax)) { lenMax = len; } } if (*(pickBeg + lenMax) > 0) { *(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset); for (int i = 1; i <= inhibitor_Dev; ++i) { *(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset); *(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax); } } *(imgRecons_Dev + tidx*zdepth + bidx) = sqrt(*(imgRecons_Dev + tidx*zdepth + bidx)); __syncthreads(); } __host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s checkCudaErrors(cudaSetDevice(cudadeviceindex)); // setup execution parameters dim3 grids(numZ_Host, 1, 1); dim3 threads(numX_Host, 1, 1); // execute the kernel // calcualte pixels which are posistioned at the same depth at the 
same time // so that the threads may spend similar time completing calculation PArecon << < grids, threads >> >(data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); // copy result from device to host checkCudaErrors(cudaMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), cudaMemcpyDeviceToHost)); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); } void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host) { int devID = 0; initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host); parecon(devID, MAXZ_host, 0, imgRecons); clearcudas(); }
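As a standalone illustration of the delay step inside PArecon above: for a reconstruction pixel at (Y, Z) and a transducer element at lateral position y, the distance divided by the acoustic velocity gives the time of flight, and the sample index follows after subtracting the acquisition delay and multiplying by the sampling frequency. The helper name and the numeric values in main() below are hypothetical examples, not values taken from the original code.

// Illustrative sketch: sample-index computation used per element when
// delay-and-sum beamforming picks data for one pixel. Parameter values in
// main() are arbitrary examples.
#include <cmath>
#include <cstdio>

int delay_sample_index(float Y, float Z,        // pixel position [m]
                       float y,                 // element lateral position [m]
                       float acoustVel,         // speed of sound [m/s]
                       float delayCoef,         // acquisition delay [s]
                       float sampFreq)          // sampling frequency [Hz]
{
    float dist = sqrtf((Y - y) * (Y - y) + Z * Z);
    return (int)((dist / acoustVel - delayCoef) * sampFreq + 0.5f);
}

int main()
{
    // Pixel 20 mm deep on axis, element 3 mm off-axis, 1540 m/s, 40 MHz sampling.
    int idx = delay_sample_index(0.0f, 0.02f, 0.003f, 1540.0f, 0.0f, 40.0e6f);
    printf("sample index = %d\n", idx);          // ~525 for these numbers
    return 0;
}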
721dbcfffe48761dc2434abaa8badc70eff83986.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void hello_from_gpu(void)
{
    printf("Hello World from the GPU!\n");
}
721dbcfffe48761dc2434abaa8badc70eff83986.cu
#include "includes.h" __global__ void hello_from_gpu(void) { printf("Hello World from the GPU!\n"); }
cf934c9fcd1519fd0c8a21e7ffe696c069a55edf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020, American University of Beirut // See LICENSE.txt for copyright license #include "graph.h" #include "kernel.h" #include "timer.h" namespace Device { namespace Undirected { namespace TiledCOOCSRInput { template < unsigned int CHECK_IF_DELETED, unsigned int RECOUNT_ALL_EDGES > __global__ void count_triangles_kernel(TiledCOOCSRGraph* g, unsigned int k, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { if(RECOUNT_ALL_EDGES || info.edgeAffected[e] == DIRECTLY_AFFECTED) { unsigned int* srcIdx = g->srcIdx; unsigned int* dstIdx = g->dstIdx; unsigned int* tileSrcPtr = g->tileSrcPtr; unsigned int dst = dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { unsigned int tileSize = g->tileSize; unsigned int tilesPerDim = g->tilesPerDim; unsigned int src1 = srcIdx[e]; unsigned int src2 = dst; unsigned int src1Tile = src1/tileSize; unsigned int src2Tile = src2/tileSize; unsigned int numTriangles_e = 0; for(unsigned int xTile = blockIdx.y; xTile < tilesPerDim && numTriangles_e < k - 2; xTile += gridDim.y) { unsigned int tileSrc1 = (src1Tile*tilesPerDim + xTile)*tileSize + src1%tileSize; unsigned int tileSrc2 = (src2Tile*tilesPerDim + xTile)*tileSize + src2%tileSize; unsigned int e1 = tileSrcPtr[tileSrc1]; unsigned int e2 = tileSrcPtr[tileSrc2]; unsigned int end1 = tileSrcPtr[tileSrc1 + 1]; unsigned int end2 = tileSrcPtr[tileSrc2 + 1]; while(e1 < end1 && e2 < end2 && numTriangles_e < k - 2) { unsigned int dst1 = dstIdx[e1]; if(CHECK_IF_DELETED && dst1 == DELETED) { ++e1; } else { unsigned int dst2 = dstIdx[e2]; if(CHECK_IF_DELETED && dst2 == DELETED) { ++e2; } else { if(dst1 < dst2) { ++e1; } else if(dst1 > dst2) { ++e2; } else { // dst1 == dst2 ++e1; ++e2; ++numTriangles_e; } } } } } if(gridDim.y == 1) { info.numTriangles[e] = numTriangles_e; } else { atomicAdd(&info.numTriangles[e], numTriangles_e); } } } } } template < unsigned int CHECK_IF_DELETED, unsigned int RECOUNT_ALL_EDGES > __global__ void mark_deleted_edges_kernel(TiledCOOCSRGraph* g, unsigned int k, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { if(RECOUNT_ALL_EDGES || info.edgeAffected[e] == DIRECTLY_AFFECTED) { unsigned int* dstIdx = g->dstIdx; unsigned int dst = dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { if(info.numTriangles[e] < k - 2) { dstIdx[e] = DELETED; *info.changed = 1; if(!RECOUNT_ALL_EDGES) { // If only affected edges are going to be recounted, mark which nodes are directly affected unsigned int src = g->srcIdx[e]; info.nodeAffected[src] = DIRECTLY_AFFECTED; info.nodeAffected[dst] = DIRECTLY_AFFECTED; } } } } } } template < unsigned int CHECK_IF_DELETED > __global__ void mark_directly_affected_edges_kernel(TiledCOOCSRGraph* g, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { unsigned int edgeAffected_e = NOT_AFFECTED; unsigned int dst = g->dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { unsigned int src = g->srcIdx[e]; if(info.nodeAffected[src] == DIRECTLY_AFFECTED || info.nodeAffected[dst] == DIRECTLY_AFFECTED) { edgeAffected_e = DIRECTLY_AFFECTED; } } info.edgeAffected[e] = edgeAffected_e; } } void ktruss(TiledCOOCSRGraph* g_d, Info info, Config config) { unsigned int k = config.k; unsigned int numEdges = config.numEdges; unsigned int iter = 0; unsigned int graphHasDeletedEdges = 0; unsigned int changed; initInfoOnDevice(info, config); do { if(config.verbosity >= 2) printf(" Iteration 
%u\n", iter); clearIterInfoOnDevice(info, config); // Count triangles Timer iterTimer = initTimer(config.verbosity >= 2); startTimer(&iterTimer); unsigned int numThreadsPerBlock = config.blockSize; unsigned int numBlocks = (numEdges + numThreadsPerBlock - 1)/numThreadsPerBlock; if(graphHasDeletedEdges) { if(config.recount == ALL) { hipLaunchKernelGGL(( count_triangles_kernel<1,1>) , dim3(dim3(numBlocks, config.numParallelTiles)), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } else { // config.recount == AFFECTED hipLaunchKernelGGL(( count_triangles_kernel<1,0>) , dim3(dim3(numBlocks, config.numParallelTiles)), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } } else { if(config.recount == ALL) { hipLaunchKernelGGL(( count_triangles_kernel<0,1>) , dim3(dim3(numBlocks, config.numParallelTiles)), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } else { // config.recount == AFFECTED hipLaunchKernelGGL(( count_triangles_kernel<0,0>) , dim3(dim3(numBlocks, config.numParallelTiles)), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } } syncStopAndPrintElapsed(&iterTimer, " Triangle counting time"); // Mark deleted edges startTimer(&iterTimer); if(graphHasDeletedEdges) { if(config.recount == ALL) { hipLaunchKernelGGL(( mark_deleted_edges_kernel<1,1>) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } else { // config.recount == AFFECTED hipLaunchKernelGGL(( mark_deleted_edges_kernel<1,0>) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } } else { if(config.recount == ALL) { hipLaunchKernelGGL(( mark_deleted_edges_kernel<0,1>) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } else { // config.recount == AFFECTED hipLaunchKernelGGL(( mark_deleted_edges_kernel<0,0>) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, g_d, k, info); } } syncStopAndPrintElapsed(&iterTimer, " Mark deleted edges"); // Check if the graph changed hipMemcpy(&changed, info.changed, sizeof(unsigned int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); if(changed) { // Remove deleted edges if(iter < config.numEdgeRemoveIter) { startTimer(&iterTimer); removeTiledCOOCSRDeletedEdgesOnDevice(g_d); graphHasDeletedEdges = 0; hipMemcpy(&numEdges, &g_d->numEdges, sizeof(unsigned int), hipMemcpyDeviceToHost); syncStopAndPrintElapsed(&iterTimer, " Remove deleted edges"); if(config.verbosity >= 2) printf(" # edges remaining = %u\n", numEdges); } else { graphHasDeletedEdges = 1; } // If k=3, no need to recount if(k == 3) { break; } // Mark affected edges if(config.recount == AFFECTED) { startTimer(&iterTimer); if(graphHasDeletedEdges) { hipLaunchKernelGGL(( mark_directly_affected_edges_kernel<1>) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d, info); } else { hipLaunchKernelGGL(( mark_directly_affected_edges_kernel<0>) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d, info); } syncStopAndPrintElapsed(&iterTimer, " Mark affected edges"); } } ++iter; } while(changed); // Remove deleted edges if(graphHasDeletedEdges) { Timer timer = initTimer(config.verbosity >= 2); startTimer(&timer); removeTiledCOOCSRDeletedEdgesOnDevice(g_d); syncStopAndPrintElapsed(&timer, " Remove deleted edges"); } } void ktruss(COOGraph* graph_d, COOGraph* truss_d, Info info, Config config) { Timer timer = initTimer(config.verbosity >= 1); // Convert COO to tiled COOCSR startTimer(&timer); TiledCOOCSRGraph* g_d = createEmptyTiledCOOCSROnDevice(config.numNodes, config.numTiles, config.numEdges); coo2tiledcoocsrOnDevice(graph_d, g_d); syncStopAndPrintElapsed(&timer, " Convert undirected COO 
to tiled COOCSR (not optimized)"); // Runs for(unsigned int i = 0; i < config.numWarmupRuns + config.numTimedRuns; ++i) { // K-truss printAndStart(&timer, " Performing K-truss\n"); Timer ktrussTimer = initTimer(config.verbosity == 0 && i >= config.numWarmupRuns); startTimer(&ktrussTimer); ktruss(g_d, info, config); if(config.verbosity == 0 && i >= config.numWarmupRuns) printConfigAsCSV(config); syncStopAndPrintElapsed(&ktrussTimer); syncStopAndPrintElapsed(&timer, " Total K-truss time", GREEN); // Restore graph if(i < config.numWarmupRuns + config.numTimedRuns - 1) { coo2tiledcoocsrOnDevice(graph_d, g_d); hipDeviceSynchronize(); } } // Convert tiled COOCSR to COO startTimer(&timer); tiledcoocsr2cooOnDevice(g_d, truss_d); syncStopAndPrintElapsed(&timer, " Convert tiled COOCSR to COO"); // Deallocate tiled COOCSR startTimer(&timer); freeTiledCOOCSRGraphOnDevice(g_d); syncStopAndPrintElapsed(&timer, " Deallocate tiled COOCSR"); } } } } // end namespace
cf934c9fcd1519fd0c8a21e7ffe696c069a55edf.cu
// Copyright (c) 2020, American University of Beirut // See LICENSE.txt for copyright license #include "graph.h" #include "kernel.h" #include "timer.h" namespace Device { namespace Undirected { namespace TiledCOOCSRInput { template < unsigned int CHECK_IF_DELETED, unsigned int RECOUNT_ALL_EDGES > __global__ void count_triangles_kernel(TiledCOOCSRGraph* g, unsigned int k, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { if(RECOUNT_ALL_EDGES || info.edgeAffected[e] == DIRECTLY_AFFECTED) { unsigned int* srcIdx = g->srcIdx; unsigned int* dstIdx = g->dstIdx; unsigned int* tileSrcPtr = g->tileSrcPtr; unsigned int dst = dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { unsigned int tileSize = g->tileSize; unsigned int tilesPerDim = g->tilesPerDim; unsigned int src1 = srcIdx[e]; unsigned int src2 = dst; unsigned int src1Tile = src1/tileSize; unsigned int src2Tile = src2/tileSize; unsigned int numTriangles_e = 0; for(unsigned int xTile = blockIdx.y; xTile < tilesPerDim && numTriangles_e < k - 2; xTile += gridDim.y) { unsigned int tileSrc1 = (src1Tile*tilesPerDim + xTile)*tileSize + src1%tileSize; unsigned int tileSrc2 = (src2Tile*tilesPerDim + xTile)*tileSize + src2%tileSize; unsigned int e1 = tileSrcPtr[tileSrc1]; unsigned int e2 = tileSrcPtr[tileSrc2]; unsigned int end1 = tileSrcPtr[tileSrc1 + 1]; unsigned int end2 = tileSrcPtr[tileSrc2 + 1]; while(e1 < end1 && e2 < end2 && numTriangles_e < k - 2) { unsigned int dst1 = dstIdx[e1]; if(CHECK_IF_DELETED && dst1 == DELETED) { ++e1; } else { unsigned int dst2 = dstIdx[e2]; if(CHECK_IF_DELETED && dst2 == DELETED) { ++e2; } else { if(dst1 < dst2) { ++e1; } else if(dst1 > dst2) { ++e2; } else { // dst1 == dst2 ++e1; ++e2; ++numTriangles_e; } } } } } if(gridDim.y == 1) { info.numTriangles[e] = numTriangles_e; } else { atomicAdd(&info.numTriangles[e], numTriangles_e); } } } } } template < unsigned int CHECK_IF_DELETED, unsigned int RECOUNT_ALL_EDGES > __global__ void mark_deleted_edges_kernel(TiledCOOCSRGraph* g, unsigned int k, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { if(RECOUNT_ALL_EDGES || info.edgeAffected[e] == DIRECTLY_AFFECTED) { unsigned int* dstIdx = g->dstIdx; unsigned int dst = dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { if(info.numTriangles[e] < k - 2) { dstIdx[e] = DELETED; *info.changed = 1; if(!RECOUNT_ALL_EDGES) { // If only affected edges are going to be recounted, mark which nodes are directly affected unsigned int src = g->srcIdx[e]; info.nodeAffected[src] = DIRECTLY_AFFECTED; info.nodeAffected[dst] = DIRECTLY_AFFECTED; } } } } } } template < unsigned int CHECK_IF_DELETED > __global__ void mark_directly_affected_edges_kernel(TiledCOOCSRGraph* g, Info info) { unsigned int e = blockIdx.x*blockDim.x + threadIdx.x; if(e < g->numEdges) { unsigned int edgeAffected_e = NOT_AFFECTED; unsigned int dst = g->dstIdx[e]; if(!CHECK_IF_DELETED || dst != DELETED) { unsigned int src = g->srcIdx[e]; if(info.nodeAffected[src] == DIRECTLY_AFFECTED || info.nodeAffected[dst] == DIRECTLY_AFFECTED) { edgeAffected_e = DIRECTLY_AFFECTED; } } info.edgeAffected[e] = edgeAffected_e; } } void ktruss(TiledCOOCSRGraph* g_d, Info info, Config config) { unsigned int k = config.k; unsigned int numEdges = config.numEdges; unsigned int iter = 0; unsigned int graphHasDeletedEdges = 0; unsigned int changed; initInfoOnDevice(info, config); do { if(config.verbosity >= 2) printf(" Iteration %u\n", iter); clearIterInfoOnDevice(info, config); // Count triangles Timer iterTimer = 
initTimer(config.verbosity >= 2); startTimer(&iterTimer); unsigned int numThreadsPerBlock = config.blockSize; unsigned int numBlocks = (numEdges + numThreadsPerBlock - 1)/numThreadsPerBlock; if(graphHasDeletedEdges) { if(config.recount == ALL) { count_triangles_kernel<1,1> <<< dim3(numBlocks, config.numParallelTiles), numThreadsPerBlock >>> (g_d, k, info); } else { // config.recount == AFFECTED count_triangles_kernel<1,0> <<< dim3(numBlocks, config.numParallelTiles), numThreadsPerBlock >>> (g_d, k, info); } } else { if(config.recount == ALL) { count_triangles_kernel<0,1> <<< dim3(numBlocks, config.numParallelTiles), numThreadsPerBlock >>> (g_d, k, info); } else { // config.recount == AFFECTED count_triangles_kernel<0,0> <<< dim3(numBlocks, config.numParallelTiles), numThreadsPerBlock >>> (g_d, k, info); } } syncStopAndPrintElapsed(&iterTimer, " Triangle counting time"); // Mark deleted edges startTimer(&iterTimer); if(graphHasDeletedEdges) { if(config.recount == ALL) { mark_deleted_edges_kernel<1,1> <<< numBlocks, numThreadsPerBlock >>> (g_d, k, info); } else { // config.recount == AFFECTED mark_deleted_edges_kernel<1,0> <<< numBlocks, numThreadsPerBlock >>> (g_d, k, info); } } else { if(config.recount == ALL) { mark_deleted_edges_kernel<0,1> <<< numBlocks, numThreadsPerBlock >>> (g_d, k, info); } else { // config.recount == AFFECTED mark_deleted_edges_kernel<0,0> <<< numBlocks, numThreadsPerBlock >>> (g_d, k, info); } } syncStopAndPrintElapsed(&iterTimer, " Mark deleted edges"); // Check if the graph changed cudaMemcpy(&changed, info.changed, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if(changed) { // Remove deleted edges if(iter < config.numEdgeRemoveIter) { startTimer(&iterTimer); removeTiledCOOCSRDeletedEdgesOnDevice(g_d); graphHasDeletedEdges = 0; cudaMemcpy(&numEdges, &g_d->numEdges, sizeof(unsigned int), cudaMemcpyDeviceToHost); syncStopAndPrintElapsed(&iterTimer, " Remove deleted edges"); if(config.verbosity >= 2) printf(" # edges remaining = %u\n", numEdges); } else { graphHasDeletedEdges = 1; } // If k=3, no need to recount if(k == 3) { break; } // Mark affected edges if(config.recount == AFFECTED) { startTimer(&iterTimer); if(graphHasDeletedEdges) { mark_directly_affected_edges_kernel<1> <<< (numEdges + 1024 - 1)/1024, 1024 >>> (g_d, info); } else { mark_directly_affected_edges_kernel<0> <<< (numEdges + 1024 - 1)/1024, 1024 >>> (g_d, info); } syncStopAndPrintElapsed(&iterTimer, " Mark affected edges"); } } ++iter; } while(changed); // Remove deleted edges if(graphHasDeletedEdges) { Timer timer = initTimer(config.verbosity >= 2); startTimer(&timer); removeTiledCOOCSRDeletedEdgesOnDevice(g_d); syncStopAndPrintElapsed(&timer, " Remove deleted edges"); } } void ktruss(COOGraph* graph_d, COOGraph* truss_d, Info info, Config config) { Timer timer = initTimer(config.verbosity >= 1); // Convert COO to tiled COOCSR startTimer(&timer); TiledCOOCSRGraph* g_d = createEmptyTiledCOOCSROnDevice(config.numNodes, config.numTiles, config.numEdges); coo2tiledcoocsrOnDevice(graph_d, g_d); syncStopAndPrintElapsed(&timer, " Convert undirected COO to tiled COOCSR (not optimized)"); // Runs for(unsigned int i = 0; i < config.numWarmupRuns + config.numTimedRuns; ++i) { // K-truss printAndStart(&timer, " Performing K-truss\n"); Timer ktrussTimer = initTimer(config.verbosity == 0 && i >= config.numWarmupRuns); startTimer(&ktrussTimer); ktruss(g_d, info, config); if(config.verbosity == 0 && i >= config.numWarmupRuns) printConfigAsCSV(config); 
syncStopAndPrintElapsed(&ktrussTimer); syncStopAndPrintElapsed(&timer, " Total K-truss time", GREEN); // Restore graph if(i < config.numWarmupRuns + config.numTimedRuns - 1) { coo2tiledcoocsrOnDevice(graph_d, g_d); cudaDeviceSynchronize(); } } // Convert tiled COOCSR to COO startTimer(&timer); tiledcoocsr2cooOnDevice(g_d, truss_d); syncStopAndPrintElapsed(&timer, " Convert tiled COOCSR to COO"); // Deallocate tiled COOCSR startTimer(&timer); freeTiledCOOCSRGraphOnDevice(g_d); syncStopAndPrintElapsed(&timer, " Deallocate tiled COOCSR"); } } } } // end namespace
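The core of count_triangles_kernel above is a two-pointer merge over the sorted neighbor lists of an edge's two endpoints, stopping early once k - 2 common neighbors have been found (additional triangles cannot change whether the edge survives). A minimal host-side sketch of that intersection, with the tiled layout and deleted-edge checks stripped away, follows; the function name and the sample adjacency lists are illustrative.

// Illustrative sketch: sorted-list intersection with an early-exit cap,
// the same pattern the triangle-counting kernel applies per edge.
#include <cstdio>

unsigned int count_common_neighbors(const unsigned int* adj1, unsigned int n1,
                                    const unsigned int* adj2, unsigned int n2,
                                    unsigned int cap)
{
    unsigned int i = 0, j = 0, count = 0;
    while (i < n1 && j < n2 && count < cap) {
        if (adj1[i] < adj2[j])      ++i;
        else if (adj1[i] > adj2[j]) ++j;
        else { ++i; ++j; ++count; }   // common neighbor -> one triangle on this edge
    }
    return count;
}

int main()
{
    // Sorted neighbor lists of the two endpoints of some edge.
    const unsigned int a[] = {1, 3, 4, 7, 9};
    const unsigned int b[] = {2, 3, 7, 8};
    // For k = 4 an edge needs at least k - 2 = 2 triangles to survive.
    printf("triangles = %u\n", count_common_neighbors(a, 5, b, 4, 2));   // prints 2
    return 0;
}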
4bfe9ff37e8be85b26d2079831b9f9d7ed3de7ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <THH/THHDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int 
oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numBatch = 1; int numInputDims = input.ndimension(); TORCH_CHECK( (numInputDims == 2 && input.size(0) != 0 && input.size(1) != 0) || (numInputDims == 3 && input.size(1) != 
0 && input.size(2) != 0), "Expected 2D or 3D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 3) { numBatch = input.size(0); planeDim++; dimw++; } int numPlanes = input.size(planeDim); int inputW = input.size(dimw); int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1, "input (W: ", inputW, ")is too small." " Calculated output W: ", outputW); if (numInputDims == 2) { output.resize_({numPlanes, outputW}); } else { output.resize_({numBatch, numPlanes, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad1d_cuda", [&] { if (numInputDims == 2) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR); } else { auto devInput = input.packed_accessor64<scalar_t, 3>(); auto devOutput = output.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR); } } ); AT_CUDA_CHECK(hipGetLastError()); } void replication_pad1d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devGradOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padL, padR); } ); AT_CUDA_CHECK(hipGetLastError()); } void replication_pad2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0; TORCH_CHECK( (numInputDims == 3 && input.size(0) != 0 && valid_dims) || (numInputDims == 4 && valid_dims && input.size(3) != 0), "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1 || outputH >= 1, "input (H: ", inputH, ", W: ", inputW, ") is too small." " Calculated output H: ", outputH, " W: ", outputW); if (numInputDims == 3) { output.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad2d_cuda", [&] { if (numInputDims == 3) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padT, padB, padL, padR); } else { auto devInput = input.packed_accessor64<scalar_t, 4>(); auto devOutput = output.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padT, padB, padL, padR); } } ); AT_CUDA_CHECK(hipGetLastError()); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padT, padB, padL, padR); } ); AT_CUDA_CHECK(hipGetLastError()); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numBatch = 1; int numInputDims = input.dim(); if (numInputDims == 5) { numBatch = input.size(0); planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = inputD + pfront + pback; int outputH = inputH + ptop + pbottom; int outputW = inputW + pleft + pright; if (numInputDims == 4) { output.resize_({numPlanes, outputD, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputD, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad3d_cuda", [&] { if (numInputDims == 4) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright); } else { auto devInput = input.packed_accessor64<scalar_t, 5>(); auto devOutput = output.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright); } } ); AT_CUDA_CHECK(hipGetLastError()); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright); } ); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& replication_pad1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad1d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad1d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_out_cuda"); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad1d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad2d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad3d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad3d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage 
globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
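// The .hip file above is the hipify translation of the .cu file that follows:
// the kernels and shape checks are unchanged, and the mechanical differences
// are header names (THC* -> THH*, cuda/CUDA* -> hip/HIP*) and the kernel
// launch syntax. A minimal sketch of that launch-syntax mapping, using a toy
// scale_kernel that is not part of the original source:
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launch_scale(float* data, float factor, int n, cudaStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form (as written in the .cu file below):
    scale_kernel<<<grid, block, 0, stream>>>(data, factor, n);
    // After hipify, the same call is emitted roughly as (shown for reference only):
    //   hipLaunchKernelGGL((scale_kernel), grid, block, 0, stream, data, factor, n);
}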
4bfe9ff37e8be85b26d2079831b9f9d7ed3de7ab.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <THC/THCDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = 
imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad1d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numBatch = 1; int numInputDims = input.ndimension(); TORCH_CHECK( (numInputDims == 2 && input.size(0) != 0 && input.size(1) != 0) || (numInputDims == 3 && input.size(1) != 0 && input.size(2) != 0), "Expected 2D or 3D (batch mode) tensor with 
possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 3) { numBatch = input.size(0); planeDim++; dimw++; } int numPlanes = input.size(planeDim); int inputW = input.size(dimw); int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1, "input (W: ", inputW, ")is too small." " Calculated output W: ", outputW); if (numInputDims == 2) { output.resize_({numPlanes, outputW}); } else { output.resize_({numBatch, numPlanes, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad1d_cuda", [&] { if (numInputDims == 2) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel1d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR); } else { auto devInput = input.packed_accessor64<scalar_t, 3>(); auto devOutput = output.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel1d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR); } } ); AT_CUDA_CHECK(cudaGetLastError()); } void replication_pad1d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 2, "padding Size is expected to be 2"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int outputPlaneSize = devGradOutput.size(2); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devGradInput, devGradOutput, padL, padR); } ); AT_CUDA_CHECK(cudaGetLastError()); } void replication_pad2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0; TORCH_CHECK( (numInputDims == 3 && input.size(0) != 0 && valid_dims) || (numInputDims == 4 && valid_dims && input.size(3) != 0), "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1 || outputH >= 1, "input (H: ", inputH, ", W: ", inputW, ") is too small." " Calculated output H: ", outputH, " W: ", outputW); if (numInputDims == 3) { output.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad2d_cuda", [&] { if (numInputDims == 3) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel2d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, padT, padB, padL, padR); } else { auto devInput = input.packed_accessor64<scalar_t, 4>(); auto devOutput = output.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel2d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padT, padB, padL, padR); } } ); AT_CUDA_CHECK(cudaGetLastError()); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devGradInput, devGradOutput, padT, padB, padL, padR); } ); AT_CUDA_CHECK(cudaGetLastError()); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numBatch = 1; int numInputDims = input.dim(); if (numInputDims == 5) { numBatch = input.size(0); planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = inputD + pfront + pback; int outputH = inputH + ptop + pbottom; int outputW = inputW + pleft + pright; if (numInputDims == 4) { output.resize_({numPlanes, outputD, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputD, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad3d_cuda", [&] { if (numInputDims == 4) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel3d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright); } else { auto devInput = input.packed_accessor64<scalar_t, 5>(); auto devOutput = output.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.size(1), devOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel3d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright); } } ); AT_CUDA_CHECK(cudaGetLastError()); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.size(1), devGradOutput.size(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright); } ); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& replication_pad1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad1d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad1d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad1d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_out_cuda"); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad1d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad1d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad2d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor& replication_pad3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad3d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad3d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad3d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); 
replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
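// The replication-pad kernels above assign one thread per output element and
// clamp the source coordinate into the valid input range with imin/imax. The
// simplified 1-D sketch below reproduces that indexing on raw pointers for a
// single plane and batch, assuming non-negative padding (the original also
// handles negative padding via the iStart/oStart offsets); it is illustrative
// only and does not use the ATen accessors of the original code.
#include <cuda_runtime.h>

__global__ void replication_pad1d_sketch(const float* in, float* out,
                                         int inputW, int padL, int padR) {
    int outputW = inputW + padL + padR;
    int ox = blockIdx.x * blockDim.x + threadIdx.x;
    if (ox >= outputW) return;
    // Clamp the would-be source coordinate into [0, inputW - 1].
    int ix = min(max(padL, ox), inputW + padL - 1) - padL;
    out[ox] = in[ix];
}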
5ea1b91369230344546556a4b139d425cb0a4518.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void helloWorld() {
    printf("Hello World\n");
}

int main() {
    hipLaunchKernelGGL((helloWorld), dim3(1), dim3(1), 0, 0, );
    return 0;
}
5ea1b91369230344546556a4b139d425cb0a4518.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void helloWorld() {
    printf("Hello World\n");
}

int main() {
    helloWorld<<<1, 1>>>();
    return 0;
}
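// The hello-world pair above launches its kernel and returns immediately.
// Kernel launches are asynchronous, and device-side printf output is only
// guaranteed to be flushed once the host synchronizes. A minimal sketch with
// explicit launch-error checking and synchronization (not part of the
// original files):
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void helloWorldChecked() {
    printf("Hello World from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}

int main() {
    helloWorldChecked<<<1, 4>>>();
    cudaError_t err = cudaGetLastError();        // catches launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();               // waits for the kernel and flushes device printf
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}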
8dffb81c0d4fe97b4a527bed11b7ac39f8e300bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* A solution for the queues part of hw2. Feel free to replace with your own or * to change it to your needs. */ #include "ex3.h" #include "ex2.h" #include <cassert> #include <cuda/atomic> using cuda::memory_order_relaxed; using cuda::memory_order_acquire; using cuda::memory_order_release; __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __device__ void gpu_process_image_helper(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ uchar map[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < IMG_SZ; i += blockDim.x) { uchar c = in[i]; atomicAdd(&histogram[c], 1); } __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { float map_value = float(histogram[tid]) / IMG_SZ; map[tid] = ((uchar)(N_COLORS * map_value)) * (256 / N_COLORS); } __syncthreads(); for (int i = tid; i < IMG_SZ; i += blockDim.x) { out[i] = map[in[i]]; } } __global__ void process_image_kernel(uchar *all_in, uchar *all_out) { uchar *in = &all_in[blockIdx.x * IMG_SZ]; uchar *out = &all_out[blockIdx.x * IMG_SZ]; gpu_process_image_helper(in, out); } // Code assumes it is a power of two #define NSLOTS 16 struct cpu_to_gpu_entry { int img_idx; uchar *img_in, *img_out; }; struct gpu_to_cpu_entry { int img_idx; }; template <typename entry_type> struct queue { using entry = entry_type; entry data[NSLOTS]; cuda::atomic<int> pi; cuda::atomic<int> ci; cuda::atomic<bool> kill; queue() : pi(0), ci(0), kill(false) {} __host__ __device__ bool empty() { return pi.load(memory_order_acquire) == ci.load(memory_order_relaxed); } __host__ __device__ entry* peek() { return &data[ci & (NSLOTS - 1)]; } __host__ __device__ int wraparound_slots_inc(int last) { return (last + 1); } __host__ __device__ void pop() { auto cur_ci = ci.load(memory_order_relaxed); ci.store(wraparound_slots_inc(cur_ci), memory_order_release); } __host__ __device__ bool full() { auto cur_pi = pi.load(memory_order_relaxed); auto cur_ci = ci.load(memory_order_acquire); return (cur_pi - cur_ci) == NSLOTS; } __host__ __device__ entry* next() { return &data[pi & (NSLOTS - 1)]; } __host__ __device__ void push() { auto cur_pi = pi.load(memory_order_relaxed); pi.store(wraparound_slots_inc(cur_pi), memory_order_release); } }; __global__ void gpu_process_image_consumer(queue<cpu_to_gpu_entry> *cpu_to_gpu, queue<gpu_to_cpu_entry> *gpu_to_cpu) { auto & h2g = cpu_to_gpu[blockIdx.x]; auto & g2h = gpu_to_cpu[blockIdx.x]; int tid = threadIdx.x; __shared__ cpu_to_gpu_entry entry; __shared__ bool kill; if (tid == 0) kill = false; while (true) { if (tid == 0) { while (h2g.empty()) { if (h2g.kill.load(memory_order_relaxed)) { kill = true; break; } } entry = *h2g.peek(); dbg_printf("[%d] got image (%d). pi = %d, ci = %d\n", blockIdx.x, entry.img_idx, h2g.pi.load(), h2g.ci.load()); } __syncthreads(); if (kill) { if (tid == 0) dbg_printf("[%d:%d] got kill\n", blockIdx.x, threadIdx.x); return; } if (tid == 0) { h2g.pop(); dbg_printf("[%d] popped image. 
pi = %d, ci = %d\n", blockIdx.x, h2g.pi.load(), h2g.ci.load()); } gpu_process_image_helper(entry.img_in, entry.img_out); __syncthreads(); if (tid == 0) { while (g2h.full()) ; auto out_entry = g2h.next(); // must have one? out_entry->img_idx = entry.img_idx; dbg_printf("[%d] pushing image (%d). pi = %d, ci = %d\n", blockIdx.x, entry.img_idx, g2h.pi.load(), g2h.ci.load()); } __syncthreads(); if (tid == 0) { g2h.push(); dbg_printf("[%d] pushed image. pi = %d, ci = %d\n", blockIdx.x, g2h.pi.load(), g2h.ci.load()); } } } class queues_gpu_context : public gpu_image_processing_context { private: // TODO define queue server context (memory buffers, etc...) int blocks; char *queue_buffer; queue<cpu_to_gpu_entry> *cpu_to_gpu; queue<gpu_to_cpu_entry> *gpu_to_cpu; int next_block = 0; public: explicit queues_gpu_context(int threads) : blocks(calc_blocks(threads)) { // TODO initialize host state CUDA_CHECK(hipHostMalloc(&queue_buffer, sizeof(*cpu_to_gpu) * blocks + sizeof(*gpu_to_cpu) * blocks)); cpu_to_gpu = new (queue_buffer) queue<cpu_to_gpu_entry>[blocks]; gpu_to_cpu = new (queue_buffer + sizeof(queue<cpu_to_gpu_entry>[blocks])) queue<gpu_to_cpu_entry>[blocks]; // TODO launch GPU producer-consumer kernel with given number of threads hipLaunchKernelGGL(( gpu_process_image_consumer), dim3(blocks), dim3(threads), 0, 0, cpu_to_gpu, gpu_to_cpu); } ~queues_gpu_context() override { // TODO free resources allocated in constructor for (int b = 0; b < blocks; ++b) cpu_to_gpu[b].kill.store(true, memory_order_relaxed); CUDA_CHECK(hipDeviceSynchronize()); cpu_to_gpu->~queue<cpu_to_gpu_entry>(); gpu_to_cpu->~queue<gpu_to_cpu_entry>(); CUDA_CHECK(hipHostFree(queue_buffer)); } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override { // TODO push new task into queue if possible auto &next = cpu_to_gpu[img_id % blocks]; // find_next_queue(cpu_to_gpu, blocks); if (next.full()) return false; dbg_printf("enqueued img id: %d\n", img_id); auto *entry = next.next(); entry->img_idx = img_id; entry->img_in = img_in; entry->img_out = img_out; next.push(); return true; } bool dequeue(int *img_id) override { // TODO query (don't block) the producer-consumer queue for any responses. int block = next_block; for (int i = 0; i < blocks; ++i, ++block) { if (block >= blocks) block -= blocks; if (!gpu_to_cpu[block].empty()) { auto *entry = gpu_to_cpu[block].peek(); *img_id = entry->img_idx; dbg_printf("[CPU] got image %d\n", *img_id); gpu_to_cpu[block].pop(); dbg_printf("block %d i %d next_block %d\n", block, i, next_block); next_block = block - 1; if (next_block < 0) next_block += blocks; return true; } } return false; } int calc_blocks(int threads_per_block) { int device; hipDeviceProp_t prop; CUDA_CHECK(hipGetDevice(&device)); CUDA_CHECK(hipGetDeviceProperties(&prop, device)); int maxByRegsPerSM = prop.regsPerMultiprocessor / threads_per_block / 32; int maxBySharedMemory = prop.sharedMemPerMultiprocessor / 1312; int maxByThreads = prop.maxThreadsPerMultiProcessor / threads_per_block; printf("maxByRegsPerSM: %d\nmaxBySharedMemoryPerSM: %d\nmaxByThreadsPerSM: %d\n", maxByRegsPerSM, maxBySharedMemory, maxByThreads); auto blocks = min(min(maxByRegsPerSM, maxBySharedMemory), maxByThreads) * prop.multiProcessorCount; printf("number of blocks: %d\n", blocks); return blocks; } }; std::unique_ptr<gpu_image_processing_context> create_queues_server(int threads) { return std::make_unique<queues_gpu_context>(threads); }
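// The producer/consumer queue above never wraps its pi/ci counters: it keeps
// them monotonically increasing, masks the slot index with & (NSLOTS - 1)
// (valid because NSLOTS is a power of two), and reports "full" when
// pi - ci == NSLOTS. A host-only sketch of that index scheme, with the
// peek/pop and next/push steps collapsed and no atomics (illustrative only):
#include <cassert>

constexpr int kSlots = 16;                 // must be a power of two, as in the original

struct RingIndices {
    int pi = 0;                            // producer index, never wrapped
    int ci = 0;                            // consumer index, never wrapped
    bool empty() const { return pi == ci; }
    bool full()  const { return pi - ci == kSlots; }
    int  push_slot() { return (pi++) & (kSlots - 1); }   // slot to write, then advance
    int  pop_slot()  { return (ci++) & (kSlots - 1); }   // slot to read, then advance
};

int main() {
    RingIndices r;
    for (int i = 0; i < kSlots; ++i) r.push_slot();
    assert(r.full());
    for (int i = 0; i < kSlots; ++i) r.pop_slot();
    assert(r.empty());
    return 0;
}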
8dffb81c0d4fe97b4a527bed11b7ac39f8e300bc.cu
/* A solution for the queues part of hw2. Feel free to replace with your own or * to change it to your needs. */ #include "ex3.h" #include "ex2.h" #include <cassert> #include <cuda/atomic> using cuda::memory_order_relaxed; using cuda::memory_order_acquire; using cuda::memory_order_release; __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __device__ void gpu_process_image_helper(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ uchar map[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < IMG_SZ; i += blockDim.x) { uchar c = in[i]; atomicAdd(&histogram[c], 1); } __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { float map_value = float(histogram[tid]) / IMG_SZ; map[tid] = ((uchar)(N_COLORS * map_value)) * (256 / N_COLORS); } __syncthreads(); for (int i = tid; i < IMG_SZ; i += blockDim.x) { out[i] = map[in[i]]; } } __global__ void process_image_kernel(uchar *all_in, uchar *all_out) { uchar *in = &all_in[blockIdx.x * IMG_SZ]; uchar *out = &all_out[blockIdx.x * IMG_SZ]; gpu_process_image_helper(in, out); } // Code assumes it is a power of two #define NSLOTS 16 struct cpu_to_gpu_entry { int img_idx; uchar *img_in, *img_out; }; struct gpu_to_cpu_entry { int img_idx; }; template <typename entry_type> struct queue { using entry = entry_type; entry data[NSLOTS]; cuda::atomic<int> pi; cuda::atomic<int> ci; cuda::atomic<bool> kill; queue() : pi(0), ci(0), kill(false) {} __host__ __device__ bool empty() { return pi.load(memory_order_acquire) == ci.load(memory_order_relaxed); } __host__ __device__ entry* peek() { return &data[ci & (NSLOTS - 1)]; } __host__ __device__ int wraparound_slots_inc(int last) { return (last + 1); } __host__ __device__ void pop() { auto cur_ci = ci.load(memory_order_relaxed); ci.store(wraparound_slots_inc(cur_ci), memory_order_release); } __host__ __device__ bool full() { auto cur_pi = pi.load(memory_order_relaxed); auto cur_ci = ci.load(memory_order_acquire); return (cur_pi - cur_ci) == NSLOTS; } __host__ __device__ entry* next() { return &data[pi & (NSLOTS - 1)]; } __host__ __device__ void push() { auto cur_pi = pi.load(memory_order_relaxed); pi.store(wraparound_slots_inc(cur_pi), memory_order_release); } }; __global__ void gpu_process_image_consumer(queue<cpu_to_gpu_entry> *cpu_to_gpu, queue<gpu_to_cpu_entry> *gpu_to_cpu) { auto & h2g = cpu_to_gpu[blockIdx.x]; auto & g2h = gpu_to_cpu[blockIdx.x]; int tid = threadIdx.x; __shared__ cpu_to_gpu_entry entry; __shared__ bool kill; if (tid == 0) kill = false; while (true) { if (tid == 0) { while (h2g.empty()) { if (h2g.kill.load(memory_order_relaxed)) { kill = true; break; } } entry = *h2g.peek(); dbg_printf("[%d] got image (%d). pi = %d, ci = %d\n", blockIdx.x, entry.img_idx, h2g.pi.load(), h2g.ci.load()); } __syncthreads(); if (kill) { if (tid == 0) dbg_printf("[%d:%d] got kill\n", blockIdx.x, threadIdx.x); return; } if (tid == 0) { h2g.pop(); dbg_printf("[%d] popped image. pi = %d, ci = %d\n", blockIdx.x, h2g.pi.load(), h2g.ci.load()); } gpu_process_image_helper(entry.img_in, entry.img_out); __syncthreads(); if (tid == 0) { while (g2h.full()) ; auto out_entry = g2h.next(); // must have one? out_entry->img_idx = entry.img_idx; dbg_printf("[%d] pushing image (%d). 
pi = %d, ci = %d\n", blockIdx.x, entry.img_idx, g2h.pi.load(), g2h.ci.load()); } __syncthreads(); if (tid == 0) { g2h.push(); dbg_printf("[%d] pushed image. pi = %d, ci = %d\n", blockIdx.x, g2h.pi.load(), g2h.ci.load()); } } } class queues_gpu_context : public gpu_image_processing_context { private: // TODO define queue server context (memory buffers, etc...) int blocks; char *queue_buffer; queue<cpu_to_gpu_entry> *cpu_to_gpu; queue<gpu_to_cpu_entry> *gpu_to_cpu; int next_block = 0; public: explicit queues_gpu_context(int threads) : blocks(calc_blocks(threads)) { // TODO initialize host state CUDA_CHECK(cudaMallocHost(&queue_buffer, sizeof(*cpu_to_gpu) * blocks + sizeof(*gpu_to_cpu) * blocks)); cpu_to_gpu = new (queue_buffer) queue<cpu_to_gpu_entry>[blocks]; gpu_to_cpu = new (queue_buffer + sizeof(queue<cpu_to_gpu_entry>[blocks])) queue<gpu_to_cpu_entry>[blocks]; // TODO launch GPU producer-consumer kernel with given number of threads gpu_process_image_consumer<<<blocks, threads>>>(cpu_to_gpu, gpu_to_cpu); } ~queues_gpu_context() override { // TODO free resources allocated in constructor for (int b = 0; b < blocks; ++b) cpu_to_gpu[b].kill.store(true, memory_order_relaxed); CUDA_CHECK(cudaDeviceSynchronize()); cpu_to_gpu->~queue<cpu_to_gpu_entry>(); gpu_to_cpu->~queue<gpu_to_cpu_entry>(); CUDA_CHECK(cudaFreeHost(queue_buffer)); } bool enqueue(int img_id, uchar *img_in, uchar *img_out) override { // TODO push new task into queue if possible auto &next = cpu_to_gpu[img_id % blocks]; // find_next_queue(cpu_to_gpu, blocks); if (next.full()) return false; dbg_printf("enqueued img id: %d\n", img_id); auto *entry = next.next(); entry->img_idx = img_id; entry->img_in = img_in; entry->img_out = img_out; next.push(); return true; } bool dequeue(int *img_id) override { // TODO query (don't block) the producer-consumer queue for any responses. int block = next_block; for (int i = 0; i < blocks; ++i, ++block) { if (block >= blocks) block -= blocks; if (!gpu_to_cpu[block].empty()) { auto *entry = gpu_to_cpu[block].peek(); *img_id = entry->img_idx; dbg_printf("[CPU] got image %d\n", *img_id); gpu_to_cpu[block].pop(); dbg_printf("block %d i %d next_block %d\n", block, i, next_block); next_block = block - 1; if (next_block < 0) next_block += blocks; return true; } } return false; } int calc_blocks(int threads_per_block) { int device; cudaDeviceProp prop; CUDA_CHECK(cudaGetDevice(&device)); CUDA_CHECK(cudaGetDeviceProperties(&prop, device)); int maxByRegsPerSM = prop.regsPerMultiprocessor / threads_per_block / 32; int maxBySharedMemory = prop.sharedMemPerMultiprocessor / 1312; int maxByThreads = prop.maxThreadsPerMultiProcessor / threads_per_block; printf("maxByRegsPerSM: %d\nmaxBySharedMemoryPerSM: %d\nmaxByThreadsPerSM: %d\n", maxByRegsPerSM, maxBySharedMemory, maxByThreads); auto blocks = min(min(maxByRegsPerSM, maxBySharedMemory), maxByThreads) * prop.multiProcessorCount; printf("number of blocks: %d\n", blocks); return blocks; } }; std::unique_ptr<gpu_image_processing_context> create_queues_server(int threads) { return std::make_unique<queues_gpu_context>(threads); }
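For context, here is a minimal sketch of how a host thread might drive this queue server, assuming only the interface visible above (create_queues_server(), enqueue(), dequeue(), and IMG_SZ/uchar from ex2.h). The image buffers and their count (n_images) are placeholders for whatever the ex3 harness actually supplies, and they are assumed to be GPU-accessible (e.g., pinned) memory; this is not the harness itself.

#include <memory>

// Hypothetical driving loop: keep the per-block CPU->GPU queues fed and drain
// GPU->CPU completions without blocking.
void run_images_sketch(uchar *images_in, uchar *images_out, int n_images, int threads)
{
    std::unique_ptr<gpu_image_processing_context> server = create_queues_server(threads);
    int sent = 0, done = 0;
    while (done < n_images) {
        // Push as many requests as the per-block queues will accept right now.
        while (sent < n_images &&
               server->enqueue(sent,
                               &images_in[(size_t)sent * IMG_SZ],
                               &images_out[(size_t)sent * IMG_SZ]))
            ++sent;
        // Non-blocking poll for finished images.
        int img_id;
        while (server->dequeue(&img_id))
            ++done;
    }
    // server's destructor raises the kill flag and waits for the persistent kernel.
}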
487175bdc0ba07282eb2b02b9e538eb7aa0a8438.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __GPETKERNAL__ #define __GPETKERNAL__ #include "gPET.h" #include "gPET_kernals.h" __global__ void setupcuseed(int* iseed1) //Setup random seeds, used for random sampling in GPU code { int id = blockIdx.x*blockDim.x + threadIdx.x; // obtain current id on thread if( id < NRAND) { hiprand_init(iseed1[id], id, 0, &cuseed[id]); if(id<5) printf("first 5 cuseeds are %u \n",cuseed[id]); } } __device__ int4 getAbsVox(float3 xtemp) //Return the absolute vox index according to the coordinate { int4 temp; temp.z = (xtemp.z-Offsetz_gBrachy)*idz_gBrachy; temp.y = (xtemp.y-Offsety_gBrachy)*idy_gBrachy; temp.x = (xtemp.x-Offsetx_gBrachy)*idx_gBrachy; //The following give the boundry condition temp.w = (temp.x <= 0.0f || temp.x >= Unxvox || temp.y <= 0.0f || temp.y >= Unyvox || temp.z <= 0.0f || temp.z >= Unzvox)?-1 : 1; return temp; } __device__ float lamwck(float e) //Minimum Mean free path prepared to play the Woodcock trick for materials in phantom { float i = idlewk*(e-wcke0) + 0.5f; return tex1D(woock_tex, i); } __device__ float lamwckde(float e) //Minimum Mean free path prepared to play the Woodcock trick for materials in detector { float i = idlewk*(e-wcke0) + 0.5f; return tex1D(woockde_tex, i); } __device__ float itphip_G(int matid, float e) //Photon total inverse mean free path { float i = idleph*(e-elaph0) + 0.5; return tex1D(lamph_tex,matid * NLAPH + i); } __device__ float irylip(int matid, float e) //Inverse Rayleigh mean free path { float i = idlerl*(e - erayl0) + 0.5; return tex1D(rayle_tex,matid * NRAYL + i); } __device__ float icptip(int matid, float e) //Inverse Compton mean free path { float i = idlecp*(e - ecmpt0) + 0.5; return tex1D(compt_tex,matid * NCMPT + i); } __device__ void comsam(float energytemp, hiprandState_t *localState_pt, float *efrac, float *costhe, int matid) //this is the KN model crosssection from table /******************************************************************* c* Samples a Compton event following Klein-Nishina DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* efrac -> fraction of initial energy kept by 2nd photon * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float indcp = hiprand_uniform(localState_pt)*idcpcm; float inde = energytemp*idecm; float temp = tex3D(s_tex,inde+0.5f, indcp+0.5f, matid+0.5f); if(temp > 1.0f) temp = 1.0f; if(temp < -1.0f) temp = -1.0f; *costhe = temp; *efrac = 1.0f/(1.0f + energytemp*IMC2*(1.0f-temp)); } __device__ void comsam(float energytemp, hiprandState_t *localState_pt, float *efrac, float *costhe) //this is the standard KN model which treats electron as free and no dopler effect /******************************************************************* c* Samples a Compton event following Klein-Nishina DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* efrac -> fraction of initial energy kept by 2nd photon * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float e0,twoe,kmin2,loge,mess; e0 = energytemp*IMC2; twoe = 2.0*e0; kmin2 = 1.0/((1.0+twoe)*(1.0+twoe)); loge = __logf(1.0+twoe); for(;;) { if (hiprand_uniform(localState_pt)*(loge+twoe*(1.0+e0)*kmin2) < loge) { *efrac = expf(-hiprand_uniform(localState_pt)*loge); } else { *efrac = 
sqrtf(kmin2+hiprand_uniform(localState_pt)*(1.0-kmin2)); } mess = e0*e0*(*efrac)*(1.0+(*efrac)*(*efrac)); if (hiprand_uniform(localState_pt)*mess <= mess-(1.0-*efrac)*((1.0+twoe)*(*efrac)-1.0))break; } *costhe = 1.0-(1.0-*efrac)/((*efrac)*e0); } __device__ void rylsam(float energytemp, int matid, hiprandState_t *localState_pt, float *costhe) /******************************************************************* c* Samples a Rayleigh event following its DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float indcp = hiprand_uniform(localState_pt)*idcprl; float inde = energytemp*iderl; float temp = tex3D(f_tex,inde+0.5f, indcp+0.5f, matid+0.5f); if(temp > 1.0f) temp = 1.0f; if(temp < -1.0f) temp = -1.0f; *costhe = temp; } __device__ float getDistance(float3 coords, float4 direcs) //special case for sphere, need modification dor other cases /******************************************************************* c* get the distance to the recording plane * c* distance-> nearest distance to current body boundaries * c******************************************************************/ { coords.x=coords.x-recordsphere_d[0]; coords.y=coords.y-recordsphere_d[1]; coords.z=coords.z-recordsphere_d[2]; float t; float a= direcs.x * direcs.x + direcs.y * direcs.y + direcs.z * direcs.z; float b = 2.0f*(direcs.x * coords.x + direcs.y * coords.y + direcs.z * coords.z); float c = (coords.x * coords.x + coords.y * coords.y + coords.z * coords.z)-(recordsphere_d[3])*(recordsphere_d[3]); if(b*b-4*a*c<0) return 0; if(c<0) t=(-b+sqrtf(b*b-4*a*c))/(2*a); else if(b<0) t=(-b-sqrtf(b*b-4*a*c))/(2*a); else t=(-b+sqrtf(b*b-4*a*c))/(2*a); return t;//direction normalized } __device__ void rotate(float *u, float *v, float *w, float costh, float phi) /******************************************************************* c* Rotates a vector; the rotation is specified by giving * c* the polar and azimuthal angles in the "self-frame", as * c* determined by the vector to be rotated. * c* * c* Input: * c* (u,v,w) -> input vector (=d) in the lab. frame * c* costh -> cos(theta), angle between d before and after turn * c* phi -> azimuthal angle (rad) turned by d in its self-frame * c* Output: * c* (u,v,w) -> rotated vector components in the lab. frame * c* Comments: * c* -> (u,v,w) should have norm=1 on input; if not, it is * c* renormalized on output, provided norm>0. * c* -> The algorithm is based on considering the turned vector * c* d' expressed in the self-frame S', * c* d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th)) * c* and then apply a change of frame from S' to the lab * c* frame. S' is defined as having its z' axis coincident * c* with d, its y' axis perpendicular to z and z' and its * c* x' axis equal to y'*z'. The matrix of the change is then* c* / uv/rho -v/rho u \ * c* S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5* c* \ -rho 0 w / * c* -> When rho=0 (w=1 or -1) z and z' are parallel and the y' * c* axis cannot be defined in this way. 
Instead y' is set to* c* y and therefore either x'=x (if w=1) or x'=-x (w=-1) * c******************************************************************/ { float rho2,sinphi,cosphi,sthrho,urho,vrho,sinth,norm; rho2 = (*u)*(*u)+(*v)*(*v); norm = rho2 + (*w)*(*w); // Check normalization: if (fabs(norm-1.0) > SZERO) { // Renormalize: norm = 1.0/__fsqrt_rn(norm); *u = (*u)*norm; *v = (*v)*norm; *w = (*w)*norm; } sinphi = __sinf(phi); cosphi = __cosf(phi); // Case z' not= z: float temp = costh*costh; if (rho2 > ZERO) { if(temp < 1.0f) sthrho = __fsqrt_rn((1.00-temp)/rho2); else sthrho = 0.0f; urho = (*u)*sthrho; vrho = (*v)*sthrho; *u = (*u)*costh - vrho*sinphi + (*w)*urho*cosphi; *v = (*v)*costh + urho*sinphi + (*w)*vrho*cosphi; *w = (*w)*costh - rho2*sthrho*cosphi; } else // 2 especial cases when z'=z or z'=-z: { if(temp < 1.0f) sinth = __fsqrt_rn(1.00-temp); else sinth = 0.0f; *v = sinth*sinphi; if (*w > 0.0) { *u = sinth*cosphi; *w = costh; } else { *u = -sinth*cosphi; *w = -costh; } } } __global__ void photon(const int nactive) /******************************************************************* c* Transports a photon until it either escapes from the * c* phantom or its energy drops below EabsPhoton * c******************************************************************/ { int id = blockIdx.x*blockDim.x + threadIdx.x; // obtain current id on thread const double spe=29979.2458; //cm/us hiprandState_t localState = cuseed[id%NRAND]; while( id < nactive) { float3 xtemp = x_gBrachy[id]; float4 vxtemp = vx_gBrachy[id]; double tof = d_time[id]; int rp = 0; int cp = 0; //if(id <5 ) printf("x=%f,y=%f,z=%f,t = %f, vx=%f,vy=%f,vz=%f,e=%f\n",xtemp.x,xtemp.y,xtemp.z,tof,vxtemp.x,vxtemp.y,vxtemp.z,vxtemp.w); if(vxtemp.w<0||tof<=0) {id+=blockDim.x*gridDim.x;continue;} // Loop until it either escapes or is absorbed: while(1) { // Get lambda from the minimum lambda at the current energy: float lammin = lamwck(vxtemp.w); float s = -lammin*__logf(hiprand_uniform(&localState)); xtemp.x += s*vxtemp.x; xtemp.y += s*vxtemp.y; xtemp.z += s*vxtemp.z; //xtemp.w += s/spe; tof += s/spe; int4 absvoxtemp = getAbsVox(xtemp); //if(id <5 ) printf("id %d absvoxtem.w=%d, x=%f,y=%f,z=%f,vx=%f,vy=%f,vz=%f,s=%f\n",id, absvoxtemp.w, xtemp.x,xtemp.y,xtemp.z,vxtemp.x,vxtemp.y,vxtemp.z,s); if (absvoxtemp.w == -1)//means the particle is outside the phantom { #if RECORDPSF==-1 float r=getDistance(xtemp,vxtemp); xtemp.x +=r*vxtemp.x; xtemp.y +=r*vxtemp.y; xtemp.z +=r*vxtemp.z; tof += r/spe; #endif break; } // get density float voxden = tex3D(dens_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); // get mat id int voxmatid = tex3D(mat_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); // Apply Woodcock trick: float lamden = lammin*voxden; float prob = 1.0-lamden*itphip_G(voxmatid, vxtemp.w); float randno = hiprand_uniform(&localState); // No real event; continue jumping: if (randno < prob) continue; // Compton: prob += lamden*icptip(voxmatid, vxtemp.w); if (randno < prob) { cp++; float efrac, costhe; comsam(vxtemp.w, &localState, &efrac, &costhe, voxmatid); // comsam(vxtemp.w, &localState, &efrac, &costhe); float phi = TWOPI*hiprand_uniform(&localState); vxtemp.w *= efrac; if (vxtemp.w < eabsph) break; rotate(&vxtemp.x,&vxtemp.y,&vxtemp.z,costhe,phi); continue; } // Rayleigh: prob += lamden*irylip(voxmatid, vxtemp.w); if (randno < prob) { rp++; float costhe; rylsam(vxtemp.w, voxmatid, &localState, &costhe); float phi = TWOPI*hiprand_uniform(&localState); rotate(&vxtemp.x,&vxtemp.y,&vxtemp.z,costhe,phi); continue; } // Photoelectric: tof = 
-0.5f;// t<0 means dead in phantom, won't be simulated any more break; } x_gBrachy[id] = xtemp; d_time[id] = tof; vx_gBrachy[id] = vxtemp; CP[id] = cp; RP[id] = rp; id+=blockDim.x*gridDim.x; } cuseed[id%NRAND] = localState; } __device__ float3 setPositronRange(float3 xtemp, float4 vxtemp, hiprandState_t* plocalState, int usedirection) /******************************************************************* c* Finds positron range according to its energy * c* * c* Input: * c* xtemp -> current position, vxtemp -> vx vy vz energy, * c* usedirection -> 0 sample positron direction or 1 predefined * c* Output: * c* position + range * c******************************************************************/ { int id = blockDim.x*blockIdx.x+threadIdx.x; float3 distance; float ekin = vxtemp.w/1e6; //transfer to MeV //float Zeff = 7.2222; //1/9*1+8/9*8; //float Aeff = 13; //Zeff/(1/9*1/1+8/9*8/16) float b1 = 5.44040782;//4.569*Aeff/powf(Zeff,1.209); float b2 = 0.369516529; // 1/(2.873-0.02309*Zeff); float Rex = 0.1*b1*ekin*ekin/(b2+ekin); float sigma = Rex/(2*1.0f); //put material density here distance.x = sigma*hiprand_normal(plocalState); distance.y = sigma*hiprand_normal(plocalState); distance.z = sigma*hiprand_normal(plocalState); float r=sqrtf(distance.x*distance.x+distance.y*distance.y+distance.z*distance.z); if(usedirection) { float tmp = sqrtf(vxtemp.x*vxtemp.x+vxtemp.y*vxtemp.y+vxtemp.z*vxtemp.z); distance.x = r*vxtemp.x/tmp;//reassign direction distance.y = r*vxtemp.y/tmp; distance.z = r*vxtemp.z/tmp; } float s = 0, step =100; int4 absvoxtemp = getAbsVox(xtemp); while(s<r) { step = 1000; if(absvoxtemp.w>0) { b1 = (Offsetx_gBrachy+(absvoxtemp.x+(distance.x>0))*dx_gBrachy-xtemp.x)/distance.x;//remaining voxel length along x direction if(step > b1) {step = b1;absvoxtemp.w=1;} b1 = (Offsety_gBrachy+(absvoxtemp.y+(distance.y>0))*dy_gBrachy-xtemp.y)/distance.y; if(step > b1) {step = b1;absvoxtemp.w=2;} b1 = (Offsetz_gBrachy+(absvoxtemp.z+(distance.z>0))*dz_gBrachy-xtemp.z)/distance.z; if(step > b1) {step = b1;absvoxtemp.w=3;} if(absvoxtemp.w == 1) absvoxtemp.x += (distance.x>0)?1:-1; else if(absvoxtemp.w == 2) absvoxtemp.y += (distance.y>0)?1:-1; else absvoxtemp.z += (distance.z>0)?1:-1; b2 = tex3D(dens_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); step = step*r; s += step*b2; if(s > r) step += (r-s)/b2; } else { step += (r-s)/0.0012905; s = r + 100;//make s > r } xtemp.x += step*distance.x/r; xtemp.y += step*distance.y/r; xtemp.z += step*distance.z/r; if(xtemp.x < Offsetx_gBrachy || xtemp.x > (Offsetx_gBrachy+Unxvox*dx_gBrachy)) absvoxtemp.w = -1; if(xtemp.y < Offsety_gBrachy || xtemp.y > (Offsety_gBrachy+Unyvox*dy_gBrachy)) absvoxtemp.w = -1; if(xtemp.z < Offsetz_gBrachy || xtemp.z > (Offsetz_gBrachy+Unzvox*dz_gBrachy)) absvoxtemp.w = -1; } return xtemp; } __device__ float sampleEkPositron(int type, float* d_coef,hiprandState_t* plocalState) /******************************************************************* c* Finds positron energy according to fitted parameters * c* * c* Input: * c* type -> source type, d_coef -> fitted coefficients * c* Output: * c* kinetic energy in eV * c* Comments: * c* coefficients can be fitted from calculated spectrum * c* refer to Levin et al PMB 44, 781-799 * c******************************************************************/ { float u=100, E=0, sumE=0; while(u> sumE) { E=hiprand_uniform(plocalState)*(d_coef[8*type]-0.511)+0.511; u=d_coef[8*type+1]*hiprand_uniform(plocalState); sumE=0; for(int i=0;i<6;i++) sumE+=d_coef[8*type+2+i]*powf(E,5-i); } return 
(E-0.511)*1e6;//consistent in unit eV } __device__ float3 getPositionFromShape(int index, int shapeindex, float* shapecoeff, hiprandState_t* plocalState) /******************************************************************* c* Sample position uniformly inside a volume * c* * c* Input: * c* type -> source type, shapeindex -> shape type * c* Output: * c* position * c******************************************************************/ { float3 position; //int id = blockDim.x*blockIdx.x+threadIdx.x; if(shapeindex<0 || shapeindex>2) shapeindex=0; if(shapeindex==0)//box { position.x=shapecoeff[6*index]+shapecoeff[6*index+3]*(-1+2*hiprand_uniform(plocalState))*0.5; position.y=shapecoeff[6*index+1]+shapecoeff[6*index+4]*(-1+2*hiprand_uniform(plocalState))*0.5; position.z=shapecoeff[6*index+2]+shapecoeff[6*index+5]*(-1+2*hiprand_uniform(plocalState))*0.5; } else if(shapeindex==1)//cylinder { float phi=TWOPI*hiprand_uniform(plocalState); float r= shapecoeff[6*index+3]*sqrtf(hiprand_uniform(plocalState)); position.x=shapecoeff[6*index]+r*cosf(phi); position.y=shapecoeff[6*index+1]+r*sinf(phi); position.z=shapecoeff[6*index+2]+shapecoeff[6*index+4]*(-1+2*hiprand_uniform(plocalState))*0.5; } else if(shapeindex==2)//sphere { float phi=TWOPI*hiprand_uniform(plocalState); float costheta=-1+2*hiprand_uniform(plocalState); float r= shapecoeff[6*index+3]*cbrtf(hiprand_uniform(plocalState)); position.x=shapecoeff[6*index]+r*sqrtf(1-costheta*costheta)*cosf(phi); position.y=shapecoeff[6*index+1]+r*sqrtf(1-costheta*costheta)*sinf(phi); position.z=shapecoeff[6*index+2]+r*costheta; } return position; } __global__ void setPosition(int nsource, unsigned int totalatom, float tref, float t, unsigned int* d_natom, unsigned int* d_sumpartial, int* d_type, int* d_shape, float* d_shapecoeff, float* d_halftime, float* d_decayRatio, float* d_coef, int useprange) /******************************************************************* c* Sample photon information from a positron source * c* for a given time period * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; hiprandState_t localState = cuseed[id%NRAND]; int sourceindex=0, istart, iend, imid; float randno, phi; double ptime=0, tmp=0; float3 xtemp; float4 vxtemp; while(id<totalatom) { #if BISEARCH == 0 for(sourceindex=0;sourceindex<nsource;sourceindex++) { if(id<d_sumpartial[sourceindex]) break; } #else if(id < d_sumpartial[0]) sourceindex = 0; else { istart = 0; iend = nsource-1; while(iend - istart >1) { imid = (iend+istart)/2; if(id<d_sumpartial[imid]) iend = imid; else istart = imid; } sourceindex = iend; } #endif tmp = double(-d_halftime[d_type[sourceindex]]*1.442695); ptime = tmp*log(hiprand_uniform_double(&localState)); if(ptime<t) { atomicSub(&(d_natom[sourceindex]),1); randno=hiprand_uniform(&localState); if(randno < d_decayRatio[d_type[sourceindex]]) { int ind = atomicAdd(&d_curemitted,1); xtemp = getPositionFromShape(sourceindex,d_shape[sourceindex],d_shapecoeff, &localState); if(useprange == 1) { vxtemp.w = sampleEkPositron(d_type[sourceindex], d_coef, &localState); xtemp = setPositronRange(xtemp, vxtemp,&localState,0); } randno=-1+2*hiprand_uniform(&localState); phi = TWOPI*hiprand_uniform(&localState); vxtemp.x = sqrtf(1-randno*randno)*cosf(phi); vxtemp.y = sqrtf(1-randno*randno)*sinf(phi); vxtemp.z = randno; //printf("%d vx vy vz are %f %f %f\n", id, vxtemp.x,vxtemp.y,vxtemp.z); x_gBrachy[(2*ind)%NPART]=xtemp; //avoid boundary excess x_gBrachy[(2*ind+1)%NPART]=xtemp; 
d_time[(2*ind)%NPART]=double(tref+ptime)*1e6; d_time[(2*ind+1)%NPART]=double(tref+ptime)*1e6; d_eventid[(2*ind)%NPART]=ind; d_eventid[(2*ind+1)%NPART]=ind; phi = TWOPI*hiprand_uniform(&localState); randno=hiprand_normal(&localState)*NonAngle_d; vxtemp.w=MC2+randno*MC2/2.0;//Noncollinearity vx_gBrachy[(2*ind)%NPART]=vxtemp; rotate(&(vxtemp.x), &(vxtemp.y), &(vxtemp.z),-cosf(randno), phi); vxtemp.w=MC2-randno*MC2/2.0; vx_gBrachy[(2*ind+1)%NPART]=vxtemp; } } id+=blockDim.x*gridDim.x; } cuseed[id%NRAND]=localState; } __global__ void setPositionForPhoton(int total,int curpar, int useprange) /******************************************************************* c* Sample photon information from positron PSF * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; hiprandState_t localState = cuseed[id%NRAND]; float randno, phi; float3 xtemp, tmptmp; float4 vxtemp; while(id<total) { xtemp = x_gBrachy[id]; vxtemp = vx_gBrachy[id]; if(useprange == 1) { xtemp = setPositronRange(xtemp, vxtemp, &localState, 1); } randno = -1+2*hiprand_uniform(&localState); phi = TWOPI*hiprand_uniform(&localState); vxtemp.x = sqrtf(1-randno*randno)*cosf(phi); vxtemp.y = sqrtf(1-randno*randno)*sinf(phi); vxtemp.z = randno; x_gBrachy[id]=xtemp; //avoid boundary excess x_gBrachy[(id+total)%NPART]=xtemp; d_eventid[id]=curpar+id; d_eventid[(id+total)%NPART]=curpar+id; phi = TWOPI*hiprand_uniform(&localState); randno=hiprand_normal(&localState)*NonAngle_d; vxtemp.w=MC2+randno*MC2/2.0; vx_gBrachy[id]=vxtemp; rotate(&(vxtemp.x), &(vxtemp.y), &(vxtemp.z),-cosf(randno), phi); vxtemp.w=MC2-randno*MC2/2.0; vx_gBrachy[(id+total)%NPART]=vxtemp; id+=blockDim.x*gridDim.x; } cuseed[id%NRAND]=localState; } //digitizer void __global__ setSitenum(int total, Event* events_d,int depth) /******************************************************************* c* set site index according to depth * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; while(id<total) { switch(depth) { case 0: { events_d[id].siten=0; break; } case 1: { events_d[id].siten=events_d[id].pann; break; } case 2: { events_d[id].siten=events_d[id].pann*moduleN+events_d[id].modn; break; } case 3: { events_d[id].siten=events_d[id].pann*moduleN*crystalN+events_d[id].modn*crystalN+events_d[id].cryn; break; } } id+=blockDim.x*gridDim.x; } } void __global__ energywindow(int* counts, Event* events,int total, float thresholder, float upholder) //this is for the energy window part in digitizer { int id=blockIdx.x*blockDim.x + threadIdx.x; int num=0; while(id<total) { if(events[id].E<thresholder || events[id].E>upholder) { events[id].t=MAXT; num++; } id+=blockDim.x*gridDim.x; } if(num) atomicSub(counts,num); } void __global__ deadtime(int* counts,Event* events,int total, float interval, int deadtype) { //this is the deadtime part in digitizer //deadtype 0 for paralysable, 1 for non int id=blockIdx.x*blockDim.x + threadIdx.x; int start,current,i,k; float tdead; while(id<total) { start=id; if(start==0||events[start].siten!=events[start-1].siten||events[start].t>(events[start-1].t+interval))//find the start index { current=start; i=current+1; k=0; tdead=events[start].t; while(i<total) { while(events[i].siten==events[current].siten && events[i].t<(tdead+interval)) { //events[current].E+=events[i].E; if(!deadtype) { tdead=events[i].t; //paralyzable accounts for pile-up effect //events[current].t=events[i].t; } events[i].t=MAXT; i++; k++; if(i==total) break; } if(i==total) 
break; if(events[i].siten!=events[i-1].siten||events[i].t>(events[i-1].t+interval)) break; current=i; tdead=events[current].t; i++; } atomicSub(counts,k); } id+=blockDim.x*gridDim.x; } } void __global__ addnoise(int* counts, Event* events_d, float lambda, float Emean, float sigma, float interval) { //this is the noise part for digitizer int id = blockIdx.x*blockDim.x + threadIdx.x; Event events[6]; float t=id*interval;//0; hiprandState_t localstate = cuseed[id%NRAND]; int i=0, ind=0; while(t<(id+1)*interval) { t+=-__logf(hiprand_uniform(&localstate))*lambda; if(t<(id+1)*interval) { events[i].t=t;//+id*interval; events[i].E=Emean+sigma*hiprand_normal(&localstate);//2.355; events[i].x=hiprand_uniform(&localstate);//need to be implemented to be matched to global coordinates events[i].y=hiprand_uniform(&localstate); events[i].z=hiprand_uniform(&localstate); events[i].parn=-1; events[i].pann=int(dev_totalPanels*hiprand_uniform(&localstate)); events[i].modn=int(moduleN*hiprand_uniform(&localstate)); events[i].cryn=int(crystalN*hiprand_uniform(&localstate)); events[i].siten=events[i].pann*moduleN*crystalN+events[i].modn*crystalN+events[i].cryn; i=(i+1)%6; if(!i) { ind=atomicAdd(counts,6); for(int j=0; j<6; j++) events_d[ind+j]=events[j]; } } } cuseed[id]=localstate; ind=atomicAdd(counts,i); for(int j=0; j<i; j++) events_d[ind+j]=events[j]; } int __device__ adder(int* counts_d, Event* events_d, Event event) { //this is the adder part in digitizer for(int i=0; i < counts_d[0]; i++) { if(event.siten == events_d[i].siten) { events_d[i].x = (events_d[i].x*events_d[i].E + event.x*event.E)/(events_d[i].E + event.E); events_d[i].y = (events_d[i].y*events_d[i].E + event.y*event.E)/(events_d[i].E + event.E); events_d[i].z = (events_d[i].z*events_d[i].E + event.z*event.E)/(events_d[i].E + event.E); events_d[i].E = (events_d[i].E + event.E); return 1; } } //no recorded event inside the the same crystal events_d[counts_d[0]]=event; counts_d[0]++; return 1; } int __device__ readout(int* counts_d, Event* events_d,int depth, int policy) { //this is for the readout part in digitizer //depth means the readout level. 
0,1,2,3 represents world,panel,module,cry //policy 0,1 for winnertakeall and energy centroid if(depth==3) return 1; if(policy==1) depth = 2; //the readout part switch(depth) { case 0: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=0; break; } case 1: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=events_d[i].pann; break; } case 2: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=events_d[i].pann*moduleN+events_d[i].modn; break; } } int ind=0; for(int i=0; i<counts_d[0]; i++) { Event event0 = events_d[i]; if(event0.t>MAXT*0.1) continue; for(int j=i+1; j<counts_d[0]; j++) { Event event = events_d[j]; if((event.parn==event0.parn)&&(event.siten == event0.siten)) { if(policy==1) { event0.x = (event0.x*event0.E + event.x*event.E)/(event0.E + event.E); event0.y = (event0.y*event0.E + event.y*event.E)/(event0.E + event.E); event0.z = (event0.z*event0.E + event.z*event.E)/(event0.E + event.E); event0.E = (event0.E + event.E); events_d[j].t=MAXT; continue; } event0=(event0.E>event.E)?event0:event; events_d[j].t=MAXT; } } events_d[ind]=event0; ind++; } counts_d[1]=ind; return 1; } void __global__ blur(int total, Event* events, int Eblurpolicy, float Eref, float Rref, float slope, float Spaceblur) { //this is the energy blurring part in digitizer int i = blockIdx.x*blockDim.x + threadIdx.x; float R=0; hiprandState_t localstate=cuseed[i%NRAND]; while(i<total) { if(Eblurpolicy==0) R = sqrt(Eref/events[i].E)*Rref; if(Eblurpolicy==1) R = Rref+slope*(events[i].E-Eref)/1e6; //slope in 1/MeV if(R<0) R=0; events[i].E += hiprand_normal(&localstate)*R*events[i].E/2.35482; //other distribution of energy blurring need to be implemented if(Spaceblur>0) { events[i].x+=Spaceblur*hiprand_normal(&localstate); events[i].y+=Spaceblur*hiprand_normal(&localstate); events[i].z+=Spaceblur*hiprand_normal(&localstate); } i+=blockDim.x*gridDim.x; } cuseed[i%NRAND]=localstate; } __global__ void photonde(Event* events_d, int* counts_d, int nactive, int bufferID, float* dens, int *mat, int *panelID, float *lenx, float *leny, float *lenz, float *MODx, float *MODy, float *MODz, float *Msx, float *Msy, float *Msz, float *LSOx, float *LSOy, float *LSOz, float *sx, float *sy, float *sz, float *ox, float *oy, float *oz, float *dx, float *dy, float *dz, float *UXx, float *UXy, float *UXz, float *UYx, float *UYy, float *UYz,float *UZx, float *UZy, float *UZz) /******************************************************************* c* Transports a photon until it either escapes from the * c* detector or its energy drops below EabsPhoton * c* * c******************************************************************/ { int id = blockIdx.x*blockDim.x + threadIdx.x; int tid=threadIdx.x; Event events[4]; int counts[2]= {0,4}; Event event; int rc = 0; int cc = 0; float tempDen=0.0f; int tempMat=0; __shared__ int nsstktemp; if(tid==0) { nsstktemp = 0; } __syncthreads(); __shared__ float sftemp[NSSTACKSHARED]; __shared__ int sidtemp[NSSTACKSHARED]; extern __shared__ float s[]; float *dens_S = s; int *mat_S = (int*)&dens_S[2]; int *panelID_S = (int*)&mat_S[2]; float *lenx_S = (float*)&panelID_S[dev_totalPanels]; float *leny_S = (float*)&lenx_S[dev_totalPanels]; float *lenz_S = (float*)&leny_S[dev_totalPanels]; float *MODx_S = (float*)&lenz_S[dev_totalPanels]; float *MODy_S = (float*)&MODx_S[dev_totalPanels]; float *MODz_S = (float*)&MODy_S[dev_totalPanels]; float *Msx_S = (float*)&MODz_S[dev_totalPanels]; float *Msy_S = (float*)&Msx_S[dev_totalPanels]; float *Msz_S = (float*)&Msy_S[dev_totalPanels]; float *LSOx_S = 
(float*)&Msz_S[dev_totalPanels]; float *LSOy_S = (float*)&LSOx_S[dev_totalPanels]; float *LSOz_S = (float*)&LSOy_S[dev_totalPanels]; float *sx_S = (float*)&LSOz_S[dev_totalPanels]; float *sy_S = (float*)&sx_S[dev_totalPanels]; float *sz_S = (float*)&sy_S[dev_totalPanels]; float *ox_S = (float*)&sz_S[dev_totalPanels]; float *oy_S = (float*)&ox_S[dev_totalPanels]; float *oz_S = (float*)&oy_S[dev_totalPanels]; float *dx_S = (float*)&oz_S[dev_totalPanels]; float *dy_S = (float*)&dx_S[dev_totalPanels]; float *dz_S = (float*)&dy_S[dev_totalPanels]; float *UXx_S = (float*)&dz_S[dev_totalPanels]; float *UXy_S = (float*)&UXx_S[dev_totalPanels]; float *UXz_S = (float*)&UXy_S[dev_totalPanels]; float *UYx_S = (float*)&UXz_S[dev_totalPanels]; float *UYy_S = (float*)&UYx_S[dev_totalPanels]; float *UYz_S = (float*)&UYy_S[dev_totalPanels]; float *UZx_S = (float*)&UYz_S[dev_totalPanels]; float *UZy_S = (float*)&UZx_S[dev_totalPanels]; float *UZz_S = (float*)&UZy_S[dev_totalPanels]; if(tid==0) { for (int i=0; i<2; i++) { mat_S[i]=mat[i]; dens_S[i]=dens[i]; } for(int i=0; i<dev_totalPanels; i++) { panelID_S[i]=panelID[i]; lenx_S[i]=lenx[i]; leny_S[i]=leny[i]; lenz_S[i]=lenz[i]; MODx_S[i]=MODx[i]; MODy_S[i]=MODy[i]; MODz_S[i]=MODz[i]; Msx_S[i]=Msx[i]; Msy_S[i]=Msy[i]; Msz_S[i]=Msz[i]; LSOx_S[i]=LSOx[i]; LSOy_S[i]=LSOy[i]; LSOz_S[i]=LSOz[i]; sx_S[i]=sx[i]; sy_S[i]=sy[i]; sz_S[i]=sz[i]; ox_S[i]=ox[i]; oy_S[i]=oy[i]; oz_S[i]=oz[i]; dx_S[i]=dx[i]; dy_S[i]=dy[i]; dz_S[i]=dz[i]; UXx_S[i]=UXx[i]; UXy_S[i]=UXy[i]; UXz_S[i]=UXz[i]; UYx_S[i]=UYx[i]; UYy_S[i]=UYy[i]; UYz_S[i]=UYz[i]; UZx_S[i]=UZx[i]; UZy_S[i]=UZy[i]; UZz_S[i]=UZz[i]; }//*/ } __syncthreads(); // obtain current id on thread hiprandState_t localState = cuseed[id%NRAND]; if( id < nactive ) { if(d_time[id]>0) { float3 xtemp = x_gBrachy[id]; float4 vxtemp = vx_gBrachy[id]; double tof = d_time[id]; int eid = d_eventid[id]; int rp = RP[id]; int cp = CP[id]; // change global coordinates to local coordinates int paID=-1; float3 xtemp2; float4 vxtemp2; //get the panelid crystal id that the particle enters??? 
for (int i=0; i<dev_totalPanels; i++) { //new coordinates float tempx=(xtemp.x-ox_S[i])*UXx_S[i]+(xtemp.y-oy_S[i])*UXy_S[i]+(xtemp.z-oz_S[i])*UXz_S[i]; float tempy=(xtemp.x-ox_S[i])*UYx_S[i]+(xtemp.y-oy_S[i])*UYy_S[i]+(xtemp.z-oz_S[i])*UYz_S[i]; float tempz=(xtemp.x-ox_S[i])*UZx_S[i]+(xtemp.y-oy_S[i])*UZy_S[i]+(xtemp.z-oz_S[i])*UZz_S[i]; float tempvx=vxtemp.x*UXx[i]+vxtemp.y*UXy[i]+vxtemp.z*UXz[i];//component along different directions float tempvy=vxtemp.x*UYx[i]+vxtemp.y*UYy[i]+vxtemp.z*UYz[i]; float tempvz=vxtemp.x*UZx[i]+vxtemp.y*UZy[i]+vxtemp.z*UZz[i]; float tempx2=0.0f; if(tempvx*dx_S[i]>=0) { float tempy2=tempy-tempx/tempvx*tempvy; float tempz2=tempz-tempx/tempvx*tempvz; if(abs(tempy2)<leny_S[i]/2 & abs(tempz2)<lenz_S[i]/2) { xtemp2.x=tempx2; xtemp2.y=tempy2; xtemp2.z=tempz2; //xtemp2.w=xtemp.w; tof += -tempx/(29979.2458*tempvx); vxtemp2.x=tempvx; vxtemp2.y=tempvy; vxtemp2.z=tempvz; vxtemp2.w=vxtemp.w; paID=panelID_S[i]; break; } } float tempy2=tempy-tempx/tempvx*tempvy; float tempz2=tempz-tempx/tempvx*tempvz; xtemp2.x=tempx2; xtemp2.y=tempy2; xtemp2.z=tempz2; //xtemp2.w=xtemp.w; vxtemp2.x=tempvx; vxtemp2.y=tempvy; vxtemp2.z=tempvz; vxtemp2.w=vxtemp.w; paID=-1; } // Loop until it either escapes or is absorbed: float lammin,s; for(;;) { if (paID==-1) break; // Get lambda from the minimum lambda at the current energy: lammin = lamwckde(vxtemp2.w); //0.1;//make sure use the one corresponding to detector s = -lammin*__logf(hiprand_uniform(&localState)); // Get the coordinates of the photon after passing a free length xtemp2.x += s*vxtemp2.x; xtemp2.y += s*vxtemp2.y; xtemp2.z += s*vxtemp2.z; tof += s/29979.2458; // if out of panel if (paID==-1|abs(xtemp2.y)>leny_S[paID]*0.5| abs(xtemp2.z)>lenz_S[paID]*0.5 |(xtemp2.x*dx_S[paID])<0 |(xtemp2.x*dx_S[paID])>lenx_S[paID]) { break; } int m_id=-1; //material id int M_id=-1; // module id int L_id=-1; // LSO id crystalSearch(xtemp2,leny_S[paID],lenz_S[paID],MODy_S[paID],MODz_S[paID],Msy_S[paID],Msz_S[paID], LSOy_S[paID],LSOz_S[paID],sy_S[paID],sz_S[paID],dy_S[paID], dz_S[paID], &m_id, &M_id, &L_id); tempDen = dens_S[m_id]; tempMat = mat_S[m_id]; // Apply Woodcock trick: float lamden = lammin*tempDen; float prob = 1.0-lamden*itphip_G(tempMat, vxtemp2.w); if(prob<0) prob=0; float randno = hiprand_uniform(&localState); // Compton: // No real event; continue jumping: if (randno < prob) continue; prob += lamden*icptip(tempMat, vxtemp2.w); if (randno < prob) { float efrac, costhe; comsam(vxtemp2.w, &localState, &efrac, &costhe);//, tempMat); float de = vxtemp2.w * (1.0f-efrac); float phi = TWOPI*hiprand_uniform(&localState); //record events if (nsstktemp!= NSSTACKSHARED && m_id==0)//only events inside crystal can be recorded { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 1; event.parn= id+bufferID; event.cryn=L_id; event.modn=M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn;//maybe should use event.pann-1 or event.cryn-1 to make sure siten start from 0 event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; sftemp[ind] = de;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.E = de; event.t = tof; event.x = xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts,events,event); } vxtemp2.w -= de; if (vxtemp2.w < eabsph) { if (nsstktemp!= NSSTACKSHARED && 
m_id==0) { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 2; event.parn=id+bufferID; event.cryn=L_id; event.modn= M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn; event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; sftemp[ind] = vxtemp2.w;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.E = vxtemp2.w; event.t = tof; event.x =xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts,events,event); } break; } rotate(&vxtemp2.x,&vxtemp2.y,&vxtemp2.z,costhe,phi); continue; } // Rayleigh: prob += lamden*irylip(tempMat, vxtemp2.w); if (randno < prob) { rc++; float costhe; rylsam(vxtemp2.w, tempMat, &localState, &costhe); float phi = TWOPI*hiprand_uniform(&localState); #if RECORDRayleigh==1 if (nsstktemp!= NSSTACKSHARED && m_id==0) { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 3; sftemp[ind] = 0.0f;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; } #endif rotate(&vxtemp2.x,&vxtemp2.y,&vxtemp2.z,costhe,phi); continue; } // Photoelectric: //if(id <1) printf("photo den lamda prob are %f %f %f\n", tempDen, lamden,1-prob); if (nsstktemp!= NSSTACKSHARED && m_id==0) { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 4; sftemp[ind] = vxtemp2.w; sftemp[ind+1] = tof;//s; sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.parn=id+bufferID; event.cryn=L_id; event.modn= M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn; event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; event.E = vxtemp2.w; event.t = tof; event.x = xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts, events, event);//this is for digitizer } break; } if(counts[0]) { readout(counts,events,rdepth_d,rpolicy_d); int ind=atomicAdd(counts_d,counts[1]); for(int i=0; i<counts[1]; i++) events_d[ind+i]=events[i]; }//*/ } //id+=blockDim.x*gridDim.x; } __syncthreads(); if(id<NRAND) cuseed[id] = localState; __shared__ int istart; if(threadIdx.x==0) { //printf("nsstktemp1 = %d\n",nsstktemp); istart = atomicAdd(&nsstk, nsstktemp); //printf("istart = %d\n",istart); } __syncthreads(); // if(id==0) printf("total events=%d\ncurrent total hits=%d\n", counts_d[0],nsstk); for(int i = 0; i < 1+(nsstktemp)/blockDim.x; i++) { if(nsstktemp == 0) break; int ind = istart + i*blockDim.x + tid; if(ind < istart + nsstktemp && ind<NSSTACK) { sf[ind] = sftemp[i*blockDim.x + tid];//this is for hits events sid[ind] = sidtemp[i*blockDim.x + tid]; } } __syncthreads();//*/ } // crystal index and material type __device__ void crystalSearch(float3 xtemp2,float leny_S,float lenz_S,float MODy_S,float MODz_S,float Msy_S,float Msz_S,float LSOy_S,float LSOz_S,float sy_S,float sz_S,float dy_S, float dz_S, int *m_id, int *M_id, int *L_id) { float x=xtemp2.x; float y=xtemp2.y; float z=xtemp2.z; for(int tmp=0;tmp<Nsurface_d;tmp++) { 
if((surface_d[tmp*10+0]*x*x+surface_d[tmp*10+1]*y*y+surface_d[tmp*10+2]*z*z+surface_d[tmp*10+3]*x*y+surface_d[tmp*10+4]*x*z+surface_d[tmp*10+5]*y*z +surface_d[tmp*10+6]*x+surface_d[tmp*10+7]*y+surface_d[tmp*10+8]*z+surface_d[tmp*10+9])<0) { *m_id=1; return; } } y=leny_S/2+xtemp2.y;//*dy_S; z=lenz_S/2+xtemp2.z;//*dz_S; int M_id_y=floorf(y/(MODy_S+Msy_S))>0?int(y/(MODy_S+Msy_S)):0; int M_id_z=floorf(z/(MODz_S+Msz_S))>0?int(z/(MODz_S+Msz_S)):0; *M_id=M_id_z*moduleNy+M_id_y; y=y-M_id_y*(MODy_S+Msy_S); z=z-M_id_z*(MODz_S+Msz_S); if(y> MODy_S || z> MODz_S) { *m_id=1; return; } int L_id_y=floorf(y/(LSOy_S+sy_S))>0?int(y/(LSOy_S+sy_S)):0; int L_id_z=floorf(z/(LSOz_S+sz_S))>0?int(z/(LSOz_S+sz_S)):0; *L_id=L_id_z*crystalNy+L_id_y; y=y-L_id_y*(LSOy_S+sy_S); z=z-L_id_z*(LSOz_S+sz_S); if(y>LSOy_S || z> LSOz_S) { *m_id=1; return; } *m_id=0;//*/ } #endif
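As an aside, the root selection in getDistance() above (used in photon() to project escaping photons onto the recording sphere; recordsphere_d holds the sphere centre and radius) is easier to follow written out on the host. The function below is only a reference mirror of the same math, not gPET code.

#include <cmath>

// Host-side mirror of getDistance(): distance t along direction (dx,dy,dz) from
// point (px,py,pz) to the sphere centred at (cx,cy,cz) with radius R. Solves
// |p + t*d - c|^2 = R^2, i.e. a*t^2 + b*t + c0 = 0, picking the same root the kernel picks.
double distance_to_record_sphere(double px, double py, double pz,
                                 double dx, double dy, double dz,
                                 double cx, double cy, double cz, double R)
{
    px -= cx; py -= cy; pz -= cz;                 // move to the sphere-centred frame
    double a    = dx*dx + dy*dy + dz*dz;          // == 1 for a normalised direction
    double b    = 2.0 * (dx*px + dy*py + dz*pz);
    double c0   = px*px + py*py + pz*pz - R*R;
    double disc = b*b - 4.0*a*c0;
    if (disc < 0.0) return 0.0;                   // ray misses the sphere: kernel returns 0
    double sq = std::sqrt(disc);
    if (c0 < 0.0) return (-b + sq) / (2.0*a);     // start inside: take the exit (positive) root
    if (b  < 0.0) return (-b - sq) / (2.0*a);     // outside, heading towards it: entry root
    return (-b + sq) / (2.0*a);                   // outside, heading away: kernel returns this root unchanged
}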
487175bdc0ba07282eb2b02b9e538eb7aa0a8438.cu
#ifndef __GPETKERNAL__ #define __GPETKERNAL__ #include "gPET.h" #include "gPET_kernals.h" __global__ void setupcuseed(int* iseed1) //Setup random seeds, used for random sampling in GPU code { int id = blockIdx.x*blockDim.x + threadIdx.x; // obtain current id on thread if( id < NRAND) { curand_init(iseed1[id], id, 0, &cuseed[id]); if(id<5) printf("first 5 cuseeds are %u \n",cuseed[id]); } } __device__ int4 getAbsVox(float3 xtemp) //Return the absolute vox index according to the coordinate { int4 temp; temp.z = (xtemp.z-Offsetz_gBrachy)*idz_gBrachy; temp.y = (xtemp.y-Offsety_gBrachy)*idy_gBrachy; temp.x = (xtemp.x-Offsetx_gBrachy)*idx_gBrachy; //The following give the boundry condition temp.w = (temp.x <= 0.0f || temp.x >= Unxvox || temp.y <= 0.0f || temp.y >= Unyvox || temp.z <= 0.0f || temp.z >= Unzvox)?-1 : 1; return temp; } __device__ float lamwck(float e) //Minimum Mean free path prepared to play the Woodcock trick for materials in phantom { float i = idlewk*(e-wcke0) + 0.5f; return tex1D(woock_tex, i); } __device__ float lamwckde(float e) //Minimum Mean free path prepared to play the Woodcock trick for materials in detector { float i = idlewk*(e-wcke0) + 0.5f; return tex1D(woockde_tex, i); } __device__ float itphip_G(int matid, float e) //Photon total inverse mean free path { float i = idleph*(e-elaph0) + 0.5; return tex1D(lamph_tex,matid * NLAPH + i); } __device__ float irylip(int matid, float e) //Inverse Rayleigh mean free path { float i = idlerl*(e - erayl0) + 0.5; return tex1D(rayle_tex,matid * NRAYL + i); } __device__ float icptip(int matid, float e) //Inverse Compton mean free path { float i = idlecp*(e - ecmpt0) + 0.5; return tex1D(compt_tex,matid * NCMPT + i); } __device__ void comsam(float energytemp, curandState *localState_pt, float *efrac, float *costhe, int matid) //this is the KN model crosssection from table /******************************************************************* c* Samples a Compton event following Klein-Nishina DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* efrac -> fraction of initial energy kept by 2nd photon * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float indcp = curand_uniform(localState_pt)*idcpcm; float inde = energytemp*idecm; float temp = tex3D(s_tex,inde+0.5f, indcp+0.5f, matid+0.5f); if(temp > 1.0f) temp = 1.0f; if(temp < -1.0f) temp = -1.0f; *costhe = temp; *efrac = 1.0f/(1.0f + energytemp*IMC2*(1.0f-temp)); } __device__ void comsam(float energytemp, curandState *localState_pt, float *efrac, float *costhe) //this is the standard KN model which treats electron as free and no dopler effect /******************************************************************* c* Samples a Compton event following Klein-Nishina DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* efrac -> fraction of initial energy kept by 2nd photon * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float e0,twoe,kmin2,loge,mess; e0 = energytemp*IMC2; twoe = 2.0*e0; kmin2 = 1.0/((1.0+twoe)*(1.0+twoe)); loge = __logf(1.0+twoe); for(;;) { if (curand_uniform(localState_pt)*(loge+twoe*(1.0+e0)*kmin2) < loge) { *efrac = expf(-curand_uniform(localState_pt)*loge); } else { *efrac = sqrtf(kmin2+curand_uniform(localState_pt)*(1.0-kmin2)); } mess = 
e0*e0*(*efrac)*(1.0+(*efrac)*(*efrac)); if (curand_uniform(localState_pt)*mess <= mess-(1.0-*efrac)*((1.0+twoe)*(*efrac)-1.0))break; } *costhe = 1.0-(1.0-*efrac)/((*efrac)*e0); } __device__ void rylsam(float energytemp, int matid, curandState *localState_pt, float *costhe) /******************************************************************* c* Samples a Rayleigh event following its DCS * c* * c* Input: * c* energy -> photon energy in eV * c* Output: * c* costhe -> cos(theta) of the 2nd photon * c* Comments: * c* -> inirng() must be called before 1st call * c******************************************************************/ { float indcp = curand_uniform(localState_pt)*idcprl; float inde = energytemp*iderl; float temp = tex3D(f_tex,inde+0.5f, indcp+0.5f, matid+0.5f); if(temp > 1.0f) temp = 1.0f; if(temp < -1.0f) temp = -1.0f; *costhe = temp; } __device__ float getDistance(float3 coords, float4 direcs) //special case for sphere, need modification dor other cases /******************************************************************* c* get the distance to the recording plane * c* distance-> nearest distance to current body boundaries * c******************************************************************/ { coords.x=coords.x-recordsphere_d[0]; coords.y=coords.y-recordsphere_d[1]; coords.z=coords.z-recordsphere_d[2]; float t; float a= direcs.x * direcs.x + direcs.y * direcs.y + direcs.z * direcs.z; float b = 2.0f*(direcs.x * coords.x + direcs.y * coords.y + direcs.z * coords.z); float c = (coords.x * coords.x + coords.y * coords.y + coords.z * coords.z)-(recordsphere_d[3])*(recordsphere_d[3]); if(b*b-4*a*c<0) return 0; if(c<0) t=(-b+sqrtf(b*b-4*a*c))/(2*a); else if(b<0) t=(-b-sqrtf(b*b-4*a*c))/(2*a); else t=(-b+sqrtf(b*b-4*a*c))/(2*a); return t;//direction normalized } __device__ void rotate(float *u, float *v, float *w, float costh, float phi) /******************************************************************* c* Rotates a vector; the rotation is specified by giving * c* the polar and azimuthal angles in the "self-frame", as * c* determined by the vector to be rotated. * c* * c* Input: * c* (u,v,w) -> input vector (=d) in the lab. frame * c* costh -> cos(theta), angle between d before and after turn * c* phi -> azimuthal angle (rad) turned by d in its self-frame * c* Output: * c* (u,v,w) -> rotated vector components in the lab. frame * c* Comments: * c* -> (u,v,w) should have norm=1 on input; if not, it is * c* renormalized on output, provided norm>0. * c* -> The algorithm is based on considering the turned vector * c* d' expressed in the self-frame S', * c* d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th)) * c* and then apply a change of frame from S' to the lab * c* frame. S' is defined as having its z' axis coincident * c* with d, its y' axis perpendicular to z and z' and its * c* x' axis equal to y'*z'. The matrix of the change is then* c* / uv/rho -v/rho u \ * c* S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5* c* \ -rho 0 w / * c* -> When rho=0 (w=1 or -1) z and z' are parallel and the y' * c* axis cannot be defined in this way. 
Instead y' is set to* c* y and therefore either x'=x (if w=1) or x'=-x (w=-1) * c******************************************************************/ { float rho2,sinphi,cosphi,sthrho,urho,vrho,sinth,norm; rho2 = (*u)*(*u)+(*v)*(*v); norm = rho2 + (*w)*(*w); // Check normalization: if (fabs(norm-1.0) > SZERO) { // Renormalize: norm = 1.0/__fsqrt_rn(norm); *u = (*u)*norm; *v = (*v)*norm; *w = (*w)*norm; } sinphi = __sinf(phi); cosphi = __cosf(phi); // Case z' not= z: float temp = costh*costh; if (rho2 > ZERO) { if(temp < 1.0f) sthrho = __fsqrt_rn((1.00-temp)/rho2); else sthrho = 0.0f; urho = (*u)*sthrho; vrho = (*v)*sthrho; *u = (*u)*costh - vrho*sinphi + (*w)*urho*cosphi; *v = (*v)*costh + urho*sinphi + (*w)*vrho*cosphi; *w = (*w)*costh - rho2*sthrho*cosphi; } else // 2 especial cases when z'=z or z'=-z: { if(temp < 1.0f) sinth = __fsqrt_rn(1.00-temp); else sinth = 0.0f; *v = sinth*sinphi; if (*w > 0.0) { *u = sinth*cosphi; *w = costh; } else { *u = -sinth*cosphi; *w = -costh; } } } __global__ void photon(const int nactive) /******************************************************************* c* Transports a photon until it either escapes from the * c* phantom or its energy drops below EabsPhoton * c******************************************************************/ { int id = blockIdx.x*blockDim.x + threadIdx.x; // obtain current id on thread const double spe=29979.2458; //cm/us curandState localState = cuseed[id%NRAND]; while( id < nactive) { float3 xtemp = x_gBrachy[id]; float4 vxtemp = vx_gBrachy[id]; double tof = d_time[id]; int rp = 0; int cp = 0; //if(id <5 ) printf("x=%f,y=%f,z=%f,t = %f, vx=%f,vy=%f,vz=%f,e=%f\n",xtemp.x,xtemp.y,xtemp.z,tof,vxtemp.x,vxtemp.y,vxtemp.z,vxtemp.w); if(vxtemp.w<0||tof<=0) {id+=blockDim.x*gridDim.x;continue;} // Loop until it either escapes or is absorbed: while(1) { // Get lambda from the minimum lambda at the current energy: float lammin = lamwck(vxtemp.w); float s = -lammin*__logf(curand_uniform(&localState)); xtemp.x += s*vxtemp.x; xtemp.y += s*vxtemp.y; xtemp.z += s*vxtemp.z; //xtemp.w += s/spe; tof += s/spe; int4 absvoxtemp = getAbsVox(xtemp); //if(id <5 ) printf("id %d absvoxtem.w=%d, x=%f,y=%f,z=%f,vx=%f,vy=%f,vz=%f,s=%f\n",id, absvoxtemp.w, xtemp.x,xtemp.y,xtemp.z,vxtemp.x,vxtemp.y,vxtemp.z,s); if (absvoxtemp.w == -1)//means the particle is outside the phantom { #if RECORDPSF==-1 float r=getDistance(xtemp,vxtemp); xtemp.x +=r*vxtemp.x; xtemp.y +=r*vxtemp.y; xtemp.z +=r*vxtemp.z; tof += r/spe; #endif break; } // get density float voxden = tex3D(dens_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); // get mat id int voxmatid = tex3D(mat_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); // Apply Woodcock trick: float lamden = lammin*voxden; float prob = 1.0-lamden*itphip_G(voxmatid, vxtemp.w); float randno = curand_uniform(&localState); // No real event; continue jumping: if (randno < prob) continue; // Compton: prob += lamden*icptip(voxmatid, vxtemp.w); if (randno < prob) { cp++; float efrac, costhe; comsam(vxtemp.w, &localState, &efrac, &costhe, voxmatid); // comsam(vxtemp.w, &localState, &efrac, &costhe); float phi = TWOPI*curand_uniform(&localState); vxtemp.w *= efrac; if (vxtemp.w < eabsph) break; rotate(&vxtemp.x,&vxtemp.y,&vxtemp.z,costhe,phi); continue; } // Rayleigh: prob += lamden*irylip(voxmatid, vxtemp.w); if (randno < prob) { rp++; float costhe; rylsam(vxtemp.w, voxmatid, &localState, &costhe); float phi = TWOPI*curand_uniform(&localState); rotate(&vxtemp.x,&vxtemp.y,&vxtemp.z,costhe,phi); continue; } // Photoelectric: tof = -0.5f;// t<0 
means dead in phantom, won't be simulated any more break; } x_gBrachy[id] = xtemp; d_time[id] = tof; vx_gBrachy[id] = vxtemp; CP[id] = cp; RP[id] = rp; id+=blockDim.x*gridDim.x; } cuseed[id%NRAND] = localState; } __device__ float3 setPositronRange(float3 xtemp, float4 vxtemp, curandState* plocalState, int usedirection) /******************************************************************* c* Finds positron range according to its energy * c* * c* Input: * c* xtemp -> current position, vxtemp -> vx vy vz energy, * c* usedirection -> 0 sample positron direction or 1 predefined * c* Output: * c* position + range * c******************************************************************/ { int id = blockDim.x*blockIdx.x+threadIdx.x; float3 distance; float ekin = vxtemp.w/1e6; //transfer to MeV //float Zeff = 7.2222; //1/9*1+8/9*8; //float Aeff = 13; //Zeff/(1/9*1/1+8/9*8/16) float b1 = 5.44040782;//4.569*Aeff/powf(Zeff,1.209); float b2 = 0.369516529; // 1/(2.873-0.02309*Zeff); float Rex = 0.1*b1*ekin*ekin/(b2+ekin); float sigma = Rex/(2*1.0f); //put material density here distance.x = sigma*curand_normal(plocalState); distance.y = sigma*curand_normal(plocalState); distance.z = sigma*curand_normal(plocalState); float r=sqrtf(distance.x*distance.x+distance.y*distance.y+distance.z*distance.z); if(usedirection) { float tmp = sqrtf(vxtemp.x*vxtemp.x+vxtemp.y*vxtemp.y+vxtemp.z*vxtemp.z); distance.x = r*vxtemp.x/tmp;//reassign direction distance.y = r*vxtemp.y/tmp; distance.z = r*vxtemp.z/tmp; } float s = 0, step =100; int4 absvoxtemp = getAbsVox(xtemp); while(s<r) { step = 1000; if(absvoxtemp.w>0) { b1 = (Offsetx_gBrachy+(absvoxtemp.x+(distance.x>0))*dx_gBrachy-xtemp.x)/distance.x;//remaining voxel length along x direction if(step > b1) {step = b1;absvoxtemp.w=1;} b1 = (Offsety_gBrachy+(absvoxtemp.y+(distance.y>0))*dy_gBrachy-xtemp.y)/distance.y; if(step > b1) {step = b1;absvoxtemp.w=2;} b1 = (Offsetz_gBrachy+(absvoxtemp.z+(distance.z>0))*dz_gBrachy-xtemp.z)/distance.z; if(step > b1) {step = b1;absvoxtemp.w=3;} if(absvoxtemp.w == 1) absvoxtemp.x += (distance.x>0)?1:-1; else if(absvoxtemp.w == 2) absvoxtemp.y += (distance.y>0)?1:-1; else absvoxtemp.z += (distance.z>0)?1:-1; b2 = tex3D(dens_tex, absvoxtemp.x, absvoxtemp.y, absvoxtemp.z); step = step*r; s += step*b2; if(s > r) step += (r-s)/b2; } else { step += (r-s)/0.0012905; s = r + 100;//make s > r } xtemp.x += step*distance.x/r; xtemp.y += step*distance.y/r; xtemp.z += step*distance.z/r; if(xtemp.x < Offsetx_gBrachy || xtemp.x > (Offsetx_gBrachy+Unxvox*dx_gBrachy)) absvoxtemp.w = -1; if(xtemp.y < Offsety_gBrachy || xtemp.y > (Offsety_gBrachy+Unyvox*dy_gBrachy)) absvoxtemp.w = -1; if(xtemp.z < Offsetz_gBrachy || xtemp.z > (Offsetz_gBrachy+Unzvox*dz_gBrachy)) absvoxtemp.w = -1; } return xtemp; } __device__ float sampleEkPositron(int type, float* d_coef,curandState* plocalState) /******************************************************************* c* Finds positron energy according to fitted parameters * c* * c* Input: * c* type -> source type, d_coef -> fitted coefficients * c* Output: * c* kinetic energy in eV * c* Comments: * c* coefficients can be fitted from calculated spectrum * c* refer to Levin et al PMB 44, 781-799 * c******************************************************************/ { float u=100, E=0, sumE=0; while(u> sumE) { E=curand_uniform(plocalState)*(d_coef[8*type]-0.511)+0.511; u=d_coef[8*type+1]*curand_uniform(plocalState); sumE=0; for(int i=0;i<6;i++) sumE+=d_coef[8*type+2+i]*powf(E,5-i); } return (E-0.511)*1e6;//consistent in unit eV } 
__device__ float3 getPositionFromShape(int index, int shapeindex, float* shapecoeff, curandState* plocalState) /******************************************************************* c* Sample position uniformly inside a volume * c* * c* Input: * c* type -> source type, shapeindex -> shape type * c* Output: * c* position * c******************************************************************/ { float3 position; //int id = blockDim.x*blockIdx.x+threadIdx.x; if(shapeindex<0 || shapeindex>2) shapeindex=0; if(shapeindex==0)//box { position.x=shapecoeff[6*index]+shapecoeff[6*index+3]*(-1+2*curand_uniform(plocalState))*0.5; position.y=shapecoeff[6*index+1]+shapecoeff[6*index+4]*(-1+2*curand_uniform(plocalState))*0.5; position.z=shapecoeff[6*index+2]+shapecoeff[6*index+5]*(-1+2*curand_uniform(plocalState))*0.5; } else if(shapeindex==1)//cylinder { float phi=TWOPI*curand_uniform(plocalState); float r= shapecoeff[6*index+3]*sqrtf(curand_uniform(plocalState)); position.x=shapecoeff[6*index]+r*cosf(phi); position.y=shapecoeff[6*index+1]+r*sinf(phi); position.z=shapecoeff[6*index+2]+shapecoeff[6*index+4]*(-1+2*curand_uniform(plocalState))*0.5; } else if(shapeindex==2)//sphere { float phi=TWOPI*curand_uniform(plocalState); float costheta=-1+2*curand_uniform(plocalState); float r= shapecoeff[6*index+3]*cbrtf(curand_uniform(plocalState)); position.x=shapecoeff[6*index]+r*sqrtf(1-costheta*costheta)*cosf(phi); position.y=shapecoeff[6*index+1]+r*sqrtf(1-costheta*costheta)*sinf(phi); position.z=shapecoeff[6*index+2]+r*costheta; } return position; } __global__ void setPosition(int nsource, unsigned int totalatom, float tref, float t, unsigned int* d_natom, unsigned int* d_sumpartial, int* d_type, int* d_shape, float* d_shapecoeff, float* d_halftime, float* d_decayRatio, float* d_coef, int useprange) /******************************************************************* c* Sample photon information from a positron source * c* for a given time period * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; curandState localState = cuseed[id%NRAND]; int sourceindex=0, istart, iend, imid; float randno, phi; double ptime=0, tmp=0; float3 xtemp; float4 vxtemp; while(id<totalatom) { #if BISEARCH == 0 for(sourceindex=0;sourceindex<nsource;sourceindex++) { if(id<d_sumpartial[sourceindex]) break; } #else if(id < d_sumpartial[0]) sourceindex = 0; else { istart = 0; iend = nsource-1; while(iend - istart >1) { imid = (iend+istart)/2; if(id<d_sumpartial[imid]) iend = imid; else istart = imid; } sourceindex = iend; } #endif tmp = double(-d_halftime[d_type[sourceindex]]*1.442695); ptime = tmp*log(curand_uniform_double(&localState)); if(ptime<t) { atomicSub(&(d_natom[sourceindex]),1); randno=curand_uniform(&localState); if(randno < d_decayRatio[d_type[sourceindex]]) { int ind = atomicAdd(&d_curemitted,1); xtemp = getPositionFromShape(sourceindex,d_shape[sourceindex],d_shapecoeff, &localState); if(useprange == 1) { vxtemp.w = sampleEkPositron(d_type[sourceindex], d_coef, &localState); xtemp = setPositronRange(xtemp, vxtemp,&localState,0); } randno=-1+2*curand_uniform(&localState); phi = TWOPI*curand_uniform(&localState); vxtemp.x = sqrtf(1-randno*randno)*cosf(phi); vxtemp.y = sqrtf(1-randno*randno)*sinf(phi); vxtemp.z = randno; //printf("%d vx vy vz are %f %f %f\n", id, vxtemp.x,vxtemp.y,vxtemp.z); x_gBrachy[(2*ind)%NPART]=xtemp; //avoid boundary excess x_gBrachy[(2*ind+1)%NPART]=xtemp; d_time[(2*ind)%NPART]=double(tref+ptime)*1e6; 
d_time[(2*ind+1)%NPART]=double(tref+ptime)*1e6; d_eventid[(2*ind)%NPART]=ind; d_eventid[(2*ind+1)%NPART]=ind; phi = TWOPI*curand_uniform(&localState); randno=curand_normal(&localState)*NonAngle_d; vxtemp.w=MC2+randno*MC2/2.0;//Noncollinearity vx_gBrachy[(2*ind)%NPART]=vxtemp; rotate(&(vxtemp.x), &(vxtemp.y), &(vxtemp.z),-cosf(randno), phi); vxtemp.w=MC2-randno*MC2/2.0; vx_gBrachy[(2*ind+1)%NPART]=vxtemp; } } id+=blockDim.x*gridDim.x; } cuseed[id%NRAND]=localState; } __global__ void setPositionForPhoton(int total,int curpar, int useprange) /******************************************************************* c* Sample photon information from positron PSF * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; curandState localState = cuseed[id%NRAND]; float randno, phi; float3 xtemp, tmptmp; float4 vxtemp; while(id<total) { xtemp = x_gBrachy[id]; vxtemp = vx_gBrachy[id]; if(useprange == 1) { xtemp = setPositronRange(xtemp, vxtemp, &localState, 1); } randno = -1+2*curand_uniform(&localState); phi = TWOPI*curand_uniform(&localState); vxtemp.x = sqrtf(1-randno*randno)*cosf(phi); vxtemp.y = sqrtf(1-randno*randno)*sinf(phi); vxtemp.z = randno; x_gBrachy[id]=xtemp; //avoid boundary excess x_gBrachy[(id+total)%NPART]=xtemp; d_eventid[id]=curpar+id; d_eventid[(id+total)%NPART]=curpar+id; phi = TWOPI*curand_uniform(&localState); randno=curand_normal(&localState)*NonAngle_d; vxtemp.w=MC2+randno*MC2/2.0; vx_gBrachy[id]=vxtemp; rotate(&(vxtemp.x), &(vxtemp.y), &(vxtemp.z),-cosf(randno), phi); vxtemp.w=MC2-randno*MC2/2.0; vx_gBrachy[(id+total)%NPART]=vxtemp; id+=blockDim.x*gridDim.x; } cuseed[id%NRAND]=localState; } //digitizer void __global__ setSitenum(int total, Event* events_d,int depth) /******************************************************************* c* set site index according to depth * c******************************************************************/ { int id=blockIdx.x*blockDim.x + threadIdx.x; while(id<total) { switch(depth) { case 0: { events_d[id].siten=0; break; } case 1: { events_d[id].siten=events_d[id].pann; break; } case 2: { events_d[id].siten=events_d[id].pann*moduleN+events_d[id].modn; break; } case 3: { events_d[id].siten=events_d[id].pann*moduleN*crystalN+events_d[id].modn*crystalN+events_d[id].cryn; break; } } id+=blockDim.x*gridDim.x; } } void __global__ energywindow(int* counts, Event* events,int total, float thresholder, float upholder) //this is for the energy window part in digitizer { int id=blockIdx.x*blockDim.x + threadIdx.x; int num=0; while(id<total) { if(events[id].E<thresholder || events[id].E>upholder) { events[id].t=MAXT; num++; } id+=blockDim.x*gridDim.x; } if(num) atomicSub(counts,num); } void __global__ deadtime(int* counts,Event* events,int total, float interval, int deadtype) { //this is the deadtime part in digitizer //deadtype 0 for paralysable, 1 for non int id=blockIdx.x*blockDim.x + threadIdx.x; int start,current,i,k; float tdead; while(id<total) { start=id; if(start==0||events[start].siten!=events[start-1].siten||events[start].t>(events[start-1].t+interval))//find the start index { current=start; i=current+1; k=0; tdead=events[start].t; while(i<total) { while(events[i].siten==events[current].siten && events[i].t<(tdead+interval)) { //events[current].E+=events[i].E; if(!deadtype) { tdead=events[i].t; //paralyzable accounts for pile-up effect //events[current].t=events[i].t; } events[i].t=MAXT; i++; k++; if(i==total) break; } if(i==total) break; 
if(events[i].siten!=events[i-1].siten||events[i].t>(events[i-1].t+interval)) break; current=i; tdead=events[current].t; i++; } atomicSub(counts,k); } id+=blockDim.x*gridDim.x; } } void __global__ addnoise(int* counts, Event* events_d, float lambda, float Emean, float sigma, float interval) { //this is the noise part for digitizer int id = blockIdx.x*blockDim.x + threadIdx.x; Event events[6]; float t=id*interval;//0; curandState localstate = cuseed[id%NRAND]; int i=0, ind=0; while(t<(id+1)*interval) { t+=-__logf(curand_uniform(&localstate))*lambda; if(t<(id+1)*interval) { events[i].t=t;//+id*interval; events[i].E=Emean+sigma*curand_normal(&localstate);//2.355; events[i].x=curand_uniform(&localstate);//need to be implemented to be matched to global coordinates events[i].y=curand_uniform(&localstate); events[i].z=curand_uniform(&localstate); events[i].parn=-1; events[i].pann=int(dev_totalPanels*curand_uniform(&localstate)); events[i].modn=int(moduleN*curand_uniform(&localstate)); events[i].cryn=int(crystalN*curand_uniform(&localstate)); events[i].siten=events[i].pann*moduleN*crystalN+events[i].modn*crystalN+events[i].cryn; i=(i+1)%6; if(!i) { ind=atomicAdd(counts,6); for(int j=0; j<6; j++) events_d[ind+j]=events[j]; } } } cuseed[id]=localstate; ind=atomicAdd(counts,i); for(int j=0; j<i; j++) events_d[ind+j]=events[j]; } int __device__ adder(int* counts_d, Event* events_d, Event event) { //this is the adder part in digitizer for(int i=0; i < counts_d[0]; i++) { if(event.siten == events_d[i].siten) { events_d[i].x = (events_d[i].x*events_d[i].E + event.x*event.E)/(events_d[i].E + event.E); events_d[i].y = (events_d[i].y*events_d[i].E + event.y*event.E)/(events_d[i].E + event.E); events_d[i].z = (events_d[i].z*events_d[i].E + event.z*event.E)/(events_d[i].E + event.E); events_d[i].E = (events_d[i].E + event.E); return 1; } } //no recorded event inside the the same crystal events_d[counts_d[0]]=event; counts_d[0]++; return 1; } int __device__ readout(int* counts_d, Event* events_d,int depth, int policy) { //this is for the readout part in digitizer //depth means the readout level. 
0,1,2,3 represents world,panel,module,cry //policy 0,1 for winnertakeall and energy centroid if(depth==3) return 1; if(policy==1) depth = 2; //the readout part switch(depth) { case 0: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=0; break; } case 1: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=events_d[i].pann; break; } case 2: { for(int i=0; i<counts_d[0]; i++) events_d[i].siten=events_d[i].pann*moduleN+events_d[i].modn; break; } } int ind=0; for(int i=0; i<counts_d[0]; i++) { Event event0 = events_d[i]; if(event0.t>MAXT*0.1) continue; for(int j=i+1; j<counts_d[0]; j++) { Event event = events_d[j]; if((event.parn==event0.parn)&&(event.siten == event0.siten)) { if(policy==1) { event0.x = (event0.x*event0.E + event.x*event.E)/(event0.E + event.E); event0.y = (event0.y*event0.E + event.y*event.E)/(event0.E + event.E); event0.z = (event0.z*event0.E + event.z*event.E)/(event0.E + event.E); event0.E = (event0.E + event.E); events_d[j].t=MAXT; continue; } event0=(event0.E>event.E)?event0:event; events_d[j].t=MAXT; } } events_d[ind]=event0; ind++; } counts_d[1]=ind; return 1; } void __global__ blur(int total, Event* events, int Eblurpolicy, float Eref, float Rref, float slope, float Spaceblur) { //this is the energy blurring part in digitizer int i = blockIdx.x*blockDim.x + threadIdx.x; float R=0; curandState localstate=cuseed[i%NRAND]; while(i<total) { if(Eblurpolicy==0) R = sqrt(Eref/events[i].E)*Rref; if(Eblurpolicy==1) R = Rref+slope*(events[i].E-Eref)/1e6; //slope in 1/MeV if(R<0) R=0; events[i].E += curand_normal(&localstate)*R*events[i].E/2.35482; //other distribution of energy blurring need to be implemented if(Spaceblur>0) { events[i].x+=Spaceblur*curand_normal(&localstate); events[i].y+=Spaceblur*curand_normal(&localstate); events[i].z+=Spaceblur*curand_normal(&localstate); } i+=blockDim.x*gridDim.x; } cuseed[i%NRAND]=localstate; } __global__ void photonde(Event* events_d, int* counts_d, int nactive, int bufferID, float* dens, int *mat, int *panelID, float *lenx, float *leny, float *lenz, float *MODx, float *MODy, float *MODz, float *Msx, float *Msy, float *Msz, float *LSOx, float *LSOy, float *LSOz, float *sx, float *sy, float *sz, float *ox, float *oy, float *oz, float *dx, float *dy, float *dz, float *UXx, float *UXy, float *UXz, float *UYx, float *UYy, float *UYz,float *UZx, float *UZy, float *UZz) /******************************************************************* c* Transports a photon until it either escapes from the * c* detector or its energy drops below EabsPhoton * c* * c******************************************************************/ { int id = blockIdx.x*blockDim.x + threadIdx.x; int tid=threadIdx.x; Event events[4]; int counts[2]= {0,4}; Event event; int rc = 0; int cc = 0; float tempDen=0.0f; int tempMat=0; __shared__ int nsstktemp; if(tid==0) { nsstktemp = 0; } __syncthreads(); __shared__ float sftemp[NSSTACKSHARED]; __shared__ int sidtemp[NSSTACKSHARED]; extern __shared__ float s[]; float *dens_S = s; int *mat_S = (int*)&dens_S[2]; int *panelID_S = (int*)&mat_S[2]; float *lenx_S = (float*)&panelID_S[dev_totalPanels]; float *leny_S = (float*)&lenx_S[dev_totalPanels]; float *lenz_S = (float*)&leny_S[dev_totalPanels]; float *MODx_S = (float*)&lenz_S[dev_totalPanels]; float *MODy_S = (float*)&MODx_S[dev_totalPanels]; float *MODz_S = (float*)&MODy_S[dev_totalPanels]; float *Msx_S = (float*)&MODz_S[dev_totalPanels]; float *Msy_S = (float*)&Msx_S[dev_totalPanels]; float *Msz_S = (float*)&Msy_S[dev_totalPanels]; float *LSOx_S = 
(float*)&Msz_S[dev_totalPanels]; float *LSOy_S = (float*)&LSOx_S[dev_totalPanels]; float *LSOz_S = (float*)&LSOy_S[dev_totalPanels]; float *sx_S = (float*)&LSOz_S[dev_totalPanels]; float *sy_S = (float*)&sx_S[dev_totalPanels]; float *sz_S = (float*)&sy_S[dev_totalPanels]; float *ox_S = (float*)&sz_S[dev_totalPanels]; float *oy_S = (float*)&ox_S[dev_totalPanels]; float *oz_S = (float*)&oy_S[dev_totalPanels]; float *dx_S = (float*)&oz_S[dev_totalPanels]; float *dy_S = (float*)&dx_S[dev_totalPanels]; float *dz_S = (float*)&dy_S[dev_totalPanels]; float *UXx_S = (float*)&dz_S[dev_totalPanels]; float *UXy_S = (float*)&UXx_S[dev_totalPanels]; float *UXz_S = (float*)&UXy_S[dev_totalPanels]; float *UYx_S = (float*)&UXz_S[dev_totalPanels]; float *UYy_S = (float*)&UYx_S[dev_totalPanels]; float *UYz_S = (float*)&UYy_S[dev_totalPanels]; float *UZx_S = (float*)&UYz_S[dev_totalPanels]; float *UZy_S = (float*)&UZx_S[dev_totalPanels]; float *UZz_S = (float*)&UZy_S[dev_totalPanels]; if(tid==0) { for (int i=0; i<2; i++) { mat_S[i]=mat[i]; dens_S[i]=dens[i]; } for(int i=0; i<dev_totalPanels; i++) { panelID_S[i]=panelID[i]; lenx_S[i]=lenx[i]; leny_S[i]=leny[i]; lenz_S[i]=lenz[i]; MODx_S[i]=MODx[i]; MODy_S[i]=MODy[i]; MODz_S[i]=MODz[i]; Msx_S[i]=Msx[i]; Msy_S[i]=Msy[i]; Msz_S[i]=Msz[i]; LSOx_S[i]=LSOx[i]; LSOy_S[i]=LSOy[i]; LSOz_S[i]=LSOz[i]; sx_S[i]=sx[i]; sy_S[i]=sy[i]; sz_S[i]=sz[i]; ox_S[i]=ox[i]; oy_S[i]=oy[i]; oz_S[i]=oz[i]; dx_S[i]=dx[i]; dy_S[i]=dy[i]; dz_S[i]=dz[i]; UXx_S[i]=UXx[i]; UXy_S[i]=UXy[i]; UXz_S[i]=UXz[i]; UYx_S[i]=UYx[i]; UYy_S[i]=UYy[i]; UYz_S[i]=UYz[i]; UZx_S[i]=UZx[i]; UZy_S[i]=UZy[i]; UZz_S[i]=UZz[i]; }//*/ } __syncthreads(); // obtain current id on thread curandState localState = cuseed[id%NRAND]; if( id < nactive ) { if(d_time[id]>0) { float3 xtemp = x_gBrachy[id]; float4 vxtemp = vx_gBrachy[id]; double tof = d_time[id]; int eid = d_eventid[id]; int rp = RP[id]; int cp = CP[id]; // change global coordinates to local coordinates int paID=-1; float3 xtemp2; float4 vxtemp2; //get the panelid crystal id that the particle enters??? 
for (int i=0; i<dev_totalPanels; i++) { //new coordinates float tempx=(xtemp.x-ox_S[i])*UXx_S[i]+(xtemp.y-oy_S[i])*UXy_S[i]+(xtemp.z-oz_S[i])*UXz_S[i]; float tempy=(xtemp.x-ox_S[i])*UYx_S[i]+(xtemp.y-oy_S[i])*UYy_S[i]+(xtemp.z-oz_S[i])*UYz_S[i]; float tempz=(xtemp.x-ox_S[i])*UZx_S[i]+(xtemp.y-oy_S[i])*UZy_S[i]+(xtemp.z-oz_S[i])*UZz_S[i]; float tempvx=vxtemp.x*UXx[i]+vxtemp.y*UXy[i]+vxtemp.z*UXz[i];//component along different directions float tempvy=vxtemp.x*UYx[i]+vxtemp.y*UYy[i]+vxtemp.z*UYz[i]; float tempvz=vxtemp.x*UZx[i]+vxtemp.y*UZy[i]+vxtemp.z*UZz[i]; float tempx2=0.0f; if(tempvx*dx_S[i]>=0) { float tempy2=tempy-tempx/tempvx*tempvy; float tempz2=tempz-tempx/tempvx*tempvz; if(abs(tempy2)<leny_S[i]/2 & abs(tempz2)<lenz_S[i]/2) { xtemp2.x=tempx2; xtemp2.y=tempy2; xtemp2.z=tempz2; //xtemp2.w=xtemp.w; tof += -tempx/(29979.2458*tempvx); vxtemp2.x=tempvx; vxtemp2.y=tempvy; vxtemp2.z=tempvz; vxtemp2.w=vxtemp.w; paID=panelID_S[i]; break; } } float tempy2=tempy-tempx/tempvx*tempvy; float tempz2=tempz-tempx/tempvx*tempvz; xtemp2.x=tempx2; xtemp2.y=tempy2; xtemp2.z=tempz2; //xtemp2.w=xtemp.w; vxtemp2.x=tempvx; vxtemp2.y=tempvy; vxtemp2.z=tempvz; vxtemp2.w=vxtemp.w; paID=-1; } // Loop until it either escapes or is absorbed: float lammin,s; for(;;) { if (paID==-1) break; // Get lambda from the minimum lambda at the current energy: lammin = lamwckde(vxtemp2.w); //0.1;//make sure use the one corresponding to detector s = -lammin*__logf(curand_uniform(&localState)); // Get the coordinates of the photon after passing a free length xtemp2.x += s*vxtemp2.x; xtemp2.y += s*vxtemp2.y; xtemp2.z += s*vxtemp2.z; tof += s/29979.2458; // if out of panel if (paID==-1|abs(xtemp2.y)>leny_S[paID]*0.5| abs(xtemp2.z)>lenz_S[paID]*0.5 |(xtemp2.x*dx_S[paID])<0 |(xtemp2.x*dx_S[paID])>lenx_S[paID]) { break; } int m_id=-1; //material id int M_id=-1; // module id int L_id=-1; // LSO id crystalSearch(xtemp2,leny_S[paID],lenz_S[paID],MODy_S[paID],MODz_S[paID],Msy_S[paID],Msz_S[paID], LSOy_S[paID],LSOz_S[paID],sy_S[paID],sz_S[paID],dy_S[paID], dz_S[paID], &m_id, &M_id, &L_id); tempDen = dens_S[m_id]; tempMat = mat_S[m_id]; // Apply Woodcock trick: float lamden = lammin*tempDen; float prob = 1.0-lamden*itphip_G(tempMat, vxtemp2.w); if(prob<0) prob=0; float randno = curand_uniform(&localState); // Compton: // No real event; continue jumping: if (randno < prob) continue; prob += lamden*icptip(tempMat, vxtemp2.w); if (randno < prob) { float efrac, costhe; comsam(vxtemp2.w, &localState, &efrac, &costhe);//, tempMat); float de = vxtemp2.w * (1.0f-efrac); float phi = TWOPI*curand_uniform(&localState); //record events if (nsstktemp!= NSSTACKSHARED && m_id==0)//only events inside crystal can be recorded { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 1; event.parn= id+bufferID; event.cryn=L_id; event.modn=M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn;//maybe should use event.pann-1 or event.cryn-1 to make sure siten start from 0 event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; sftemp[ind] = de;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.E = de; event.t = tof; event.x = xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts,events,event); } vxtemp2.w -= de; if (vxtemp2.w < eabsph) { if (nsstktemp!= NSSTACKSHARED && m_id==0) { 
int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 2; event.parn=id+bufferID; event.cryn=L_id; event.modn= M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn; event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; sftemp[ind] = vxtemp2.w;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.E = vxtemp2.w; event.t = tof; event.x =xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts,events,event); } break; } rotate(&vxtemp2.x,&vxtemp2.y,&vxtemp2.z,costhe,phi); continue; } // Rayleigh: prob += lamden*irylip(tempMat, vxtemp2.w); if (randno < prob) { rc++; float costhe; rylsam(vxtemp2.w, tempMat, &localState, &costhe); float phi = TWOPI*curand_uniform(&localState); #if RECORDRayleigh==1 if (nsstktemp!= NSSTACKSHARED && m_id==0) { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 3; sftemp[ind] = 0.0f;//s; sftemp[ind+1] = tof;//s;s;// sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; } #endif rotate(&vxtemp2.x,&vxtemp2.y,&vxtemp2.z,costhe,phi); continue; } // Photoelectric: //if(id <1) printf("photo den lamda prob are %f %f %f\n", tempDen, lamden,1-prob); if (nsstktemp!= NSSTACKSHARED && m_id==0) { int ind = atomicAdd(&nsstktemp,5); sidtemp[ind] = id+bufferID; sidtemp[ind+1] = paID; sidtemp[ind+2] = M_id; sidtemp[ind+3] = L_id; sidtemp[ind+4] = 4; sftemp[ind] = vxtemp2.w; sftemp[ind+1] = tof;//s; sftemp[ind+2] = xtemp2.x; sftemp[ind+3] = xtemp2.y; sftemp[ind+4] = xtemp2.z; event.parn=id+bufferID; event.cryn=L_id; event.modn= M_id; event.pann=paID; event.siten=event.pann*moduleN*crystalN+event.modn*crystalN+event.cryn; event.eventid = eid; event.CP = cp; event.RP = rp; event.CC = 0; event.RC = rc; event.E = vxtemp2.w; event.t = tof; event.x = xtemp2.x; event.y = xtemp2.y; event.z = xtemp2.z; //ind=atomicAdd(counts_d,1); //events_d[ind]=event; adder(counts, events, event);//this is for digitizer } break; } if(counts[0]) { readout(counts,events,rdepth_d,rpolicy_d); int ind=atomicAdd(counts_d,counts[1]); for(int i=0; i<counts[1]; i++) events_d[ind+i]=events[i]; }//*/ } //id+=blockDim.x*gridDim.x; } __syncthreads(); if(id<NRAND) cuseed[id] = localState; __shared__ int istart; if(threadIdx.x==0) { //printf("nsstktemp1 = %d\n",nsstktemp); istart = atomicAdd(&nsstk, nsstktemp); //printf("istart = %d\n",istart); } __syncthreads(); // if(id==0) printf("total events=%d\ncurrent total hits=%d\n", counts_d[0],nsstk); for(int i = 0; i < 1+(nsstktemp)/blockDim.x; i++) { if(nsstktemp == 0) break; int ind = istart + i*blockDim.x + tid; if(ind < istart + nsstktemp && ind<NSSTACK) { sf[ind] = sftemp[i*blockDim.x + tid];//this is for hits events sid[ind] = sidtemp[i*blockDim.x + tid]; } } __syncthreads();//*/ } // crystal index and material type __device__ void crystalSearch(float3 xtemp2,float leny_S,float lenz_S,float MODy_S,float MODz_S,float Msy_S,float Msz_S,float LSOy_S,float LSOz_S,float sy_S,float sz_S,float dy_S, float dz_S, int *m_id, int *M_id, int *L_id) { float x=xtemp2.x; float y=xtemp2.y; float z=xtemp2.z; for(int tmp=0;tmp<Nsurface_d;tmp++) { if((surface_d[tmp*10+0]*x*x+surface_d[tmp*10+1]*y*y+surface_d[tmp*10+2]*z*z+surface_d[tmp*10+3]*x*y+surface_d[tmp*10+4]*x*z+surface_d[tmp*10+5]*y*z 
+surface_d[tmp*10+6]*x+surface_d[tmp*10+7]*y+surface_d[tmp*10+8]*z+surface_d[tmp*10+9])<0) { *m_id=1; return; } } y=leny_S/2+xtemp2.y;//*dy_S; z=lenz_S/2+xtemp2.z;//*dz_S; int M_id_y=floorf(y/(MODy_S+Msy_S))>0?int(y/(MODy_S+Msy_S)):0; int M_id_z=floorf(z/(MODz_S+Msz_S))>0?int(z/(MODz_S+Msz_S)):0; *M_id=M_id_z*moduleNy+M_id_y; y=y-M_id_y*(MODy_S+Msy_S); z=z-M_id_z*(MODz_S+Msz_S); if(y> MODy_S || z> MODz_S) { *m_id=1; return; } int L_id_y=floorf(y/(LSOy_S+sy_S))>0?int(y/(LSOy_S+sy_S)):0; int L_id_z=floorf(z/(LSOz_S+sz_S))>0?int(z/(LSOz_S+sz_S)):0; *L_id=L_id_z*crystalNy+L_id_y; y=y-L_id_y*(LSOy_S+sy_S); z=z-L_id_z*(LSOz_S+sz_S); if(y>LSOy_S || z> LSOz_S) { *m_id=1; return; } *m_id=0;//*/ } #endif
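/* A self-contained sketch of the Woodcock ("delta tracking") step used by both
 * photon kernels above: free paths are drawn from the majorant interaction
 * coefficient, and a tentative collision is kept as a real one only with
 * probability mu(x)/mu_max; otherwise it is a null collision and the photon keeps
 * flying without any boundary-crossing logic.  The 1-D slab, density profile and
 * attenuation values below are illustrative placeholders, not the phantom or
 * detector data used by the kernels. */
#include <cmath>
#include <cstdio>
#include <random>

int main()
{
    std::mt19937 rng(1234);
    std::uniform_real_distribution<double> uni(0.0, 1.0);

    const double mu_max = 0.5;                      // majorant attenuation [1/cm]
    const double slab   = 20.0;                     // slab thickness [cm]
    auto mu = [](double x) {                        // placeholder mu(x) <= mu_max
        return 0.1 + 0.4 * std::exp(-(x - 10.0) * (x - 10.0) / 8.0);
    };

    int escaped = 0, interacted = 0;
    for (int n = 0; n < 100000; ++n) {
        double x = 0.0;
        for (;;) {
            x += -std::log(1.0 - uni(rng)) / mu_max;   // jump with the majorant
            if (x > slab) { ++escaped; break; }        // left the geometry
            if (uni(rng) < mu(x) / mu_max) {           // real collision: the kernels
                ++interacted;                          // would now pick Compton,
                break;                                 // Rayleigh or photoelectric
            }                                          // else: null collision, continue
        }
    }
    std::printf("escaped %d, interacted %d\n", escaped, interacted);
    return 0;
}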
9dad41e3f665d3df8e218dfa4fbcc2e1e6485dd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) __ldg(x + i) template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* x, const T* filter, T* y) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; const int hstart = h_out * stride_h - pad_h; const int wstart = w_out * stride_w - pad_w; const int x_offset = (n * C + c) * H * W; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { const int h = hstart + h_k * dilation_h; const int w = wstart + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = x_offset + h * W + w; val += convert::To<AccT>(Multiplies(LDG(x, xi), LDG(filter, fi))); } ++fi; } } y[yi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* x, const T* filter, T* y) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_h / out_h; const int hstart = h_out * stride_h - pad_h; const int wstart = w_out * stride_w - pad_w; const int x_offset = n * H * W * C + c; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { const int h = hstart + h_k * dilation_h; const int w = wstart + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = x_offset + (h * W + w) * C; val += convert::To<AccT>(Multiplies(LDG(x, xi), LDG(filter, fi))); } ++fi; } } y[yi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* filter, T* dx) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? 
kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int w = xi % W; const int h = (xi / W) % H; const int c = (xi / W / H) % C; const int n = xi / W / H / C; const int y_offset = (n * C + c) * out_h * out_w; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { int h_out = h + pad_h - h_k * dilation_h; int w_out = w + pad_w - w_k * dilation_w; if ((h_out % stride_h == 0) && (w_out % stride_w == 0)) { h_out = h_out / stride_h; w_out = w_out / stride_w; if (math::utils::IsAGeZeroAndALtB(h_out, out_h) && math::utils::IsAGeZeroAndALtB(w_out, out_w)) { const int yi = y_offset + h_out * out_w + w_out; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(filter, fi))); } } ++fi; } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* filter, T* dx) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int c = xi % C; const int w = (xi / C) % W; const int h = (xi / C / W) % H; const int n = xi / C / W / H; const int y_offset = n * out_h * out_w * C + c; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { int h_out = h + pad_h - h_k * dilation_h; int w_out = w + pad_w - w_k * dilation_w; if ((h_out % stride_h == 0) && (w_out % stride_w == 0)) { h_out = h_out / stride_h; w_out = w_out / stride_w; if (math::utils::IsAGeZeroAndALtB(h_out, out_h) && math::utils::IsAGeZeroAndALtB(w_out, out_w)) { const int yi = y_offset + (h_out * out_w + w_out) * C; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(filter, fi))); } } ++fi; } } dx[xi] = val; } } template <typename T, typename AccT> __global__ void _DepthwiseConv2dWGradNCHW( const int N, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* x, T* dfilter) { const auto Multiplies = math::MultipliesFunctor<T>(); const int block_idx = blockIdx.x; const int w_k = block_idx % kernel_w; const int h_k = (block_idx / kernel_w) % kernel_h; const int c = block_idx / kernel_w / kernel_h; const int n = threadIdx.x / 32; const int nwarps = blockDim.x / 32; const int lane_idx = threadIdx.x % 32; const int out_dim = out_h * out_w; AccT val = AccT(0); for (int i = n; i < N; i += nwarps) { for (int j = lane_idx; j < out_dim; j += 32) { const int h = (j / out_w) * stride_h - pad_h + h_k * dilation_h; const int w = (j % out_w) * stride_w - pad_w + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = ((i * C + c) * H + h) * W + w; const int yi = (i * C + c) * out_dim + j; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(x, xi))); } } } typedef hipcub::BlockReduce<AccT, 256> Reduce; __shared__ typename Reduce::TempStorage 
storage; val = Reduce(storage).Sum(val); if (threadIdx.x == 0) { dfilter[block_idx] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _DepthwiseConv2dWGradNHWC( const int N, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* x, T* dfilter) { const auto Multiplies = math::MultipliesFunctor<T>(); const int block_idx = blockIdx.x; const int w_k = block_idx % kernel_w; const int h_k = (block_idx / kernel_w) % kernel_h; const int c = block_idx / kernel_w / kernel_h; const int n = threadIdx.x / 32; const int nwarps = blockDim.x / 32; const int lane_idx = threadIdx.x % 32; const int out_dim = out_h * out_w; AccT val = AccT(0); for (int i = n; i < N; i += nwarps) { for (int j = lane_idx; j < out_dim; j += 32) { const int h = (j / out_w) * stride_h - pad_h + h_k * dilation_h; const int w = (j % out_w) * stride_w - pad_w + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = ((i * H + h) * W + w) * C + c; const int yi = (i * out_dim + j) * C + c; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(x, xi))); } } } typedef hipcub::BlockReduce<AccT, 256> Reduce; __shared__ typename Reduce::TempStorage storage; val = Reduce(storage).Sum(val); if (threadIdx.x == 0) { dfilter[block_idx] = convert::To<T>(val); } } #undef LDG } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_DATA_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ if (kernel_h == 3 && kernel_w == 3) { \ hipLaunchKernelGGL(( name##NCHW<T, AccT, 3, 3>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (kernel_h == 5 && kernel_w == 5) { \ hipLaunchKernelGGL(( name##NCHW<T, AccT, 5, 5>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (kernel_h == 7 && kernel_w == 7) { \ hipLaunchKernelGGL(( name##NCHW<T, AccT, 7, 7>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else { \ hipLaunchKernelGGL(( name##NCHW<T, AccT, -1, -1>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } \ } else if (data_format == "NHWC") { \ if (kernel_h == 3 && kernel_w == 3) { \ hipLaunchKernelGGL(( name##NHWC<T, AccT, 3, 3>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (kernel_h == 5 && kernel_w == 5) { \ hipLaunchKernelGGL(( name##NHWC<T, AccT, 5, 5>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (kernel_h == 7 && kernel_w == 7) { \ hipLaunchKernelGGL(( name##NHWC<T, AccT, 7, 7>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else { \ hipLaunchKernelGGL(( name##NHWC<T, AccT, -1, -1>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DISPATCH_FILTER_KERNEL(name, T, AccT, kBlocks, kThreads, ...) 
\ if (data_format == "NCHW") { \ hipLaunchKernelGGL(( name##NCHW<T, AccT>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else if (data_format == "NHWC") { \ hipLaunchKernelGGL(( name##NHWC<T, AccT>) \ , dim3(kBlocks), dim3(kThreads), 0, ctx->cuda_stream(), __VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(T, AccT) \ template <> \ void DepthwiseConv2d<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* x, \ const T* filter, \ T* y, \ CUDAContext* ctx) { \ const auto nthreads = N * C * out_h * out_w; \ DISPATCH_DATA_KERNEL( \ _DepthwiseConv2d, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(filter), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T, AccT) \ template <> \ void DepthwiseConv2dGrad<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* dy, \ const T* filter, \ T* dx, \ CUDAContext* ctx) { \ auto nthreads = N * C * H * W; \ DISPATCH_DATA_KERNEL( \ _DepthwiseConv2dGrad, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(filter), \ reinterpret_cast<math::ScalarType<T>::type*>(dx)); \ } \ template <> \ void DepthwiseConv2dWGrad<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* dy, \ const T* x, \ T* dfilter, \ CUDAContext* ctx) { \ const auto kBlocks = C * kernel_h * kernel_w; \ const auto nthreads = 256; \ DISPATCH_FILTER_KERNEL( \ _DepthwiseConv2dWGrad, \ math::ScalarType<T>::type, \ AccT, \ kBlocks, \ nthreads, \ N, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(dfilter)); \ } DEFINE_KERNEL_LAUNCHER(float16, float); DEFINE_KERNEL_LAUNCHER(float, float); DEFINE_GRAD_KERNEL_LAUNCHER(float16, float); DEFINE_GRAD_KERNEL_LAUNCHER(float, float); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER #undef DISPATCH_DATA_KERNEL #undef DISPATCH_FILTER_KERNEL } // namespace kernels } // namespace dragon #endif // USE_ROCM
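/* Note on the launch macros above: apart from the hip/hip_runtime.h header, the
 * hipcub namespace and the USE_ROCM guard, this file differs from the CUDA file
 * that follows only in launch syntax.  hipify rewrites every triple-chevron
 * launch
 *
 *     kernel<<<grid, block, sharedMemBytes, stream>>>(args...);
 *
 * into the equivalent
 *
 *     hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...);
 *
 * with the arguments mapping one-to-one onto the CUDA launch configuration. */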
9dad41e3f665d3df8e218dfa4fbcc2e1e6485dd8.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) __ldg(x + i) template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* x, const T* filter, T* y) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; const int hstart = h_out * stride_h - pad_h; const int wstart = w_out * stride_w - pad_w; const int x_offset = (n * C + c) * H * W; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { const int h = hstart + h_k * dilation_h; const int w = wstart + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = x_offset + h * W + w; val += convert::To<AccT>(Multiplies(LDG(x, xi), LDG(filter, fi))); } ++fi; } } y[yi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* x, const T* filter, T* y) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_h / out_h; const int hstart = h_out * stride_h - pad_h; const int wstart = w_out * stride_w - pad_w; const int x_offset = n * H * W * C + c; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { const int h = hstart + h_k * dilation_h; const int w = wstart + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = x_offset + (h * W + w) * C; val += convert::To<AccT>(Multiplies(LDG(x, xi), LDG(filter, fi))); } ++fi; } } y[yi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* filter, T* dx) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? 
kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int w = xi % W; const int h = (xi / W) % H; const int c = (xi / W / H) % C; const int n = xi / W / H / C; const int y_offset = (n * C + c) * out_h * out_w; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { int h_out = h + pad_h - h_k * dilation_h; int w_out = w + pad_w - w_k * dilation_w; if ((h_out % stride_h == 0) && (w_out % stride_w == 0)) { h_out = h_out / stride_h; w_out = w_out / stride_w; if (math::utils::IsAGeZeroAndALtB(h_out, out_h) && math::utils::IsAGeZeroAndALtB(w_out, out_w)) { const int yi = y_offset + h_out * out_w + w_out; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(filter, fi))); } } ++fi; } } dx[xi] = convert::To<T>(val); } } template <typename T, typename AccT, int kPatchH, int kPatchW> __global__ void _DepthwiseConv2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* filter, T* dx) { const int patch_h = kPatchH < 0 ? kernel_h : kPatchH; const int patch_w = kPatchW < 0 ? kernel_w : kPatchW; const auto Multiplies = math::MultipliesFunctor<T>(); CUDA_1D_KERNEL_LOOP(xi, nthreads) { const int c = xi % C; const int w = (xi / C) % W; const int h = (xi / C / W) % H; const int n = xi / C / W / H; const int y_offset = n * out_h * out_w * C + c; int fi = c * patch_h * patch_w; AccT val = AccT(0); #pragma unroll for (int h_k = 0; h_k < patch_h; ++h_k) { #pragma unroll for (int w_k = 0; w_k < patch_w; ++w_k) { int h_out = h + pad_h - h_k * dilation_h; int w_out = w + pad_w - w_k * dilation_w; if ((h_out % stride_h == 0) && (w_out % stride_w == 0)) { h_out = h_out / stride_h; w_out = w_out / stride_w; if (math::utils::IsAGeZeroAndALtB(h_out, out_h) && math::utils::IsAGeZeroAndALtB(w_out, out_w)) { const int yi = y_offset + (h_out * out_w + w_out) * C; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(filter, fi))); } } ++fi; } } dx[xi] = val; } } template <typename T, typename AccT> __global__ void _DepthwiseConv2dWGradNCHW( const int N, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* x, T* dfilter) { const auto Multiplies = math::MultipliesFunctor<T>(); const int block_idx = blockIdx.x; const int w_k = block_idx % kernel_w; const int h_k = (block_idx / kernel_w) % kernel_h; const int c = block_idx / kernel_w / kernel_h; const int n = threadIdx.x / 32; const int nwarps = blockDim.x / 32; const int lane_idx = threadIdx.x % 32; const int out_dim = out_h * out_w; AccT val = AccT(0); for (int i = n; i < N; i += nwarps) { for (int j = lane_idx; j < out_dim; j += 32) { const int h = (j / out_w) * stride_h - pad_h + h_k * dilation_h; const int w = (j % out_w) * stride_w - pad_w + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = ((i * C + c) * H + h) * W + w; const int yi = (i * C + c) * out_dim + j; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(x, xi))); } } } typedef cub::BlockReduce<AccT, 256> Reduce; __shared__ typename Reduce::TempStorage 
storage; val = Reduce(storage).Sum(val); if (threadIdx.x == 0) { dfilter[block_idx] = convert::To<T>(val); } } template <typename T, typename AccT> __global__ void _DepthwiseConv2dWGradNHWC( const int N, const int C, const int H, const int W, const int out_h, const int out_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* dy, const T* x, T* dfilter) { const auto Multiplies = math::MultipliesFunctor<T>(); const int block_idx = blockIdx.x; const int w_k = block_idx % kernel_w; const int h_k = (block_idx / kernel_w) % kernel_h; const int c = block_idx / kernel_w / kernel_h; const int n = threadIdx.x / 32; const int nwarps = blockDim.x / 32; const int lane_idx = threadIdx.x % 32; const int out_dim = out_h * out_w; AccT val = AccT(0); for (int i = n; i < N; i += nwarps) { for (int j = lane_idx; j < out_dim; j += 32) { const int h = (j / out_w) * stride_h - pad_h + h_k * dilation_h; const int w = (j % out_w) * stride_w - pad_w + w_k * dilation_w; if (math::utils::IsAGeZeroAndALtB(h, H) && math::utils::IsAGeZeroAndALtB(w, W)) { const int xi = ((i * H + h) * W + w) * C + c; const int yi = (i * out_dim + j) * C + c; val += convert::To<AccT>(Multiplies(LDG(dy, yi), LDG(x, xi))); } } } typedef cub::BlockReduce<AccT, 256> Reduce; __shared__ typename Reduce::TempStorage storage; val = Reduce(storage).Sum(val); if (threadIdx.x == 0) { dfilter[block_idx] = convert::To<T>(val); } } #undef LDG } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_DATA_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ if (kernel_h == 3 && kernel_w == 3) { \ name##NCHW<T, AccT, 3, 3> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (kernel_h == 5 && kernel_w == 5) { \ name##NCHW<T, AccT, 5, 5> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (kernel_h == 7 && kernel_w == 7) { \ name##NCHW<T, AccT, 7, 7> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ name##NCHW<T, AccT, -1, -1> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } \ } else if (data_format == "NHWC") { \ if (kernel_h == 3 && kernel_w == 3) { \ name##NHWC<T, AccT, 3, 3> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (kernel_h == 5 && kernel_w == 5) { \ name##NHWC<T, AccT, 5, 5> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (kernel_h == 7 && kernel_w == 7) { \ name##NHWC<T, AccT, 7, 7> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ name##NHWC<T, AccT, -1, -1> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DISPATCH_FILTER_KERNEL(name, T, AccT, kBlocks, kThreads, ...) 
\ if (data_format == "NCHW") { \ name##NCHW<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name##NHWC<T, AccT> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(T, AccT) \ template <> \ void DepthwiseConv2d<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* x, \ const T* filter, \ T* y, \ CUDAContext* ctx) { \ const auto nthreads = N * C * out_h * out_w; \ DISPATCH_DATA_KERNEL( \ _DepthwiseConv2d, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<const math::ScalarType<T>::type*>(filter), \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T, AccT) \ template <> \ void DepthwiseConv2dGrad<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* dy, \ const T* filter, \ T* dx, \ CUDAContext* ctx) { \ auto nthreads = N * C * H * W; \ DISPATCH_DATA_KERNEL( \ _DepthwiseConv2dGrad, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(filter), \ reinterpret_cast<math::ScalarType<T>::type*>(dx)); \ } \ template <> \ void DepthwiseConv2dWGrad<T, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const int kernel_h, \ const int kernel_w, \ const int stride_h, \ const int stride_w, \ const int pad_h, \ const int pad_w, \ const int dilation_h, \ const int dilation_w, \ const string& data_format, \ const T* dy, \ const T* x, \ T* dfilter, \ CUDAContext* ctx) { \ const auto kBlocks = C * kernel_h * kernel_w; \ const auto nthreads = 256; \ DISPATCH_FILTER_KERNEL( \ _DepthwiseConv2dWGrad, \ math::ScalarType<T>::type, \ AccT, \ kBlocks, \ nthreads, \ N, \ C, \ H, \ W, \ out_h, \ out_w, \ kernel_h, \ kernel_w, \ stride_h, \ stride_w, \ pad_h, \ pad_w, \ dilation_h, \ dilation_w, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ reinterpret_cast<math::ScalarType<T>::type*>(dfilter)); \ } DEFINE_KERNEL_LAUNCHER(float16, float); DEFINE_KERNEL_LAUNCHER(float, float); DEFINE_GRAD_KERNEL_LAUNCHER(float16, float); DEFINE_GRAD_KERNEL_LAUNCHER(float, float); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER #undef DISPATCH_DATA_KERNEL #undef DISPATCH_FILTER_KERNEL } // namespace kernels } // namespace dragon #endif // USE_CUDA
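/* A minimal CPU reference for the NCHW depthwise forward pass defined above,
 * useful for spot-checking the index arithmetic (one kernel_h x kernel_w filter
 * per channel, no channel mixing).  The function name and std::vector interface
 * are ours; the kernels in the two files remain the authoritative implementation.
 * Worth noting in passing: _DepthwiseConv2dNHWC derives n as
 * yi / C / out_h / out_h, which matches yi / C / out_w / out_h only when
 * out_w == out_h, so it appears to assume square outputs. */
#include <vector>

void depthwiseConv2dNCHWRef(
    int N, int C, int H, int W, int out_h, int out_w,
    int kernel_h, int kernel_w, int stride_h, int stride_w,
    int pad_h, int pad_w, int dilation_h, int dilation_w,
    const std::vector<float>& x,       // N*C*H*W
    const std::vector<float>& filter,  // C*kernel_h*kernel_w
    std::vector<float>& y)             // N*C*out_h*out_w
{
    for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
    for (int ho = 0; ho < out_h; ++ho)
    for (int wo = 0; wo < out_w; ++wo) {
        float val = 0.f;
        for (int kh = 0; kh < kernel_h; ++kh)
        for (int kw = 0; kw < kernel_w; ++kw) {
            const int h = ho * stride_h - pad_h + kh * dilation_h;
            const int w = wo * stride_w - pad_w + kw * dilation_w;
            if (h >= 0 && h < H && w >= 0 && w < W)
                val += x[((n * C + c) * H + h) * W + w] *
                       filter[(c * kernel_h + kh) * kernel_w + kw];
        }
        y[((n * C + c) * out_h + ho) * out_w + wo] = val;
    }
}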
6a00785609de4330a9cd6e99900526cde0befbcd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #define TILE_WIDTH (16) void fill_matrix(double *mat, unsigned numRows, unsigned numCols) { for(unsigned i=0; i < numRows; i++) for(unsigned j=0; j < numCols; j++) { mat[i*numCols + j] = i*2.1f + j*3.2f; } } void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols) { const char *fname = "assignment2_4_out"; FILE *f = fopen(fname, "w"); for(unsigned i=0; i < numRows; i++) { for(unsigned j=0; j < numCols; j++) fprintf(f,"%4.4f ", mat[i*numCols + j]); fprintf(f,"\n"); } fclose(f); } //template<int TILE_WIDTH> __global__ void MatrixMulKernel_col_maj(double* M, double* N, double* Q, int Width) { //extern __shared__ double buffer[]; //double *ds_M = &buffer[0]; //double *ds_N = &buffer[Width*Width]; __shared__ double ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ double ds_N[TILE_WIDTH][TILE_WIDTH]; // Generate IDs double Pvalue=0; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * blockDim.y + ty; int Col = bx * blockDim.x + tx; // Loop over the M and N tiles required to compute the P element for (int p = 0; p < (Width)/TILE_WIDTH; ++p) { if ( (Row < Width) && (tx + p*TILE_WIDTH) < Width){ // Collaborative loading of M and N tiles into shared memory ds_M[ty][tx] = M[Row*Width + p*TILE_WIDTH+tx]; } else{ ds_M[ty][tx]=0.0; } if ( (Col < Width) && (ty + p*TILE_WIDTH) < Width){ ds_N[ty][tx] = N[(p*TILE_WIDTH+ty)*Width + Col]; } else{ ds_N[ty][tx]=0.0; } __syncthreads(); for (int i = 0; i < TILE_WIDTH; ++i){ Pvalue += ds_M[ty][i] * ds_N[i][tx]; } __syncthreads(); } if ((Row < Width) && (Col < Width)){ Q[Row*Width+Col] = Pvalue; } } int main(int argc,char **argv) { int N; N=8192; int loop1, loop2; // loop variables float time_spent; size_t size = N *N* sizeof(double); double*h_matA = (double*)malloc(size); double*h_matB = (double*)malloc(size); double*h_matC = (double*)malloc(size); // result fill_matrix(h_matA,N,N); fill_matrix(h_matB,N,N); printf("\nMatrix A (first 10*10 inputs)\n"); for(loop1 = 0; loop1 < 10; loop1++){ for (loop2=0;loop2 < 10; loop2++) printf("%f ", *(h_matA + N*loop1 + loop2)); printf("\n"); } printf("\n\nMatrix B (first 10*10 inputs)\n"); for(loop1 = 0; loop1 < 10; loop1++){ for (loop2=0;loop2 < 10; loop2++) printf("%f ", *(h_matB + N*loop1 + loop2)); printf("\n"); } double* d_matA; hipMalloc(&d_matA, size); double* d_matB; hipMalloc(&d_matB, size); double* d_matC; hipMalloc(&d_matC, size); //GPU timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Copy vectors from host memory to device memory hipMemcpy(d_matA, h_matA, size,hipMemcpyHostToDevice); hipMemcpy(d_matB, h_matB, size,hipMemcpyHostToDevice); // Invoke kernel dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); dim3 blocksPerGrid ((N + threadsPerBlock.x-1) /threadsPerBlock.x,(N + threadsPerBlock.y-1) /threadsPerBlock.y); hipEventRecord(start, 0); hipLaunchKernelGGL(( MatrixMulKernel_col_maj), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_matA,d_matB, d_matC, N); //hipDeviceSynchronize();//To synchronize the device hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_spent, start, stop); printf("\nTime spent in col maj %f\n",time_spent); // h_C contains the result in host memory hipMemcpy(h_matC, d_matC, size,hipMemcpyDeviceToHost); printf("\n\nMatrix C (first 10*10 outputs)\n"); for(loop1 = 0; loop1 < 10; loop1++){ for (loop2=0;loop2 < 10; loop2++) printf("%f ", *(h_matC + N*loop1 + 
loop2)); printf("\n"); } // Log outputs printf("\nWritting to file assignment_2_1_out as Mat C"); print_matrix_to_file(h_matC,N,N); // Free device memory hipFree(d_matA); hipFree(d_matB); hipFree(d_matC); // Free host memory free(h_matA); free(h_matB); free(h_matC); return 0; }
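/* Note on MatrixMulKernel_col_maj above: the per-element guards already zero-fill
 * out-of-range tile entries, but the tile loop runs Width/TILE_WIDTH times, which
 * truncates the last partial tile whenever Width is not a multiple of TILE_WIDTH.
 * With the hard-coded N = 8192 and TILE_WIDTH = 16 the division is exact and the
 * result is unaffected; for general sizes the loop bound would need the usual
 * ceiling form, (Width + TILE_WIDTH - 1) / TILE_WIDTH. */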
6a00785609de4330a9cd6e99900526cde0befbcd.cu
#include <stdlib.h>
#include <stdio.h>

#define TILE_WIDTH (16)

void fill_matrix(double *mat, unsigned numRows, unsigned numCols)
{
    for (unsigned i = 0; i < numRows; i++)
        for (unsigned j = 0; j < numCols; j++) {
            mat[i*numCols + j] = i*2.1f + j*3.2f;
        }
}

void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols)
{
    const char *fname = "assignment2_4_out";
    FILE *f = fopen(fname, "w");
    for (unsigned i = 0; i < numRows; i++) {
        for (unsigned j = 0; j < numCols; j++)
            fprintf(f, "%4.4f ", mat[i*numCols + j]);
        fprintf(f, "\n");
    }
    fclose(f);
}

//template<int TILE_WIDTH>
// Tiled matrix multiply Q = M * N on row-major storage,
// using TILE_WIDTH x TILE_WIDTH shared-memory tiles.
__global__ void MatrixMulKernel_col_maj(double* M, double* N, double* Q, int Width)
{
    //extern __shared__ double buffer[];
    //double *ds_M = &buffer[0];
    //double *ds_N = &buffer[Width*Width];
    __shared__ double ds_M[TILE_WIDTH][TILE_WIDTH];
    __shared__ double ds_N[TILE_WIDTH][TILE_WIDTH];

    // Generate IDs
    double Pvalue = 0;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;
    int Col = bx * blockDim.x + tx;

    // Loop over the M and N tiles required to compute the P element.
    // Round the tile count up so widths that are not a multiple of TILE_WIDTH
    // are still covered; the guards below zero-pad the partial tiles.
    for (int p = 0; p < (Width + TILE_WIDTH - 1)/TILE_WIDTH; ++p) {
        // Collaborative loading of M and N tiles into shared memory
        if ((Row < Width) && (tx + p*TILE_WIDTH) < Width) {
            ds_M[ty][tx] = M[Row*Width + p*TILE_WIDTH + tx];
        }
        else {
            ds_M[ty][tx] = 0.0;
        }
        if ((Col < Width) && (ty + p*TILE_WIDTH) < Width) {
            ds_N[ty][tx] = N[(p*TILE_WIDTH + ty)*Width + Col];
        }
        else {
            ds_N[ty][tx] = 0.0;
        }
        __syncthreads();

        for (int i = 0; i < TILE_WIDTH; ++i) {
            Pvalue += ds_M[ty][i] * ds_N[i][tx];
        }
        __syncthreads();
    }
    if ((Row < Width) && (Col < Width)) {
        Q[Row*Width + Col] = Pvalue;
    }
}

int main(int argc, char **argv)
{
    int N;
    N = 8192;
    int loop1, loop2; // loop variables
    float time_spent;

    size_t size = N * N * sizeof(double);
    double *h_matA = (double*)malloc(size);
    double *h_matB = (double*)malloc(size);
    double *h_matC = (double*)malloc(size); // result

    fill_matrix(h_matA, N, N);
    fill_matrix(h_matB, N, N);

    printf("\nMatrix A (first 10*10 inputs)\n");
    for (loop1 = 0; loop1 < 10; loop1++) {
        for (loop2 = 0; loop2 < 10; loop2++)
            printf("%f ", *(h_matA + N*loop1 + loop2));
        printf("\n");
    }
    printf("\n\nMatrix B (first 10*10 inputs)\n");
    for (loop1 = 0; loop1 < 10; loop1++) {
        for (loop2 = 0; loop2 < 10; loop2++)
            printf("%f ", *(h_matB + N*loop1 + loop2));
        printf("\n");
    }

    double *d_matA;
    cudaMalloc(&d_matA, size);
    double *d_matB;
    cudaMalloc(&d_matB, size);
    double *d_matC;
    cudaMalloc(&d_matC, size);

    // GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Copy matrices from host memory to device memory
    cudaMemcpy(d_matA, h_matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, h_matB, size, cudaMemcpyHostToDevice);

    // Invoke kernel
    dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 blocksPerGrid((N + threadsPerBlock.x - 1)/threadsPerBlock.x, (N + threadsPerBlock.y - 1)/threadsPerBlock.y);
    cudaEventRecord(start, 0);
    MatrixMulKernel_col_maj<<<blocksPerGrid, threadsPerBlock>>>(d_matA, d_matB, d_matC, N);
    //cudaDeviceSynchronize();//To synchronize the device
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_spent, start, stop);
    printf("\nTime spent in MatrixMulKernel_col_maj: %f ms\n", time_spent);

    // h_matC contains the result in host memory
    cudaMemcpy(h_matC, d_matC, size, cudaMemcpyDeviceToHost);
    printf("\n\nMatrix C (first 10*10 outputs)\n");
    for (loop1 = 0; loop1 < 10; loop1++) {
        for (loop2 = 0; loop2 < 10; loop2++)
            printf("%f ", *(h_matC + N*loop1 + loop2));
        printf("\n");
    }

    // Log outputs
    printf("\nWriting matrix C to file assignment2_4_out\n");
    print_matrix_to_file(h_matC, N, N);

    // Free device memory
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);

    // Free host memory
    free(h_matA);
    free(h_matB);
    free(h_matC);
    return 0;
}
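The assignment program above never checks the GPU result against a CPU reference. A full CPU multiply at N = 8192 would be far too slow, but a corner of the output can be verified cheaply. The sketch below is not part of the original assignment: the helper name spot_check_matmul and the 4x4 corner it inspects are arbitrary choices, and it assumes it is appended to the same file (so stdio.h is already available) and called in main() after h_matC has been copied back.

// Sketch only (not part of the original assignment): verify a small corner of the
// GPU result against a CPU triple loop; needs <math.h> for fabs().
#include <math.h>

static void spot_check_matmul(const double *A, const double *B, const double *C, int N)
{
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            double ref = 0.0;
            for (int k = 0; k < N; k++)
                ref += A[i * N + k] * B[k * N + j];
            double got = C[i * N + j];
            // relative tolerance: the entries of C grow with N, and the GPU sums
            // in a different order than this loop
            if (fabs(ref - got) > 1e-9 * fabs(ref))
                printf("Mismatch at (%d,%d): cpu=%f gpu=%f\n", i, j, ref, got);
        }
    }
}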
323e3703f50107bd75ae65053497acdf77f2d596.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include "hip/hip_runtime.h" #include "stdio.h" #define BDIMX 32 #define BDIMY 32 #define IPAD 1 void printData(char *msg, int *in, const int size){ printf("%s: ", msg); for(int i=0; i < size; i++){ printf("%5d", in[i]); fflush(stdout); } printf("\n"); return; } __global__ void setRowReadRow(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.y][threadIdx.x]; } __global__ void setColReadCol(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.x][threadIdx.y] = idx; __syncthreads(); out[idx] = tile[threadIdx.x][threadIdx.y]; } __global__ void setRowReadCol(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.x][threadIdx.y]; } __global__ void setRowReadColDyn(int *out){ extern __shared__ int tile[]; unsigned int row_idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int col_idx = threadIdx.x * blockDim.y + threadIdx.y; tile[row_idx] = row_idx; __syncthreads(); out[row_idx] = tile[col_idx]; } __global__ void setRowReadColPad(int *out){ __shared__ int tile[BDIMY][BDIMX+IPAD]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.y][threadIdx.x]; } __global__ void setRowReadColDynPad(int *out){ extern __shared__ int tile[]; unsigned int row_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x; unsigned int col_idx = threadIdx.x * (blockDim.x + 1) + threadIdx.y; unsigned int g_idx = threadIdx.y * blockDim.x + threadIdx.x; tile[row_idx] = g_idx; __syncthreads(); out[g_idx] = tile[col_idx]; } int main(int argc, char** argv){ int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s at ", argv[0]); printf("device %d: %s", dev, deviceProp.name); CHECK(hipSetDevice(dev)); hipSharedMemConfig pConfig; CHECK(hipDeviceGetSharedMemConfig(&pConfig)); printf("with Bank Mode: %s ", pConfig == 1 ? 
"4-Byte": "8-Byte"); int nx = BDIMX; int ny = BDIMY; bool iprintf = 0; if(argc > 1) iprintf = atoi(argv[1]); size_t nBytes = nx * ny * sizeof(int); dim3 block(BDIMX, BDIMY); dim3 grid(1, 1); printf("<<< grid (%d, %d) block (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); int *d_C; CHECK(hipMalloc((int**)&d_C, nBytes)); int *gpuRef = (int *)malloc(nBytes); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setColReadCol), dim3(grid), dim3(block), 0, 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set col read col: ", gpuRef, nx*ny); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setRowReadRow), dim3(grid), dim3(block), 0, 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set row read row: ", gpuRef, nx*ny); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setRowReadCol), dim3(grid), dim3(block), 0, 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set row read col: ", gpuRef, nx * ny); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setRowReadColDyn), dim3(grid), dim3(block), BDIMX*BDIMY*sizeof(int), 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set row read col dyn", gpuRef, nx * ny); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setRowReadColPad), dim3(grid), dim3(block), 0, 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set row read col pad", gpuRef, nx * ny); CHECK(hipMemset(d_C, 0, nBytes)); hipLaunchKernelGGL(( setRowReadColDynPad), dim3(grid), dim3(block), (BDIMX + IPAD)*BDIMY*sizeof(int), 0, d_C); CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); if(iprintf) printData("set row read col DP ", gpuRef, nx * ny); CHECK(hipFree(d_C)); free(gpuRef); CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
323e3703f50107bd75ae65053497acdf77f2d596.cu
#include "../common/common.h" #include "cuda_runtime.h" #include "stdio.h" #define BDIMX 32 #define BDIMY 32 #define IPAD 1 void printData(char *msg, int *in, const int size){ printf("%s: ", msg); for(int i=0; i < size; i++){ printf("%5d", in[i]); fflush(stdout); } printf("\n"); return; } __global__ void setRowReadRow(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.y][threadIdx.x]; } __global__ void setColReadCol(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.x][threadIdx.y] = idx; __syncthreads(); out[idx] = tile[threadIdx.x][threadIdx.y]; } __global__ void setRowReadCol(int *out){ __shared__ int tile[BDIMY][BDIMX]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.x][threadIdx.y]; } __global__ void setRowReadColDyn(int *out){ extern __shared__ int tile[]; unsigned int row_idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int col_idx = threadIdx.x * blockDim.y + threadIdx.y; tile[row_idx] = row_idx; __syncthreads(); out[row_idx] = tile[col_idx]; } __global__ void setRowReadColPad(int *out){ __shared__ int tile[BDIMY][BDIMX+IPAD]; unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x; tile[threadIdx.y][threadIdx.x] = idx; __syncthreads(); out[idx] = tile[threadIdx.y][threadIdx.x]; } __global__ void setRowReadColDynPad(int *out){ extern __shared__ int tile[]; unsigned int row_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x; unsigned int col_idx = threadIdx.x * (blockDim.x + 1) + threadIdx.y; unsigned int g_idx = threadIdx.y * blockDim.x + threadIdx.x; tile[row_idx] = g_idx; __syncthreads(); out[g_idx] = tile[col_idx]; } int main(int argc, char** argv){ int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s at ", argv[0]); printf("device %d: %s", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); cudaSharedMemConfig pConfig; CHECK(cudaDeviceGetSharedMemConfig(&pConfig)); printf("with Bank Mode: %s ", pConfig == 1 ? 
"4-Byte": "8-Byte"); int nx = BDIMX; int ny = BDIMY; bool iprintf = 0; if(argc > 1) iprintf = atoi(argv[1]); size_t nBytes = nx * ny * sizeof(int); dim3 block(BDIMX, BDIMY); dim3 grid(1, 1); printf("<<< grid (%d, %d) block (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); int *d_C; CHECK(cudaMalloc((int**)&d_C, nBytes)); int *gpuRef = (int *)malloc(nBytes); CHECK(cudaMemset(d_C, 0, nBytes)); setColReadCol<<<grid, block>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set col read col: ", gpuRef, nx*ny); CHECK(cudaMemset(d_C, 0, nBytes)); setRowReadRow<<<grid, block>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set row read row: ", gpuRef, nx*ny); CHECK(cudaMemset(d_C, 0, nBytes)); setRowReadCol<<<grid, block>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set row read col: ", gpuRef, nx * ny); CHECK(cudaMemset(d_C, 0, nBytes)); setRowReadColDyn<<<grid, block, BDIMX*BDIMY*sizeof(int)>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set row read col dyn", gpuRef, nx * ny); CHECK(cudaMemset(d_C, 0, nBytes)); setRowReadColPad<<<grid, block>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set row read col pad", gpuRef, nx * ny); CHECK(cudaMemset(d_C, 0, nBytes)); setRowReadColDynPad<<<grid, block, (BDIMX + IPAD)*BDIMY*sizeof(int)>>>(d_C); CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); if(iprintf) printData("set row read col DP ", gpuRef, nx * ny); CHECK(cudaFree(d_C)); free(gpuRef); CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
54b9880e770722f69f9162bf662c005800ae4ba2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: new_xyz(b, m, 3) xyz(b, n, 3) // output: idx(b, m, nsample) __global__ void query_ball_point_kernel(int b, int n, int m, float radius, int nsample, const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { int batch_index = blockIdx.x; xyz += batch_index * n * 3; new_xyz += batch_index * m * 3; idx += m * nsample * batch_index; int index = threadIdx.x; int stride = blockDim.x; float radius2 = radius * radius; for (int j = index; j < m; j += stride) { float new_x = new_xyz[j * 3 + 0]; float new_y = new_xyz[j * 3 + 1]; float new_z = new_xyz[j * 3 + 2]; for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { float x = xyz[k * 3 + 0]; float y = xyz[k * 3 + 1]; float z = xyz[k * 3 + 2]; float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); if (d2 < radius2) { if (cnt == 0) { for (int l = 0; l < nsample; ++l) { idx[j * nsample + l] = k; } } idx[j * nsample + cnt] = k; ++cnt; } } } } void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( query_ball_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream, b, n, m, radius, nsample, new_xyz, xyz, idx); CUDA_CHECK_ERRORS(); } // input: new_xyz(b, m, f) xyz(b, n, f) // output: idx(b, m, nsample) __global__ void query_ball_feats_kernel(int b, int n, int m, int f, float radius, int nsample, const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { int batch_index = blockIdx.x; xyz += batch_index * n * f; new_xyz += batch_index * m * f; idx += m * nsample * batch_index; int index = threadIdx.x; int stride = blockDim.x; float radius2 = radius * radius; for (int j = index; j < m; j += stride) { const float* new_xyz_cur = &new_xyz[j * f]; for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { const float* xyz_cur = &xyz[k * f]; float d2=0.f; for (int fi=0; fi<f; ++fi) d2+=(xyz_cur[fi]-new_xyz_cur[fi])*(xyz_cur[fi]-new_xyz_cur[fi]); if (d2 < radius2) { if (cnt == 0) { for (int l = 0; l < nsample; ++l) { idx[j * nsample + l] = k; } } idx[j * nsample + cnt] = k; ++cnt; } } } } void query_ball_feats_kernel_wrapper(int b, int n, int m, int f, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( query_ball_feats_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream, b, n, m, f, radius, nsample, new_xyz, xyz, idx); CUDA_CHECK_ERRORS(); }
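For unit-testing a kernel like query_ball_point_kernel, a plain CPU mirror of its semantics is handy. The sketch below is such a hypothetical reference (query_ball_point_cpu is not part of this extension): at most nsample indices within radius are collected per query point, and, as in the kernel, every slot of a row is pre-filled with the first in-radius index so rows with fewer than nsample neighbours still hold valid indices; rows with no neighbour at all are left untouched, exactly as in the kernel.

// CPU reference for the ball query above (illustrative only).
static void query_ball_point_cpu(int b, int n, int m, float radius, int nsample,
                                 const float *new_xyz, const float *xyz, int *idx)
{
    const float r2 = radius * radius;
    for (int bi = 0; bi < b; ++bi) {
        const float *q = new_xyz + bi * m * 3;   // query points
        const float *p = xyz + bi * n * 3;       // candidate points
        int *out = idx + bi * m * nsample;
        for (int j = 0; j < m; ++j) {
            int cnt = 0;
            for (int k = 0; k < n && cnt < nsample; ++k) {
                float dx = q[j * 3 + 0] - p[k * 3 + 0];
                float dy = q[j * 3 + 1] - p[k * 3 + 1];
                float dz = q[j * 3 + 2] - p[k * 3 + 2];
                if (dx * dx + dy * dy + dz * dz < r2) {
                    if (cnt == 0)                       // first hit fills the whole row
                        for (int l = 0; l < nsample; ++l)
                            out[j * nsample + l] = k;
                    out[j * nsample + cnt++] = k;
                }
            }
        }
    }
}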
54b9880e770722f69f9162bf662c005800ae4ba2.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: new_xyz(b, m, 3) xyz(b, n, 3) // output: idx(b, m, nsample) __global__ void query_ball_point_kernel(int b, int n, int m, float radius, int nsample, const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { int batch_index = blockIdx.x; xyz += batch_index * n * 3; new_xyz += batch_index * m * 3; idx += m * nsample * batch_index; int index = threadIdx.x; int stride = blockDim.x; float radius2 = radius * radius; for (int j = index; j < m; j += stride) { float new_x = new_xyz[j * 3 + 0]; float new_y = new_xyz[j * 3 + 1]; float new_z = new_xyz[j * 3 + 2]; for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { float x = xyz[k * 3 + 0]; float y = xyz[k * 3 + 1]; float z = xyz[k * 3 + 2]; float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); if (d2 < radius2) { if (cnt == 0) { for (int l = 0; l < nsample; ++l) { idx[j * nsample + l] = k; } } idx[j * nsample + cnt] = k; ++cnt; } } } } void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>( b, n, m, radius, nsample, new_xyz, xyz, idx); CUDA_CHECK_ERRORS(); } // input: new_xyz(b, m, f) xyz(b, n, f) // output: idx(b, m, nsample) __global__ void query_ball_feats_kernel(int b, int n, int m, int f, float radius, int nsample, const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { int batch_index = blockIdx.x; xyz += batch_index * n * f; new_xyz += batch_index * m * f; idx += m * nsample * batch_index; int index = threadIdx.x; int stride = blockDim.x; float radius2 = radius * radius; for (int j = index; j < m; j += stride) { const float* new_xyz_cur = &new_xyz[j * f]; for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { const float* xyz_cur = &xyz[k * f]; float d2=0.f; for (int fi=0; fi<f; ++fi) d2+=(xyz_cur[fi]-new_xyz_cur[fi])*(xyz_cur[fi]-new_xyz_cur[fi]); if (d2 < radius2) { if (cnt == 0) { for (int l = 0; l < nsample; ++l) { idx[j * nsample + l] = k; } } idx[j * nsample + cnt] = k; ++cnt; } } } } void query_ball_feats_kernel_wrapper(int b, int n, int m, int f, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); query_ball_feats_kernel<<<b, opt_n_threads(m), 0, stream>>>( b, n, m, f, radius, nsample, new_xyz, xyz, idx); CUDA_CHECK_ERRORS(); }
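The wrapper above is written for a PyTorch extension (it pulls the current ATen CUDA stream), but the kernel itself has no such dependency. Below is a hedged, standalone sketch of driving query_ball_point_kernel directly on the default stream: run_ball_query_once is a made-up harness, the problem sizes are arbitrary, and a fixed 256 threads per block stands in for opt_n_threads() from cuda_utils.h, which is not shown here.

// Standalone sketch (not part of the extension): exercise query_ball_point_kernel
// with raw device buffers on the default stream. Assumes it is appended to this
// file so the kernel is visible; <vector> is needed in addition to the existing
// <stdlib.h> (for rand) and <stdio.h>.
#include <vector>

static void run_ball_query_once()
{
    const int b = 2, n = 1024, m = 128, nsample = 32;
    const float radius = 0.2f;

    std::vector<float> h_xyz(b * n * 3), h_new_xyz(b * m * 3);
    for (size_t i = 0; i < h_xyz.size(); ++i)     h_xyz[i]     = (float)rand() / RAND_MAX;
    for (size_t i = 0; i < h_new_xyz.size(); ++i) h_new_xyz[i] = (float)rand() / RAND_MAX;

    float *d_xyz, *d_new_xyz;
    int *d_idx;
    cudaMalloc(&d_xyz, h_xyz.size() * sizeof(float));
    cudaMalloc(&d_new_xyz, h_new_xyz.size() * sizeof(float));
    cudaMalloc(&d_idx, b * m * nsample * sizeof(int));
    cudaMemcpy(d_xyz, h_xyz.data(), h_xyz.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_new_xyz, h_new_xyz.data(), h_new_xyz.size() * sizeof(float), cudaMemcpyHostToDevice);
    // the kernel leaves rows with no in-radius neighbour untouched, so start from 0
    cudaMemset(d_idx, 0, b * m * nsample * sizeof(int));

    // one block per batch element, threads stride over the m query points
    query_ball_point_kernel<<<b, 256>>>(b, n, m, radius, nsample, d_new_xyz, d_xyz, d_idx);
    cudaDeviceSynchronize();

    std::vector<int> h_idx(b * m * nsample);
    cudaMemcpy(h_idx.data(), d_idx, h_idx.size() * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_xyz); cudaFree(d_new_xyz); cudaFree(d_idx);
}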
be5d184a9706f502ca51fabb89060546f36c3a76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel3), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, (half*)A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, //updateXWithCGKernel2, batchSize, 96, 4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHostAsync(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter, hipStream_t *stream){ hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), *stream, A, x, b, batchSize, f, cgIter); } 
//fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } 
#endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float *sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = (float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + 
temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + 
temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + 
temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); 
atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ hipLaunchKernelGGL(( alsUpdateFeature100), dim3(m), dim3(64), SCAN_BATCH * F/2*sizeof(float2), 0, batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); hipDeviceSynchronize(); cudaCheckError(); }
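Each block of updateXWithCGKernel / updateXWithCGKernel3 runs the textbook conjugate-gradient loop on one dense f x f system, keeping x, r, p and A*p in shared memory and reducing the dot products across the block. For reference and debugging, here is a plain host-side version of the same loop: cg_reference is a hypothetical helper, it assumes A is symmetric positive definite (as the ALS normal equations are, so row/column order of the f x f block does not matter), and it mirrors the kernels' CG_ERROR = 1e-4 test on the squared residual norm.

// Host-side reference of the per-block CG loop above, for one f x f system (sketch only).
#include <vector>

static void cg_reference(const float *A, float *x, const float *b, int f, int cgIter)
{
    std::vector<float> r(f), p(f), ap(f);
    // r = b - A*x, p = r
    for (int i = 0; i < f; ++i) {
        float t = 0.f;
        for (int k = 0; k < f; ++k) t += A[i * f + k] * x[k];
        r[i] = b[i] - t;
        p[i] = r[i];
    }
    float rsold = 0.f;
    for (int i = 0; i < f; ++i) rsold += r[i] * r[i];

    for (int it = 0; it < cgIter; ++it) {
        // ap = A*p and pAp = p'*A*p
        float pAp = 0.f;
        for (int i = 0; i < f; ++i) {
            float t = 0.f;
            for (int k = 0; k < f; ++k) t += A[i * f + k] * p[k];
            ap[i] = t;
            pAp += p[i] * ap[i];
        }
        float alpha = rsold / pAp;
        float rsnew = 0.f;
        for (int i = 0; i < f; ++i) {
            x[i] += alpha * p[i];          // x = x + alpha*p
            r[i] -= alpha * ap[i];         // r = r - alpha*A*p
            rsnew += r[i] * r[i];
        }
        if (rsnew < 1e-4f) break;          // mirrors CG_ERROR in the kernels
        float beta = rsnew / rsold;
        for (int i = 0; i < f; ++i) p[i] = r[i] + beta * p[i];
        rsold = rsnew;
    }
}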
be5d184a9706f502ca51fabb89060546f36c3a76.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
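//not converged yet: compute beta = rsnew/rsold, roll rsold forward, and form the next conjugate direction p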
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
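//same iteration as updateXWithCGKernel above; the only difference in this fp16 variant is that each
//element of A is stored as half and widened with __half2float before the multiply-accumulate.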
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
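//in this variant blockDim.x (64 or 96) is smaller than f, so every per-element step is a strided loop:
//thread t handles elements t, t+blockDim.x, t+2*blockDim.x, ... of each length-f vector.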
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel3<<<batchSize, f, (4*f+4)*sizeof(float)>>> ((half*)A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float)>>> //updateXWithCGKernel2<<<batchSize, 96, (4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHostAsync(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter, cudaStream_t *stream){ updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float), *stream>>>(A, x, b, batchSize, f, cgIter); } //fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const 
int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float 
*sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = (float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + 
temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); 
printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + 
temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + 
temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
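//beta and the new direction p are updated exactly as in the standalone CG kernels above; the difference
//here is that A is never materialized -- A*p (and A*x) is accumulated by the 55 active threads from the
//per-thread 10x10 register tiles temp0..temp99 via atomicAdd into shared memory.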
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ alsUpdateFeature100<<<m, 64, SCAN_BATCH * F/2*sizeof(float2)>>> (batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); cudaDeviceSynchronize(); cudaCheckError(); }
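/* Hypothetical usage sketch (an assumption for illustration, not part of the original call path):
   solve batchSize independent f-by-f symmetric systems A_i * x_i = b_i with updateXWithCGHost.
   Per the kernel's indexing, A holds batchSize contiguous f*f blocks, while x and b hold batchSize
   contiguous length-f vectors, all in device memory; x doubles as the initial guess and the result.

   int batchSize = 1000, f = 100, cgIter = 6;                  // assumed sizes
   float *d_A, *d_x, *d_b;
   cudacall(cudaMalloc((void**)&d_A, (size_t)batchSize * f * f * sizeof(float)));
   cudacall(cudaMalloc((void**)&d_x, (size_t)batchSize * f * sizeof(float)));
   cudacall(cudaMalloc((void**)&d_b, (size_t)batchSize * f * sizeof(float)));
   // ... fill d_A with the regularized Gram matrices, d_b with the right-hand sides,
   //     and d_x with an initial guess (e.g. the previous sweep's factors) ...
   updateXWithCGHost(d_A, d_x, d_b, batchSize, f, cgIter);
   // d_x now holds the approximate solutions after cgIter CG steps per system.
*/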
2d5cfa60df7a367d574f4cc9a80cd8742539e798.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index 
/ output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( input_data[(d * input_height + h) * input_width + w], &ele); } } } int pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / 
output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, 
double, int>; template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int c_offset = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pd_start = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 
0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int pd_end = min((d_offset + padding_depth) / stride_depth + 1, output_depth); int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pd_start; pd < pd_end; ++pd) { for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
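The Pool2dFunctor / Pool3dFunctor classes above are plain host-side functors: the caller prepares the input tensor and a pre-sized output tensor, picks a PoolProcess (MaxPool or AvgPool), and invokes operator() with the device context. The snippet below is a minimal sketch of such a call for 2-D average pooling; the wrapper function, tensor shapes, and header paths are illustrative assumptions rather than code from this file, but the operator() argument order and the Pool2dFunctor/AvgPool template arguments follow the definitions and explicit instantiations above.

#include <vector>

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/device_context.h"

namespace pf = paddle::framework;
namespace pp = paddle::platform;
namespace pm = paddle::operators::math;

// 2x2 average pooling, stride 2, no padding, on an NCHW float tensor that
// already lives on the GPU. The caller is expected to have Resize()d
// *output to [N, C, H/2, W/2]; the functor only reads dims and fills data.
void AvgPool2x2(const pp::CUDADeviceContext& context,
                const pf::Tensor& input,   // [N, C, H, W], float, on GPU
                pf::Tensor* output) {      // [N, C, H/2, W/2], pre-sized
  std::vector<int> ksize = {2, 2};
  std::vector<int> strides = {2, 2};
  std::vector<int> paddings = {0, 0};

  pm::Pool2dFunctor<pp::CUDADeviceContext, pm::AvgPool<float>, float> pool;
  pm::AvgPool<float> pool_process;

  // 'exclusive' chooses whether padded cells count toward the averaging
  // divisor; with zero padding it makes no difference.
  pool(context, input, ksize, strides, paddings, pool_process,
       /*exclusive=*/true, output);
}

The gradient functors follow the same conventions: they take the forward input, output and output_grad tensors plus the same ksize/strides/paddings vectors, and write into a pre-sized input_grad tensor.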
2d5cfa60df7a367d574f4cc9a80cd8742539e798.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? (hend - hstart) * (wend - wstart) : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename PoolProcess, typename T> class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T> class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height 
/ output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( input_data[(d * input_height + h) * input_width + w], &ele); } } } int pool_size = exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast<T>(pool_size), &ele); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = exclusive ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], static_cast<T>(1.0 / pool_size), &gradient); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, PoolProcess pool_process, bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad<PoolProcess, T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process, exclusive, input_grad_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <class T> class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>; template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor<platform::CUDADeviceContext, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename T1, typename T2> __global__ void KernelMaxPool2dWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % 
channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T1 ele = -FLT_MAX; int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * input_width + w; if (ele < input_data[input_index]) { max_index = input_index; ele = input_data[input_index]; } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_height * output_width; mask_data += output_idx; output_grad += output_idx; for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } input_grad[index] = gradient; } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. */ template <typename T1, typename T2> class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_height = input_grad->dims()[2]; const int input_width = input_grad->dims()[3]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T2* mask_data = mask.data<T2>(); const T1* output_grad_data = output_grad.data<T1>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; template <typename T1, typename T2> __global__ void 
KernelMaxPool3DWithIdx( const int nthreads, const T1* input_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* output_data, T2* mask_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T1 ele = -FLT_MAX; int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { max_index = (d * input_height + h) * input_width + w; ele = input_data[max_index]; } } } } output_data[index] = ele; mask_data[index] = max_index; } } template <typename T1, typename T2> __global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, const T1* output_grad, const T2* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, T1* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; int d_offset = (index / input_width / input_height) % input_depth; int c_offset = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pd_start = (d_offset + padding_depth < ksize_depth) ? 0 : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; int ph_start = (h_offset + padding_height < ksize_height) ? 0 : (h_offset + padding_height - ksize_height) / stride_height + 1; int pw_start = (w_offset + padding_width < ksize_width) ? 
0 : (w_offset + padding_width - ksize_width) / stride_width + 1; int pd_end = min((d_offset + padding_depth) / stride_depth + 1, output_depth); int ph_end = min((h_offset + padding_height) / stride_height + 1, output_height); int pw_end = min((w_offset + padding_width) / stride_width + 1, output_width); T1 gradient = 0; int input_current_feature_map_idx = (d_offset * input_height + h_offset) * input_width + w_offset; int output_idx = (batch_idx * channels + c_offset) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; for (int pd = pd_start; pd < pd_end; ++pd) { for (int ph = ph_start; ph < ph_end; ++ph) { for (int pw = pw_start; pw < pw_end; ++pw) { if (mask[(pd * output_height + ph) * output_width + pw] == input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } } } input_grad[index] = gradient; } } /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. */ template <typename T1, typename T2> class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output, framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output->dims()[1]; const int output_depth = output->dims()[2]; const int output_height = output->dims()[3]; const int output_width = output->dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* input_data = input.data<T1>(); T1* output_data = output->mutable_data<T1>(context.GetPlace()); T2* mask_data = mask->mutable_data<T2>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, output_data, mask_data); } }; /* * All tensors are in NCDHW format. * Ksize, strides, paddings are three elements. These three elements represent * depth, height and width, respectively. 
*/ template <typename T1, typename T2> class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; const int input_depth = input_grad->dims()[2]; const int input_height = input_grad->dims()[3]; const int input_width = input_grad->dims()[4]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T1* output_grad_data = output_grad.data<T1>(); const T2* mask_data = mask.data<T2>(); T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, input_grad_data); } }; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float, int>; template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double, int>; template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, double, int>; } // namespace math } // namespace operators } // namespace paddle
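// The pooling kernels above recover (n, c, d, h, w) from a flat NCDHW index with a chain of
// divisions and modulos, and the backward kernel rebuilds flat offsets from those coordinates.
// A minimal host-side round-trip check of that index arithmetic (hypothetical helper names,
// not part of the Paddle sources):

#include <cassert>

struct Ncdhw { int n, c, d, h, w; };

// Decompose a flat NCDHW index exactly the way KernelMaxPool3DWithIdx does.
static Ncdhw DecomposeNcdhw(int index, int channels, int depth, int height, int width) {
  Ncdhw p;
  p.w = index % width;
  p.h = (index / width) % height;
  p.d = (index / width / height) % depth;
  p.c = (index / width / height / depth) % channels;
  p.n = index / width / height / depth / channels;
  return p;
}

int main() {
  const int N = 2, C = 3, D = 4, H = 5, W = 6;
  for (int idx = 0; idx < N * C * D * H * W; ++idx) {
    Ncdhw p = DecomposeNcdhw(idx, C, D, H, W);
    // Recompose and confirm the decomposition is lossless.
    int back = (((p.n * C + p.c) * D + p.d) * H + p.h) * W + p.w;
    assert(back == idx);
  }
  return 0;
}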
b98d2297a8c17517b64ce1fabf6a96bb75500694.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, mdouble **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*16*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, double **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*4*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y] = Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif // PRECISION_d #endif // __CUDA_ARCH__ >= 200 } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); hipLaunchKernelGGL(( zbcsr_gemm_kernel64), dim3(grid), dim3(threads), 0, queue->cuda_stream() , size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
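// Each thread block of zbcsr_gemm_kernel64 applies a dense update C <- C - A*B to one
// size_b x size_b block of the trailing matrix. A naive host reference of that update is a
// handy correctness check; the sketch below is hypothetical and assumes column-major block
// storage with leading dimension m, matching the lda = ldb = ldc = m macros above.

static void bcsr_block_update_ref(int m, const double *A, const double *B, double *C)
{
    for (int j = 0; j < m; ++j) {          // column of B and C
        for (int i = 0; i < m; ++i) {      // row of A and C
            double acc = 0.0;
            for (int k = 0; k < m; ++k)
                acc += A[i + k * m] * B[k + j * m];
            C[i + j * m] -= acc;           // the kernels accumulate with C[x*16] -= Cb[...]
        }
    }
}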
b98d2297a8c17517b64ce1fabf6a96bb75500694.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, mdouble **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*16*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, double **Avals, double **Bval, double **Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = ty2*lda + tx2; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j < 64; j += 16) { for(int y=tx2; y < 64; y += 16) { Abs[y][j] = fetch_x_A(trackA + y-tx2); } trackA += 16*m; } for(int k=0; k < kblocks; k++) { B = Bval[k]; int trackB = tx2 + ty2*4*ldb; // Prefetch part of B #pragma unroll for(int y=0; y < 4; y++) { Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb); } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1 < m-16; k1 += 16) { trackB += 16; #pragma unroll for( int y=0; y < 4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) { Axs[y] = Abs[tx2+y*16][j1+k1]; } #pragma unroll for( int y=0; y < 4; y++) { Bxp[y] = Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y < 4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + ty2*ldc; #pragma unroll for(int j1=0; j1 < 16; j1++) { #pragma unroll for( int y=0; y < 4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1]; #pragma unroll for( int y=0; y < 4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x < 4; x++) { #pragma unroll for( int y=0; y < 4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0; y < 4; y++, gy += 16) { int gx = tx2; #pragma unroll for(int x=0; x < 4; x++, gx += 16) { if (gx < m && gy < n) { C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif // PRECISION_d #endif // __CUDA_ARCH__ >= 200 } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); zbcsr_gemm_kernel64<<< grid, threads, 0, queue->cuda_stream() >>>( size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
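// Both GEMM kernels pad their shared tiles to 65 columns (Abs[64][65], Bb[16][65]) so that a
// warp stepping down a column touches 32 different shared-memory banks instead of one.
// A toy kernel illustrating the same padding trick (hypothetical, not part of MAGMA; assumes
// a dim3(TILE, TILE) thread block):

#define TILE 32

__global__ void padded_tile_transpose(const float *in, float *out, int n)
{
    __shared__ float s_tile[TILE][TILE + 1];   // the +1 column staggers bank indices

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    if (x < n && y < n)
        s_tile[threadIdx.y][threadIdx.x] = in[y * n + x];
    __syncthreads();

    int tx = blockIdx.y * TILE + threadIdx.x;  // transposed block origin
    int ty = blockIdx.x * TILE + threadIdx.y;
    if (tx < n && ty < n)
        out[ty * n + tx] = s_tile[threadIdx.x][threadIdx.y];  // column read, conflict-free
}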
1a2552242e4faca28ee14ca9d48bd81577b1e001.hip
// !!! This is a file automatically generated by hipify!!! #include "tools/tools.cuh" #include "compression/afl.cuh" #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> int gpuid_0=1, gpuid_1=2; #define PPRINT_THROUGPUT(name, data_size) printf("%c[1;34m",27); printf name; printf("%c[30m,%c[37m ", 27,27); TIMEIT_PRINT_THROUGPUT(data_size); template <typename T> __global__ void saxpy(unsigned long n, int a, T *x, T *y) { // Determine element to process from thread index for (long tid = blockIdx.x * blockDim.x + threadIdx.x; tid < n; tid += blockDim.x * gridDim.x) y[tid] += a*x[tid]; } template <typename T> void multi_gpu_compress(unsigned long max_size, unsigned int bit_length, bool direct_copy) { mmManager manager; T *dev0_data, *dev0_comp_out; T *dev1_data, *dev1_data_out, *dev1_comp_out; T *host_data, *host_data2; unsigned long comp_size = ((max_size * bit_length)/(sizeof(T)*8) + (sizeof(T)*8)) * sizeof(T); unsigned long data_size = max_size * sizeof(T); TIMEIT_SETUP(); mmCudaMallocHost(manager, (void**)&host_data, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data2, max_size * sizeof(T)); big_random_block(max_size, bit_length, host_data); gpuErrchk(hipSetDevice(gpuid_0)); mmCudaMalloc(manager, (void **) &dev0_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev0_comp_out, comp_size); TIMEIT_START(); gpuErrchk( hipMemcpy(dev0_data, host_data, data_size, hipMemcpyHostToDevice) ); TIMEIT_END("M->G"); gpuErrchk(hipSetDevice(gpuid_1)); mmCudaMalloc(manager, (void **) &dev1_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev1_comp_out, comp_size); gpuErrchk(hipSetDevice(gpuid_0)); TIMEIT_START(); run_afl_compress_gpu <T, FL_ALGORITHM_MOD_AFL> (bit_length, dev0_data, dev0_comp_out, max_size); cudaErrorCheck(); TIMEIT_END("*C"); T *dev_data_source = dev0_comp_out; gpuErrchk(hipSetDevice(gpuid_1)); if (direct_copy) { TIMEIT_START(); hipMemcpyPeer(dev1_comp_out, gpuid_1, dev0_comp_out, gpuid_0, comp_size); TIMEIT_END("*copy"); dev_data_source = dev1_comp_out; cudaErrorCheck(); } TIMEIT_START(); run_afl_decompress_gpu <T, FL_ALGORITHM_MOD_AFL> (bit_length, dev_data_source, dev1_data, max_size); cudaErrorCheck(); TIMEIT_END("*D"); mmCudaFree(manager, dev_data_source); mmCudaMalloc(manager, (void **) &dev1_data_out, max_size * sizeof(T)); TIMEIT_START(); hipLaunchKernelGGL(( saxpy) , dim3(4096), dim3(512), 0, 0, max_size, 10, dev1_data, dev1_data_out); cudaErrorCheck(); TIMEIT_END("saxpy"); hipMemset(host_data2, 0, data_size); TIMEIT_START(); gpuErrchk(hipMemcpy(host_data2, dev1_data, data_size, hipMemcpyDeviceToHost)); TIMEIT_END("G->M"); compare_arrays(host_data2, host_data, max_size); PPRINT_THROUGPUT(("MGPU%s compr afl%d", direct_copy ? 
"copy":"access", bit_length), max_size * sizeof(T)); mmCudaFreeAll(manager); } template <typename T> void multi_gpu(unsigned long max_size, bool direct_copy) { mmManager manager; T *dev0_data, *dev1_data; T *dev1_data_out; T *host_data, *host_data2; unsigned long data_size = max_size * sizeof(T); gpuErrchk(hipSetDevice(gpuid_0)); mmCudaMalloc(manager, (void **) &dev0_data, max_size * sizeof(T)); gpuErrchk(hipSetDevice(gpuid_1)); mmCudaMalloc(manager, (void **) &dev1_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev1_data_out, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data2, max_size * sizeof(T)); big_random_block(max_size, 31, host_data); // We do not compress this so any bitlen is OK TIMEIT_SETUP(); TIMEIT_START(); gpuErrchk( hipMemcpy(dev0_data, host_data, data_size, hipMemcpyHostToDevice) ); TIMEIT_END("M->G"); T *dev_data_source = dev0_data; if (direct_copy) { TIMEIT_START(); /*hipMemcpy(dev1_data, dev0_data, max_size * sizeof(T), hipMemcpyDefault);*/ hipMemcpyPeer(dev1_data, gpuid_1, dev0_data, gpuid_0, max_size * sizeof(T)); TIMEIT_END("*copy"); dev_data_source = dev1_data; cudaErrorCheck(); } TIMEIT_START(); hipLaunchKernelGGL(( saxpy) , dim3(4096), dim3(512), 0, 0, max_size, 10, dev_data_source, dev1_data_out); cudaErrorCheck(); TIMEIT_END("saxpy"); hipMemset(host_data2, 0, data_size); TIMEIT_START(); gpuErrchk(hipMemcpy(host_data2, dev_data_source, data_size, hipMemcpyDeviceToHost)); TIMEIT_END("G->M"); compare_arrays(host_data2, host_data, max_size); PPRINT_THROUGPUT(("MGPU%s", direct_copy ? "copy":"access"), max_size * sizeof(T)); mmCudaFreeAll(manager); } int main(int argc, char *argv[]) { unsigned long max_size = 10000000; printf("%s [size] [dev0_id, dev1_id]\n", argv[0]); if(argc > 1) { if ( atol(argv[1])) max_size = atol(argv[1]); if (argc == 4) { gpuid_0 = atoi(argv[2]); gpuid_1 = atoi(argv[3]); } } printf("Data size: %ld,using device %d and device %d\n", max_size, gpuid_0, gpuid_1 ); int can_access_peer_0_1, can_access_peer_1_0; gpuErrchk(hipDeviceCanAccessPeer(&can_access_peer_0_1, gpuid_0, gpuid_1)); gpuErrchk(hipDeviceCanAccessPeer(&can_access_peer_1_0, gpuid_1, gpuid_0)); printf("can acces device %d->%d: %d %d->%d %d\n",gpuid_0, gpuid_1, can_access_peer_0_1, gpuid_1, gpuid_0, can_access_peer_1_0 ); gpuErrchk(hipSetDevice(gpuid_0)); gpuErrchk(hipDeviceEnablePeerAccess(gpuid_1, 0)); gpuErrchk(hipSetDevice(gpuid_1)); gpuErrchk(hipDeviceEnablePeerAccess(gpuid_0, 0)); multi_gpu <int> (max_size, true); multi_gpu <int> (max_size, false); multi_gpu <long> (max_size, true); multi_gpu <long> (max_size, false); for (int i = 2; i < 32; ++i) { multi_gpu_compress <int> (max_size, i, true); multi_gpu_compress <int> (max_size, i, false); } for (int i = 32; i < 64; ++i) { multi_gpu_compress <long> (max_size, i, true); multi_gpu_compress <long> (max_size, i, false); } return 0; }
1a2552242e4faca28ee14ca9d48bd81577b1e001.cu
#include "tools/tools.cuh" #include "compression/afl.cuh" #include <cuda.h> #include <stdio.h> #include <math.h> int gpuid_0=1, gpuid_1=2; #define PPRINT_THROUGPUT(name, data_size) printf("%c[1;34m",27); printf name; printf("%c[30m,%c[37m ", 27,27); TIMEIT_PRINT_THROUGPUT(data_size); template <typename T> __global__ void saxpy(unsigned long n, int a, T *x, T *y) { // Determine element to process from thread index for (long tid = blockIdx.x * blockDim.x + threadIdx.x; tid < n; tid += blockDim.x * gridDim.x) y[tid] += a*x[tid]; } template <typename T> void multi_gpu_compress(unsigned long max_size, unsigned int bit_length, bool direct_copy) { mmManager manager; T *dev0_data, *dev0_comp_out; T *dev1_data, *dev1_data_out, *dev1_comp_out; T *host_data, *host_data2; unsigned long comp_size = ((max_size * bit_length)/(sizeof(T)*8) + (sizeof(T)*8)) * sizeof(T); unsigned long data_size = max_size * sizeof(T); TIMEIT_SETUP(); mmCudaMallocHost(manager, (void**)&host_data, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data2, max_size * sizeof(T)); big_random_block(max_size, bit_length, host_data); gpuErrchk(cudaSetDevice(gpuid_0)); mmCudaMalloc(manager, (void **) &dev0_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev0_comp_out, comp_size); TIMEIT_START(); gpuErrchk( cudaMemcpy(dev0_data, host_data, data_size, cudaMemcpyHostToDevice) ); TIMEIT_END("M->G"); gpuErrchk(cudaSetDevice(gpuid_1)); mmCudaMalloc(manager, (void **) &dev1_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev1_comp_out, comp_size); gpuErrchk(cudaSetDevice(gpuid_0)); TIMEIT_START(); run_afl_compress_gpu <T, FL_ALGORITHM_MOD_AFL> (bit_length, dev0_data, dev0_comp_out, max_size); cudaErrorCheck(); TIMEIT_END("*C"); T *dev_data_source = dev0_comp_out; gpuErrchk(cudaSetDevice(gpuid_1)); if (direct_copy) { TIMEIT_START(); cudaMemcpyPeer(dev1_comp_out, gpuid_1, dev0_comp_out, gpuid_0, comp_size); TIMEIT_END("*copy"); dev_data_source = dev1_comp_out; cudaErrorCheck(); } TIMEIT_START(); run_afl_decompress_gpu <T, FL_ALGORITHM_MOD_AFL> (bit_length, dev_data_source, dev1_data, max_size); cudaErrorCheck(); TIMEIT_END("*D"); mmCudaFree(manager, dev_data_source); mmCudaMalloc(manager, (void **) &dev1_data_out, max_size * sizeof(T)); TIMEIT_START(); saxpy <<<4096, 512>>> (max_size, 10, dev1_data, dev1_data_out); cudaErrorCheck(); TIMEIT_END("saxpy"); cudaMemset(host_data2, 0, data_size); TIMEIT_START(); gpuErrchk(cudaMemcpy(host_data2, dev1_data, data_size, cudaMemcpyDeviceToHost)); TIMEIT_END("G->M"); compare_arrays(host_data2, host_data, max_size); PPRINT_THROUGPUT(("MGPU%s compr afl%d", direct_copy ? 
"copy":"access", bit_length), max_size * sizeof(T)); mmCudaFreeAll(manager); } template <typename T> void multi_gpu(unsigned long max_size, bool direct_copy) { mmManager manager; T *dev0_data, *dev1_data; T *dev1_data_out; T *host_data, *host_data2; unsigned long data_size = max_size * sizeof(T); gpuErrchk(cudaSetDevice(gpuid_0)); mmCudaMalloc(manager, (void **) &dev0_data, max_size * sizeof(T)); gpuErrchk(cudaSetDevice(gpuid_1)); mmCudaMalloc(manager, (void **) &dev1_data, max_size * sizeof(T)); mmCudaMalloc(manager, (void **) &dev1_data_out, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data, max_size * sizeof(T)); mmCudaMallocHost(manager, (void**)&host_data2, max_size * sizeof(T)); big_random_block(max_size, 31, host_data); // We do not compress this so any bitlen is OK TIMEIT_SETUP(); TIMEIT_START(); gpuErrchk( cudaMemcpy(dev0_data, host_data, data_size, cudaMemcpyHostToDevice) ); TIMEIT_END("M->G"); T *dev_data_source = dev0_data; if (direct_copy) { TIMEIT_START(); /*cudaMemcpy(dev1_data, dev0_data, max_size * sizeof(T), cudaMemcpyDefault);*/ cudaMemcpyPeer(dev1_data, gpuid_1, dev0_data, gpuid_0, max_size * sizeof(T)); TIMEIT_END("*copy"); dev_data_source = dev1_data; cudaErrorCheck(); } TIMEIT_START(); saxpy <<<4096, 512>>> (max_size, 10, dev_data_source, dev1_data_out); cudaErrorCheck(); TIMEIT_END("saxpy"); cudaMemset(host_data2, 0, data_size); TIMEIT_START(); gpuErrchk(cudaMemcpy(host_data2, dev_data_source, data_size, cudaMemcpyDeviceToHost)); TIMEIT_END("G->M"); compare_arrays(host_data2, host_data, max_size); PPRINT_THROUGPUT(("MGPU%s", direct_copy ? "copy":"access"), max_size * sizeof(T)); mmCudaFreeAll(manager); } int main(int argc, char *argv[]) { unsigned long max_size = 10000000; printf("%s [size] [dev0_id, dev1_id]\n", argv[0]); if(argc > 1) { if ( atol(argv[1])) max_size = atol(argv[1]); if (argc == 4) { gpuid_0 = atoi(argv[2]); gpuid_1 = atoi(argv[3]); } } printf("Data size: %ld,using device %d and device %d\n", max_size, gpuid_0, gpuid_1 ); int can_access_peer_0_1, can_access_peer_1_0; gpuErrchk(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpuid_0, gpuid_1)); gpuErrchk(cudaDeviceCanAccessPeer(&can_access_peer_1_0, gpuid_1, gpuid_0)); printf("can acces device %d->%d: %d %d->%d %d\n",gpuid_0, gpuid_1, can_access_peer_0_1, gpuid_1, gpuid_0, can_access_peer_1_0 ); gpuErrchk(cudaSetDevice(gpuid_0)); gpuErrchk(cudaDeviceEnablePeerAccess(gpuid_1, 0)); gpuErrchk(cudaSetDevice(gpuid_1)); gpuErrchk(cudaDeviceEnablePeerAccess(gpuid_0, 0)); multi_gpu <int> (max_size, true); multi_gpu <int> (max_size, false); multi_gpu <long> (max_size, true); multi_gpu <long> (max_size, false); for (int i = 2; i < 32; ++i) { multi_gpu_compress <int> (max_size, i, true); multi_gpu_compress <int> (max_size, i, false); } for (int i = 32; i < 64; ++i) { multi_gpu_compress <long> (max_size, i, true); multi_gpu_compress <long> (max_size, i, false); } return 0; }
6250529d808d416ab97f9361fe9602f5527e2b0d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include <memory> #include <list> #include "dali/pipeline/operator/builtin/external_source.h" namespace dali { template<> void ExternalSource<GPUBackend>::RunImpl(DeviceWorkspace &ws) { std::list<uptr_tl_type> tensor_list_elm; std::list<uptr_cuda_event_type> internal_copy_to_storage; ExternalSourceState state_info; { std::unique_lock<std::mutex> busy_lock(busy_m_); tensor_list_elm = tl_data_.PopFront(); state_info = state_.front(); state_.pop_front(); // even with no_copy we may have copied from TensorVector to TensorList and we // need to sync with that if (!no_copy_ || state_info.copied_shared_data) { internal_copy_to_storage = copy_to_storage_events_.PopFront(); } } auto &output = ws.Output<GPUBackend>(0); hipStream_t stream_used = ws.has_stream() ? ws.stream() : 0; if (!no_copy_ || state_info.copied_shared_data) { CUDA_CALL(hipStreamWaitEvent(stream_used, *internal_copy_to_storage.front(), 0)); } std::swap(output, *tensor_list_elm.front()); if (!no_copy_ || state_info.copied_shared_data) { RecycleBuffer(tensor_list_elm, &internal_copy_to_storage); } else { RecycleBuffer(tensor_list_elm); } } DALI_REGISTER_OPERATOR(_ExternalSource, ExternalSource<GPUBackend>, GPU); DALI_REGISTER_OPERATOR(ExternalSource, ExternalSource<GPUBackend>, GPU); } // namespace dali
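// RunImpl above makes the workspace stream wait on an event that was recorded when the
// internal copy into storage finished. The underlying producer/consumer pattern, reduced to
// its runtime calls (hypothetical buffer and stream names, CUDA runtime API; the source
// buffer must be pinned for the copy to be truly asynchronous):

#include <cuda_runtime.h>

void copy_then_consume(void *device_dst, const void *pinned_src, size_t bytes,
                       cudaStream_t copy_stream, cudaStream_t work_stream,
                       cudaEvent_t copy_done)
{
    // Producer: issue the copy on its own stream and mark completion with an event.
    cudaMemcpyAsync(device_dst, pinned_src, bytes, cudaMemcpyHostToDevice, copy_stream);
    cudaEventRecord(copy_done, copy_stream);

    // Consumer: work_stream will not execute later work until copy_done has fired.
    cudaStreamWaitEvent(work_stream, copy_done, 0);
    // ... kernels launched on work_stream can now safely read device_dst ...
}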
6250529d808d416ab97f9361fe9602f5527e2b0d.cu
// Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include <memory> #include <list> #include "dali/pipeline/operator/builtin/external_source.h" namespace dali { template<> void ExternalSource<GPUBackend>::RunImpl(DeviceWorkspace &ws) { std::list<uptr_tl_type> tensor_list_elm; std::list<uptr_cuda_event_type> internal_copy_to_storage; ExternalSourceState state_info; { std::unique_lock<std::mutex> busy_lock(busy_m_); tensor_list_elm = tl_data_.PopFront(); state_info = state_.front(); state_.pop_front(); // even with no_copy we may have copied from TensorVector to TensorList and we // need to sync with that if (!no_copy_ || state_info.copied_shared_data) { internal_copy_to_storage = copy_to_storage_events_.PopFront(); } } auto &output = ws.Output<GPUBackend>(0); cudaStream_t stream_used = ws.has_stream() ? ws.stream() : 0; if (!no_copy_ || state_info.copied_shared_data) { CUDA_CALL(cudaStreamWaitEvent(stream_used, *internal_copy_to_storage.front(), 0)); } std::swap(output, *tensor_list_elm.front()); if (!no_copy_ || state_info.copied_shared_data) { RecycleBuffer(tensor_list_elm, &internal_copy_to_storage); } else { RecycleBuffer(tensor_list_elm); } } DALI_REGISTER_OPERATOR(_ExternalSource, ExternalSource<GPUBackend>, GPU); DALI_REGISTER_OPERATOR(ExternalSource, ExternalSource<GPUBackend>, GPU); } // namespace dali
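// CUDA_CALL above is DALI's own error-checking wrapper. A minimal stand-in with the same
// shape, for readers compiling the snippet outside DALI (hypothetical macro, not the DALI
// definition):

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                    \
  do {                                                                      \
    cudaError_t _e = (call);                                                \
    if (_e != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                           \
              cudaGetErrorString(_e), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)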
5ba214f77a440f3e3cf3d5e03a7161a90621c770.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> #include <iostream> // REMINDER: unsigned int: 4 Bytes, NUM_CHUNKS * (N * 4) = 1024^2 = 1 MiB #define NUM_BYTES 4 // function declaration void warmUpGPU(); __global__ void warmUp( unsigned int *tmp ); using namespace std; int main( int argc, char **argv ) { // warm up GPU for time trialing warmUpGPU(); // grab arguments from command line int numChunks = atoi( argv[ 1 ] ); int bytesToTransfer = atoi( argv[ 2 ] ); // init variables // set iterators int index, trialIndex; // set lowest chunk size to 1 unsigned int LOWEST_CHUNK_SIZE = 1; // set the number of trials to 10 int NUM_TRIALS = 10; // CUDA error code: hipError_t errCode = hipSuccess; // loop until chunk size is 1, decrementing number of chunks by half each iteration while( numChunks >= LOWEST_CHUNK_SIZE ) { for( trialIndex = 0; trialIndex < NUM_TRIALS; trialIndex++ ) { // data to transfer in the form of an array of unsigned integers // and device data to allocate on GPU unsigned int * dataArray[ numChunks ]; unsigned int * deviceData = NULL; // chunk size variable: N int N = bytesToTransfer / ( numChunks * NUM_BYTES ); // Loop through the array and allocate memory for each element of dataArray for( index = 0; index < numChunks; index++ ) { dataArray[ index ] = ( unsigned int * )malloc( sizeof(unsigned int) * N ); } // allocate on the device: deviceData errCode = hipMalloc( (unsigned int**) &deviceData, sizeof(unsigned int) * N ); if( errCode != hipSuccess ) { cout << "\nError: A error with code " << errCode << endl; } /* // Print the size of data to transfer printf( "\nSize of transferred data (Bytes): %lu\n\n", sizeof(unsigned int) * N * numChunks ); */ // Loop through array and copy each element in dataArray from Host to Device // Do this NUM_CHUNKS amount of times for( index = 0; index < numChunks; index++ ) { errCode = hipMemcpy( deviceData, dataArray[ index ], sizeof(unsigned int) * N, hipMemcpyHostToDevice ); if( errCode != hipSuccess ) { cout << "\nError: A memcpy error with code " << errCode << endl; } hipDeviceSynchronize(); } // free all data on host and device for( index = 0; index < numChunks; index++ ) { free( dataArray[ index ] ); } hipFree( deviceData ); } // decrement chunk size by half numChunks = numChunks / 2; } return EXIT_SUCCESS; } __global__ void warmUp( unsigned int *tmp ) { if( threadIdx.x == 0 ) { *tmp = 555; } return; } void warmUpGPU() { printf( "Warming up GPU for time trialing...\n\n" ); unsigned int *devTmp; unsigned int *tmp; hipError_t errCode = hipSuccess; tmp = (unsigned int *) malloc( sizeof(unsigned int) ); errCode = hipMalloc( (unsigned int **) &devTmp, sizeof(unsigned int) ); if( errCode != hipSuccess ) { cout << "Error: devTmp error with code " << errCode << endl; } hipLaunchKernelGGL(( warmUp), dim3(1),dim3(256), 0, 0, devTmp ); errCode = hipMemcpy( tmp, devTmp, sizeof(unsigned int), hipMemcpyDeviceToHost ); if( errCode != hipSuccess ) { cout << "Error: getting tmp result from GPU error with code " << errCode << endl; } hipDeviceSynchronize(); printf( "tmp (changed to 555 on GPU): %d\n\n", *tmp ); hipFree( devTmp ); return; }
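// The benchmark above stages each chunk in pageable memory from malloc(). Pinned host memory
// is the usual variant to measure alongside it, since the copy engine can DMA from it
// directly. A minimal sketch of one pinned-staging transfer (hypothetical helper, CUDA
// runtime API):

#include <cuda_runtime.h>
#include <cstring>

static cudaError_t copy_chunk_pinned(unsigned int *device_dst, const unsigned int *host_src, size_t n)
{
    unsigned int *pinned = NULL;
    cudaError_t err = cudaMallocHost((void **)&pinned, n * sizeof(unsigned int));
    if (err != cudaSuccess) return err;

    memcpy(pinned, host_src, n * sizeof(unsigned int));              // stage into pinned memory
    err = cudaMemcpy(device_dst, pinned, n * sizeof(unsigned int),
                     cudaMemcpyHostToDevice);
    cudaFreeHost(pinned);
    return err;
}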
5ba214f77a440f3e3cf3d5e03a7161a90621c770.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> #include <iostream> // REMINDER: unsigned int: 4 Bytes, NUM_CHUNKS * (N * 4) = 1024^2 = 1 MiB #define NUM_BYTES 4 // function declaration void warmUpGPU(); __global__ void warmUp( unsigned int *tmp ); using namespace std; int main( int argc, char **argv ) { // warm up GPU for time trialing warmUpGPU(); // grab arguments from command line int numChunks = atoi( argv[ 1 ] ); int bytesToTransfer = atoi( argv[ 2 ] ); // init variables // set iterators int index, trialIndex; // set lowest chunk size to 1 unsigned int LOWEST_CHUNK_SIZE = 1; // set the number of trials to 10 int NUM_TRIALS = 10; // CUDA error code: cudaError_t errCode = cudaSuccess; // loop until chunk size is 1, decrementing number of chunks by half each iteration while( numChunks >= LOWEST_CHUNK_SIZE ) { for( trialIndex = 0; trialIndex < NUM_TRIALS; trialIndex++ ) { // data to transfer in the form of an array of unsigned integers // and device data to allocate on GPU unsigned int * dataArray[ numChunks ]; unsigned int * deviceData = NULL; // chunk size variable: N int N = bytesToTransfer / ( numChunks * NUM_BYTES ); // Loop through the array and allocate memory for each element of dataArray for( index = 0; index < numChunks; index++ ) { dataArray[ index ] = ( unsigned int * )malloc( sizeof(unsigned int) * N ); } // allocate on the device: deviceData errCode = cudaMalloc( (unsigned int**) &deviceData, sizeof(unsigned int) * N ); if( errCode != cudaSuccess ) { cout << "\nError: A error with code " << errCode << endl; } /* // Print the size of data to transfer printf( "\nSize of transferred data (Bytes): %lu\n\n", sizeof(unsigned int) * N * numChunks ); */ // Loop through array and copy each element in dataArray from Host to Device // Do this NUM_CHUNKS amount of times for( index = 0; index < numChunks; index++ ) { errCode = cudaMemcpy( deviceData, dataArray[ index ], sizeof(unsigned int) * N, cudaMemcpyHostToDevice ); if( errCode != cudaSuccess ) { cout << "\nError: A memcpy error with code " << errCode << endl; } cudaDeviceSynchronize(); } // free all data on host and device for( index = 0; index < numChunks; index++ ) { free( dataArray[ index ] ); } cudaFree( deviceData ); } // decrement chunk size by half numChunks = numChunks / 2; } return EXIT_SUCCESS; } __global__ void warmUp( unsigned int *tmp ) { if( threadIdx.x == 0 ) { *tmp = 555; } return; } void warmUpGPU() { printf( "Warming up GPU for time trialing...\n\n" ); unsigned int *devTmp; unsigned int *tmp; cudaError_t errCode = cudaSuccess; tmp = (unsigned int *) malloc( sizeof(unsigned int) ); errCode = cudaMalloc( (unsigned int **) &devTmp, sizeof(unsigned int) ); if( errCode != cudaSuccess ) { cout << "Error: devTmp error with code " << errCode << endl; } warmUp<<<1,256>>>( devTmp ); errCode = cudaMemcpy( tmp, devTmp, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if( errCode != cudaSuccess ) { cout << "Error: getting tmp result from GPU error with code " << errCode << endl; } cudaDeviceSynchronize(); printf( "tmp (changed to 555 on GPU): %d\n\n", *tmp ); cudaFree( devTmp ); return; }
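// The transfer loop above calls cudaDeviceSynchronize() after every chunk. With pinned
// buffers, the same chunks can be queued with cudaMemcpyAsync on one stream and synchronized
// once at the end, which is closer to how chunked transfers are normally pipelined.
// Hypothetical sketch (device_dst is overwritten per chunk, mirroring the benchmark):

#include <cuda_runtime.h>

static cudaError_t copy_chunks_async(unsigned int *device_dst,
                                     unsigned int *const *pinned_chunks,
                                     int num_chunks, size_t elems_per_chunk,
                                     cudaStream_t stream)
{
    for (int i = 0; i < num_chunks; ++i) {
        cudaError_t err = cudaMemcpyAsync(device_dst, pinned_chunks[i],
                                          elems_per_chunk * sizeof(unsigned int),
                                          cudaMemcpyHostToDevice, stream);
        if (err != cudaSuccess) return err;
    }
    return cudaStreamSynchronize(stream);   // one synchronization for the whole batch
}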
80a0b55b7bf363a3be3f2676561a7c5680169a24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <nppi.h> #include <stdio.h> #define INF (255.0f * 255.0f * 3 * 8 + 1) #define _FIXED(x) rintf(1e1f * (x)) struct _GMM_t { float det; float sigma_inv[9]; unsigned int count; } GMM_t; __device__ __forceinline__ float get_component(uchar4 pixel, int i) { switch (i) { case 0 : return 1.0f; case 1 : return pixel.x; case 2 : return pixel.y; case 3 : return pixel.z; case 4 : return pixel.x * pixel.x; case 5 : return pixel.x * pixel.y; case 6 : return pixel.x * pixel.z; case 7 : return pixel.y * pixel.y; case 8 : return pixel.y * pixel.z; case 9 : return pixel.z * pixel.z; }; return 0.0f; } __device__ __forceinline__ float get_constant(float *gmm, int i) { const float epsilon = 1.0e-3f; switch (i) { case 0 : return 0.0f; case 1 : return 0.0f; case 2 : return 0.0f; case 3 : return 0.0f; case 4 : return gmm[1] * gmm[1] + epsilon; case 5 : return gmm[1] * gmm[2]; case 6 : return gmm[1] * gmm[3]; case 7 : return gmm[2] * gmm[2] + epsilon; case 8 : return gmm[2] * gmm[3]; case 9 : return gmm[3] * gmm[3] + epsilon; }; return 0.0f; } // Tile Size: 32x32, Block Size 32xwarp_N template<int warp_N, bool create_gmm_flags> __global__ void GMMReductionKernel(int gmm_idx, float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height, unsigned int *tile_gmms) { __shared__ uchar4 s_lists[32*32]; __shared__ volatile float s_gmm[32*warp_N]; __shared__ float s_final[warp_N]; __shared__ int gmm_flags[32]; const int warp_idx = threadIdx.y; const int thread_idx = threadIdx.y * 32 + threadIdx.x; const int lane_idx = threadIdx.x; float *block_gmm = &gmm[(gridDim.x * gridDim.y * gmm_idx + blockIdx.y * gridDim.x + blockIdx.x) * gmm_pitch]; volatile float *warp_gmm = &s_gmm[warp_idx * 32]; if (create_gmm_flags) { if (threadIdx.y == 0) { gmm_flags[threadIdx.x] = 0; } __syncthreads(); } else { unsigned int gmm_mask = tile_gmms[blockIdx.y * gridDim.x + blockIdx.x]; if ((gmm_mask & (1u << gmm_idx)) == 0) { if (threadIdx.x < 10 && threadIdx.y ==0) { block_gmm[threadIdx.x] = 0.0f; } return; } } int list_idx = 0; int y = blockIdx.y * 32 + threadIdx.y; int x = blockIdx.x * 32 + threadIdx.x; // Build lists of pixels that belong to this GMM for (int k=0; k < (32/warp_N); ++k) { if (x < width && y < height) { int my_gmm_idx = alpha[y * alpha_pitch + x]; if (create_gmm_flags) { gmm_flags[my_gmm_idx] = 1; } if (my_gmm_idx == gmm_idx) { uchar4 pixel = image[y * image_pitch + x]; s_lists[thread_idx + list_idx * (32*warp_N)] = pixel; ++list_idx; } } y += warp_N; } __syncthreads(); if (threadIdx.y == 0 && create_gmm_flags) { #if __CUDA_ARCH__ < 200 unsigned int gmm_flags_bvec = 0; for (int i=0; i<32; ++i) { if (gmm_flags[i] > 0) { gmm_flags_bvec |= 1 << i; } } tile_gmms[blockIdx.y * gridDim.x + blockIdx.x] = gmm_flags_bvec; #else tile_gmms[blockIdx.y * gridDim.x + blockIdx.x] = __ballot(gmm_flags[threadIdx.x] > 0); #endif } // Reduce for each global GMM element for (int i=0; i<10; ++i) { float thread_gmm; if (i == 0) { // thread_gmm = list_idx for first component thread_gmm = list_idx; } else { 
thread_gmm = list_idx > 0 ? get_component(s_lists[thread_idx],i) : 0.0f; for (int k=1; k<(32/warp_N) && k < list_idx; ++k) { thread_gmm += get_component(s_lists[thread_idx + k * (32*warp_N)], i); } } warp_gmm[lane_idx] = thread_gmm; // Warp Reductions thread_gmm += warp_gmm[(lane_idx + 16) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 8) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 4) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 2) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 1) & 31]; s_final[warp_idx] = thread_gmm; __syncthreads(); // Final Reduction if (warp_idx ==0 && lane_idx == 0) { for (int j=1; j<warp_N; ++j) { thread_gmm += s_final[j]; } block_gmm[i] = thread_gmm; } } } __constant__ int det_indices[] = { (9 << (4*4)) + (4 << (3*4)) + (6 << (2*4)) + (5 << (1*4)) + (4 << (0*4)), (5 << (4*4)) + (8 << (3*4)) + (6 << (2*4)) + (6 << (1*4)) + (7 << (0*4)), (5 << (4*4)) + (8 << (3*4)) + (7 << (2*4)) + (8 << (1*4)) + (9 << (0*4)) }; __constant__ int inv_indices[] = { (4 << (5*4)) + (5 << (4*4)) + (4 << (3*4)) + (5 << (2*4)) + (6 << (1*4)) + (7 << (0*4)), (7 << (5*4)) + (6 << (4*4)) + (9 << (3*4)) + (8 << (2*4)) + (8 << (1*4)) + (9 << (0*4)), (5 << (5*4)) + (4 << (4*4)) + (6 << (3*4)) + (6 << (2*4)) + (5 << (1*4)) + (8 << (0*4)), (5 << (5*4)) + (8 << (4*4)) + (6 << (3*4)) + (7 << (2*4)) + (9 << (1*4)) + (8 << (0*4)) }; // One block per GMM, 32*warp_N threads (1-dim) template <int warp_N, bool invertSigma> __global__ void GMMFinalizeKernel(float *gmm, float *gmm_scratch, int gmm_pitch, int N) { __shared__ volatile float s_gmm[warp_N*32]; __shared__ float s_final[warp_N]; __shared__ float final_gmm[15]; const int thread_N = warp_N * 32; float *gmm_partial = &gmm_scratch[N*blockIdx.x*gmm_pitch]; volatile float *warp_gmm = &s_gmm[threadIdx.x & 0x0ffe0]; int thread_idx = threadIdx.x; int lane_idx = threadIdx.x & 31; int warp_idx = threadIdx.x >> 5; float norm_factor = 1.0f; for (int i=0; i<10; ++i) { float thread_gmm = 0.0f; for (int j=thread_idx; j < N; j+= thread_N) { thread_gmm += gmm_partial[j * gmm_pitch + i]; } warp_gmm[lane_idx] = thread_gmm; // Warp Reduction thread_gmm += warp_gmm[(lane_idx + 16) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 8) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 4) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 2) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 1) & 31]; s_final[warp_idx] = thread_gmm; __syncthreads(); // Final Reduction if (warp_idx ==0 && lane_idx == 0) { for (int j=1; j<warp_N; ++j) { thread_gmm += s_final[j]; } final_gmm[i] = norm_factor * thread_gmm - get_constant(final_gmm, i); if (i == 0) { if (thread_gmm > 0) { norm_factor = 1.0f / thread_gmm; } } } } if (threadIdx.y == 0) { // Compute det(Sigma) using final_gmm [10-14] as scratch mem if (threadIdx.x < 5) { int idx0 = (det_indices[0] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx1 = (det_indices[1] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx2 = (det_indices[2] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); final_gmm[10 + threadIdx.x] = final_gmm[idx0] * final_gmm[idx1] * final_gmm[idx2]; float det = final_gmm[10] + 2.0f * final_gmm[11] - final_gmm[12] - final_gmm[13] - final_gmm[14]; final_gmm[10] = det; } // Compute inv(Sigma) if (invertSigma && threadIdx.x < 6) { int idx0 = (inv_indices[0] & (15 << (threadIdx.x * 4))) >> 
(threadIdx.x * 4); int idx1 = (inv_indices[1] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx2 = (inv_indices[2] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx3 = (inv_indices[3] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); float temp = final_gmm[idx0] * final_gmm[idx1] - final_gmm[idx2] * final_gmm[idx3]; if (final_gmm[10] > 0.0f) { final_gmm[4+threadIdx.x] = temp / final_gmm[10]; } else { final_gmm[4+threadIdx.x] = 0.0f; } } if (threadIdx.x < 11) { gmm[blockIdx.x * gmm_pitch + threadIdx.x] = final_gmm[threadIdx.x]; } } } // Single block, 32x2 __global__ void GMMcommonTerm(int gmmK, float *gmm, int gmm_pitch) { __shared__ volatile float s_n[2][32]; int gmm_idx = (threadIdx.x * 2) | threadIdx.y; float gmm_n = threadIdx.x < gmmK ? gmm[gmm_idx * gmm_pitch] : 0.0f; float sum = gmm_n; s_n[threadIdx.y][threadIdx.x] = sum; // Warp Reduction sum += s_n[threadIdx.y][(threadIdx.x + 16) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 8) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 4) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 2) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 1) & 31]; if (threadIdx.x < gmmK) { float det = gmm[gmm_idx * gmm_pitch + 10]; float commonTerm = gmm_n / (sqrtf(det) * sum); gmm[gmm_idx * gmm_pitch + 10] = commonTerm; } } hipError_t GMMUpdate(int gmm_N, float *gmm, float *scratch_mem, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 grid((width+31) / 32, (height+31) / 32); dim3 block(32,4); hipLaunchKernelGGL(( GMMReductionKernel<4, true>), dim3(grid), dim3(block), 0, 0, 0, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); for (int i=1; i<gmm_N; ++i) { hipLaunchKernelGGL(( GMMReductionKernel<4, false>), dim3(grid), dim3(block), 0, 0, i, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); } hipLaunchKernelGGL(( GMMFinalizeKernel<4, true>), dim3(gmm_N), dim3(32 *4), 0, 0, gmm, &scratch_mem[grid.x *grid.y], gmm_pitch/4, grid.x *grid.y); block.x = 32; block.y = 2; hipLaunchKernelGGL(( GMMcommonTerm), dim3(1), dim3(block), 0, 0, gmm_N / 2, gmm, gmm_pitch/4); return hipGetLastError(); } __device__ float GMMTerm(uchar4 pixel, const float *gmm) { float3 v = make_float3(pixel.x - gmm[1], pixel.y - gmm[2], pixel.z - gmm[3]); float xxa = v.x * v.x * gmm[4]; float yyd = v.y * v.y * gmm[7]; float zzf = v.z * v.z * gmm[9]; float yxb = v.x * v.y * gmm[5]; float zxc = v.z * v.x * gmm[6]; float zye = v.z * v.y * gmm[8]; return gmm[10] * expf(-0.5f * (xxa + yyd + zzf + 2.0f * (yxb + zxc + zye))); } __global__ void GMMDataTermKernel(Npp32s *terminals, int terminal_pitch, int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, const unsigned char *trimap, int trimap_pitch, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char c = trimap[y*trimap_pitch+x]; Npp32f data; if (c == 0) { // Definitely Background data = -INF; } else if (c == 2) { // Definitely Foreground data = + INF; } else { // Unknown uchar4 pixel = image[y * image_pitch + x]; Npp32f data_bg = GMMTerm(pixel, gmm); Npp32f data_fg = GMMTerm(pixel, &gmm[gmm_pitch]); for (int i=2; i<gmmN; i+=2) { 
data_bg += GMMTerm(pixel, &gmm[(i) * gmm_pitch]); data_fg += GMMTerm(pixel, &gmm[(i+1) * gmm_pitch]); } data_bg = -logf(data_bg); data_fg = -logf(data_fg); data = data_bg - data_fg; data = max(min(data, INF),-INF); } terminals[y*terminal_pitch + x] = _FIXED(data); } } hipError_t GMMDataTerm(Npp32s *terminals, int terminal_pitch, int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, const unsigned char *trimap, int trimap_pitch, int width, int height) { dim3 block(32,8); dim3 grid((width+block.x-1) / block.x, (height+block.y-1) / block.y); hipLaunchKernelGGL(( GMMDataTermKernel), dim3(grid), dim3(block), 0, 0, terminals, terminal_pitch/4, gmmN, gmm, gmm_pitch/4, image, image_pitch/4, trimap, trimap_pitch, width, height); return hipGetLastError(); } __global__ void GMMAssignKernel(int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *g_alpha, int alpha_pitch, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char alpha = g_alpha[y*alpha_pitch+x] & 1; // Unknown uchar4 pixel = image[y * image_pitch + x]; int alpha_min = alpha; float max_prob = GMMTerm(pixel, &gmm[(alpha_min) * gmm_pitch]); for (int i=alpha+2; i<gmmN; i+=2) { float prob = GMMTerm(pixel, &gmm[(i) * gmm_pitch]); if (prob > max_prob) { alpha_min = i; max_prob = prob; } } g_alpha[y*alpha_pitch+x] = alpha_min; } } hipError_t GMMAssign(int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 block(32,16); dim3 grid((width+block.x-1) / block.x, (height+block.y-1) / block.y); hipLaunchKernelGGL(( GMMAssignKernel), dim3(grid), dim3(block), 0, 0, gmmN, gmm, gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height); return hipGetLastError(); } __device__ float3 normalize(float3 v) { float norm = 1.0f / sqrtf(v.x * v.x + v.y * v.y + v.z * v.z); return make_float3(v.x * norm, v.y * norm, v.z * norm); } __device__ float3 mul_right(const float *M, float3 v) { return make_float3( M[0] * v.x + M[1] * v.y + M[2] * v.z, M[1] * v.x + M[3] * v.y + M[4] * v.z, M[2] * v.x + M[4] * v.y + M[5] * v.z); } __device__ float largest_eigenvalue(const float *M) { float norm = M[0] > M[3] ? M[0] : M[3]; norm = M[0] > M[5] ? 
M[0] : M[5]; norm = 1.0f / norm; float a00 = norm * M[0]; float a01 = norm * M[1]; float a02 = norm * M[2]; float a11 = norm * M[3]; float a12 = norm * M[4]; float a22 = norm * M[5]; float c0 = a00*a11*a22 + 2.0f*a01*a02*a12 - a00*a12*a12 - a11*a02*a02 - a22*a01*a01; float c1 = a00*a11 - a01*a01 + a00*a22 - a02*a02 + a11*a22 - a12*a12; float c2 = a00 + a11 + a22; const float inv3 = 1.0f / 3.0f; const float root3 = sqrtf(3.0f); float c2Div3 = c2*inv3; float aDiv3 = (c1 - c2*c2Div3)*inv3; if (aDiv3 > 0.0f) { aDiv3 = 0.0f; } float mbDiv2 = 0.5f*(c0 + c2Div3*(2.0f*c2Div3*c2Div3 - c1)); float q = mbDiv2*mbDiv2 + aDiv3*aDiv3*aDiv3; if (q > 0.0f) { q = 0.0f; } float magnitude = sqrtf(-aDiv3); float angle = atan2(sqrtf(-q),mbDiv2)*inv3; float cs = cos(angle); float sn = sin(angle); float largest_eigenvalue = c2Div3 + 2.0f*magnitude*cs; float eigenvalue = c2Div3 - magnitude*(cs + root3*sn); if (eigenvalue > largest_eigenvalue) { largest_eigenvalue = eigenvalue; } eigenvalue = c2Div3 - magnitude*(cs - root3*sn); if (eigenvalue > largest_eigenvalue) { largest_eigenvalue = eigenvalue; } return largest_eigenvalue / norm; } __device__ float3 cross_prod(float3 a, float3 b) { return make_float3((a.y*b.z)-(a.z*b.y), (a.z*b.x)-(a.x*b.z), (a.x*b.y)-(a.y*b.x)); } __device__ float3 compute_eigenvector(const float *M, float eigenvalue) { float3 r0 = make_float3(M[0] - eigenvalue, M[1], M[2]); float3 r1 = make_float3(M[2] , M[3]- eigenvalue, M[4]); float3 eigenvector = cross_prod(r0,r1); return normalize(eigenvector); } __device__ void largest_eigenvalue_eigenvector(const float *M, float3 &evec, float &eval) { eval = largest_eigenvalue(M); evec = compute_eigenvector(M, eval); } __device__ float scalar_prod(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } struct GMMSplit_t { int idx; float threshold; float3 eigenvector; }; // 1 Block, 32x2 __global__ void GMMFindSplit(GMMSplit_t *gmmSplit, int gmmK, float *gmm, int gmm_pitch) { __shared__ float s_eigenvalues[2][32]; int gmm_idx = (threadIdx.x << 1) + threadIdx.y; float eigenvalue = 0; float3 eigenvector; if (threadIdx.x < gmmK) { largest_eigenvalue_eigenvector(&gmm[gmm_idx * gmm_pitch + 4], eigenvector, eigenvalue); } // Warp Reduction float maxvalue = eigenvalue; s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+16) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+8) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+4) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+2) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+1) & 31]); if (maxvalue == eigenvalue) { GMMSplit_t split; split.idx = threadIdx.x; split.threshold = scalar_prod(make_float3(gmm[gmm_idx * gmm_pitch + 1], gmm[gmm_idx * gmm_pitch + 2], gmm[gmm_idx * gmm_pitch + 3]), eigenvector); split.eigenvector = eigenvector; gmmSplit[threadIdx.y] = split; } } __global__ void GMMDoSplit(const GMMSplit_t *gmmSplit, int k, float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { __shared__ GMMSplit_t s_gmmSplit[2]; int *s_linear = (int *) s_gmmSplit; int *g_linear = (int *) gmmSplit; if (threadIdx.y ==0 && threadIdx.x < 10) { s_linear[threadIdx.x] = 
g_linear[threadIdx.x]; } __syncthreads(); int x = blockIdx.x * 32 + threadIdx.x; int y0 = blockIdx.y * 32; for (int i = threadIdx.y; i < 32; i += blockDim.y) { int y = y0 + i; if (x < width && y < height) { unsigned char my_alpha = alpha[y * alpha_pitch + x]; int select = my_alpha & 1; int gmm_idx = my_alpha >> 1; if (gmm_idx == s_gmmSplit[select].idx) { // in the split cluster now uchar4 pixel = image[y * image_pitch + x]; float value = scalar_prod(s_gmmSplit[select].eigenvector, make_float3(pixel.x, pixel.y, pixel.z)); if (value > s_gmmSplit[select].threshold) { // assign pixel to new cluster alpha[y * alpha_pitch + x] = k + select; } } } } } hipError_t GMMInitialize(int gmm_N, float *gmm, float *scratch_mem, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 grid((width+31) / 32, (height+31) / 32); dim3 block(32,4); dim3 smallblock(32,2); for (int k = 2; k < gmm_N; k+=2) { hipLaunchKernelGGL(( GMMReductionKernel<4, true>), dim3(grid), dim3(block), 0, 0, 0, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); for (int i=1; i < k; ++i) { hipLaunchKernelGGL(( GMMReductionKernel<4, false>), dim3(grid), dim3(block), 0, 0, i, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); } hipLaunchKernelGGL(( GMMFinalizeKernel<4, false>), dim3(k), dim3(32 *4), 0, 0, gmm, &scratch_mem[grid.x *grid.y], gmm_pitch/4, grid.x *grid.y); hipLaunchKernelGGL(( GMMFindSplit), dim3(1), dim3(smallblock), 0, 0, (GMMSplit_t *) scratch_mem, k / 2, gmm, gmm_pitch/4); hipLaunchKernelGGL(( GMMDoSplit), dim3(grid), dim3(block), 0, 0, (GMMSplit_t *) scratch_mem, (k/2) << 1, gmm, gmm_pitch/4, image, image_pitch / 4, alpha, alpha_pitch, width, height); } return hipGetLastError(); }
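// The reductions in GMMReductionKernel, GMMFinalizeKernel and GMMcommonTerm use a
// shared-memory butterfly across 32 lanes. On devices with warp shuffles the same 32-lane sum
// can be expressed without shared memory; a minimal sketch (hypothetical helper, CUDA 9+
// __shfl_down_sync shown here; HIP's unsynchronized __shfl_down has the same shape):

__device__ __forceinline__ float warp_reduce_sum(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;   // lane 0 ends up holding the sum of all 32 lanes
}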
80a0b55b7bf363a3be3f2676561a7c5680169a24.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <nppi.h> #include <stdio.h> #define INF (255.0f * 255.0f * 3 * 8 + 1) #define _FIXED(x) rintf(1e1f * (x)) struct _GMM_t { float det; float sigma_inv[9]; unsigned int count; } GMM_t; __device__ __forceinline__ float get_component(uchar4 pixel, int i) { switch (i) { case 0 : return 1.0f; case 1 : return pixel.x; case 2 : return pixel.y; case 3 : return pixel.z; case 4 : return pixel.x * pixel.x; case 5 : return pixel.x * pixel.y; case 6 : return pixel.x * pixel.z; case 7 : return pixel.y * pixel.y; case 8 : return pixel.y * pixel.z; case 9 : return pixel.z * pixel.z; }; return 0.0f; } __device__ __forceinline__ float get_constant(float *gmm, int i) { const float epsilon = 1.0e-3f; switch (i) { case 0 : return 0.0f; case 1 : return 0.0f; case 2 : return 0.0f; case 3 : return 0.0f; case 4 : return gmm[1] * gmm[1] + epsilon; case 5 : return gmm[1] * gmm[2]; case 6 : return gmm[1] * gmm[3]; case 7 : return gmm[2] * gmm[2] + epsilon; case 8 : return gmm[2] * gmm[3]; case 9 : return gmm[3] * gmm[3] + epsilon; }; return 0.0f; } // Tile Size: 32x32, Block Size 32xwarp_N template<int warp_N, bool create_gmm_flags> __global__ void GMMReductionKernel(int gmm_idx, float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height, unsigned int *tile_gmms) { __shared__ uchar4 s_lists[32*32]; __shared__ volatile float s_gmm[32*warp_N]; __shared__ float s_final[warp_N]; __shared__ int gmm_flags[32]; const int warp_idx = threadIdx.y; const int thread_idx = threadIdx.y * 32 + threadIdx.x; const int lane_idx = threadIdx.x; float *block_gmm = &gmm[(gridDim.x * gridDim.y * gmm_idx + blockIdx.y * gridDim.x + blockIdx.x) * gmm_pitch]; volatile float *warp_gmm = &s_gmm[warp_idx * 32]; if (create_gmm_flags) { if (threadIdx.y == 0) { gmm_flags[threadIdx.x] = 0; } __syncthreads(); } else { unsigned int gmm_mask = tile_gmms[blockIdx.y * gridDim.x + blockIdx.x]; if ((gmm_mask & (1u << gmm_idx)) == 0) { if (threadIdx.x < 10 && threadIdx.y ==0) { block_gmm[threadIdx.x] = 0.0f; } return; } } int list_idx = 0; int y = blockIdx.y * 32 + threadIdx.y; int x = blockIdx.x * 32 + threadIdx.x; // Build lists of pixels that belong to this GMM for (int k=0; k < (32/warp_N); ++k) { if (x < width && y < height) { int my_gmm_idx = alpha[y * alpha_pitch + x]; if (create_gmm_flags) { gmm_flags[my_gmm_idx] = 1; } if (my_gmm_idx == gmm_idx) { uchar4 pixel = image[y * image_pitch + x]; s_lists[thread_idx + list_idx * (32*warp_N)] = pixel; ++list_idx; } } y += warp_N; } __syncthreads(); if (threadIdx.y == 0 && create_gmm_flags) { #if __CUDA_ARCH__ < 200 unsigned int gmm_flags_bvec = 0; for (int i=0; i<32; ++i) { if (gmm_flags[i] > 0) { gmm_flags_bvec |= 1 << i; } } tile_gmms[blockIdx.y * gridDim.x + blockIdx.x] = gmm_flags_bvec; #else tile_gmms[blockIdx.y * gridDim.x + blockIdx.x] = __ballot(gmm_flags[threadIdx.x] > 0); #endif } // Reduce for each global GMM element for (int i=0; i<10; ++i) { float thread_gmm; if (i == 0) { // thread_gmm = list_idx for first component thread_gmm = list_idx; } else { thread_gmm = list_idx > 0 ? 
get_component(s_lists[thread_idx],i) : 0.0f; for (int k=1; k<(32/warp_N) && k < list_idx; ++k) { thread_gmm += get_component(s_lists[thread_idx + k * (32*warp_N)], i); } } warp_gmm[lane_idx] = thread_gmm; // Warp Reductions thread_gmm += warp_gmm[(lane_idx + 16) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 8) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 4) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 2) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 1) & 31]; s_final[warp_idx] = thread_gmm; __syncthreads(); // Final Reduction if (warp_idx ==0 && lane_idx == 0) { for (int j=1; j<warp_N; ++j) { thread_gmm += s_final[j]; } block_gmm[i] = thread_gmm; } } } __constant__ int det_indices[] = { (9 << (4*4)) + (4 << (3*4)) + (6 << (2*4)) + (5 << (1*4)) + (4 << (0*4)), (5 << (4*4)) + (8 << (3*4)) + (6 << (2*4)) + (6 << (1*4)) + (7 << (0*4)), (5 << (4*4)) + (8 << (3*4)) + (7 << (2*4)) + (8 << (1*4)) + (9 << (0*4)) }; __constant__ int inv_indices[] = { (4 << (5*4)) + (5 << (4*4)) + (4 << (3*4)) + (5 << (2*4)) + (6 << (1*4)) + (7 << (0*4)), (7 << (5*4)) + (6 << (4*4)) + (9 << (3*4)) + (8 << (2*4)) + (8 << (1*4)) + (9 << (0*4)), (5 << (5*4)) + (4 << (4*4)) + (6 << (3*4)) + (6 << (2*4)) + (5 << (1*4)) + (8 << (0*4)), (5 << (5*4)) + (8 << (4*4)) + (6 << (3*4)) + (7 << (2*4)) + (9 << (1*4)) + (8 << (0*4)) }; // One block per GMM, 32*warp_N threads (1-dim) template <int warp_N, bool invertSigma> __global__ void GMMFinalizeKernel(float *gmm, float *gmm_scratch, int gmm_pitch, int N) { __shared__ volatile float s_gmm[warp_N*32]; __shared__ float s_final[warp_N]; __shared__ float final_gmm[15]; const int thread_N = warp_N * 32; float *gmm_partial = &gmm_scratch[N*blockIdx.x*gmm_pitch]; volatile float *warp_gmm = &s_gmm[threadIdx.x & 0x0ffe0]; int thread_idx = threadIdx.x; int lane_idx = threadIdx.x & 31; int warp_idx = threadIdx.x >> 5; float norm_factor = 1.0f; for (int i=0; i<10; ++i) { float thread_gmm = 0.0f; for (int j=thread_idx; j < N; j+= thread_N) { thread_gmm += gmm_partial[j * gmm_pitch + i]; } warp_gmm[lane_idx] = thread_gmm; // Warp Reduction thread_gmm += warp_gmm[(lane_idx + 16) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 8) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 4) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 2) & 31]; warp_gmm[lane_idx] = thread_gmm; thread_gmm += warp_gmm[(lane_idx + 1) & 31]; s_final[warp_idx] = thread_gmm; __syncthreads(); // Final Reduction if (warp_idx ==0 && lane_idx == 0) { for (int j=1; j<warp_N; ++j) { thread_gmm += s_final[j]; } final_gmm[i] = norm_factor * thread_gmm - get_constant(final_gmm, i); if (i == 0) { if (thread_gmm > 0) { norm_factor = 1.0f / thread_gmm; } } } } if (threadIdx.y == 0) { // Compute det(Sigma) using final_gmm [10-14] as scratch mem if (threadIdx.x < 5) { int idx0 = (det_indices[0] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx1 = (det_indices[1] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx2 = (det_indices[2] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); final_gmm[10 + threadIdx.x] = final_gmm[idx0] * final_gmm[idx1] * final_gmm[idx2]; float det = final_gmm[10] + 2.0f * final_gmm[11] - final_gmm[12] - final_gmm[13] - final_gmm[14]; final_gmm[10] = det; } // Compute inv(Sigma) if (invertSigma && threadIdx.x < 6) { int idx0 = (inv_indices[0] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx1 = 
(inv_indices[1] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx2 = (inv_indices[2] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); int idx3 = (inv_indices[3] & (15 << (threadIdx.x * 4))) >> (threadIdx.x * 4); float temp = final_gmm[idx0] * final_gmm[idx1] - final_gmm[idx2] * final_gmm[idx3]; if (final_gmm[10] > 0.0f) { final_gmm[4+threadIdx.x] = temp / final_gmm[10]; } else { final_gmm[4+threadIdx.x] = 0.0f; } } if (threadIdx.x < 11) { gmm[blockIdx.x * gmm_pitch + threadIdx.x] = final_gmm[threadIdx.x]; } } } // Single block, 32x2 __global__ void GMMcommonTerm(int gmmK, float *gmm, int gmm_pitch) { __shared__ volatile float s_n[2][32]; int gmm_idx = (threadIdx.x * 2) | threadIdx.y; float gmm_n = threadIdx.x < gmmK ? gmm[gmm_idx * gmm_pitch] : 0.0f; float sum = gmm_n; s_n[threadIdx.y][threadIdx.x] = sum; // Warp Reduction sum += s_n[threadIdx.y][(threadIdx.x + 16) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 8) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 4) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 2) & 31]; s_n[threadIdx.y][threadIdx.x] = sum; sum += s_n[threadIdx.y][(threadIdx.x + 1) & 31]; if (threadIdx.x < gmmK) { float det = gmm[gmm_idx * gmm_pitch + 10]; float commonTerm = gmm_n / (sqrtf(det) * sum); gmm[gmm_idx * gmm_pitch + 10] = commonTerm; } } cudaError_t GMMUpdate(int gmm_N, float *gmm, float *scratch_mem, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 grid((width+31) / 32, (height+31) / 32); dim3 block(32,4); GMMReductionKernel<4, true><<<grid, block>>>(0, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); for (int i=1; i<gmm_N; ++i) { GMMReductionKernel<4, false><<<grid, block>>>(i, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); } GMMFinalizeKernel<4, true><<<gmm_N, 32 *4>>>(gmm, &scratch_mem[grid.x *grid.y], gmm_pitch/4, grid.x *grid.y); block.x = 32; block.y = 2; GMMcommonTerm<<<1, block>>>(gmm_N / 2, gmm, gmm_pitch/4); return cudaGetLastError(); } __device__ float GMMTerm(uchar4 pixel, const float *gmm) { float3 v = make_float3(pixel.x - gmm[1], pixel.y - gmm[2], pixel.z - gmm[3]); float xxa = v.x * v.x * gmm[4]; float yyd = v.y * v.y * gmm[7]; float zzf = v.z * v.z * gmm[9]; float yxb = v.x * v.y * gmm[5]; float zxc = v.z * v.x * gmm[6]; float zye = v.z * v.y * gmm[8]; return gmm[10] * expf(-0.5f * (xxa + yyd + zzf + 2.0f * (yxb + zxc + zye))); } __global__ void GMMDataTermKernel(Npp32s *terminals, int terminal_pitch, int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, const unsigned char *trimap, int trimap_pitch, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char c = trimap[y*trimap_pitch+x]; Npp32f data; if (c == 0) { // Definitely Background data = -INF; } else if (c == 2) { // Definitely Foreground data = + INF; } else { // Unknown uchar4 pixel = image[y * image_pitch + x]; Npp32f data_bg = GMMTerm(pixel, gmm); Npp32f data_fg = GMMTerm(pixel, &gmm[gmm_pitch]); for (int i=2; i<gmmN; i+=2) { data_bg += GMMTerm(pixel, &gmm[(i) * gmm_pitch]); data_fg += GMMTerm(pixel, &gmm[(i+1) * gmm_pitch]); } data_bg = -logf(data_bg); data_fg = -logf(data_fg); data = data_bg - 
data_fg; data = max(min(data, INF),-INF); } terminals[y*terminal_pitch + x] = _FIXED(data); } } cudaError_t GMMDataTerm(Npp32s *terminals, int terminal_pitch, int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, const unsigned char *trimap, int trimap_pitch, int width, int height) { dim3 block(32,8); dim3 grid((width+block.x-1) / block.x, (height+block.y-1) / block.y); GMMDataTermKernel<<<grid, block>>>(terminals, terminal_pitch/4, gmmN, gmm, gmm_pitch/4, image, image_pitch/4, trimap, trimap_pitch, width, height); return cudaGetLastError(); } __global__ void GMMAssignKernel(int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *g_alpha, int alpha_pitch, int width, int height) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char alpha = g_alpha[y*alpha_pitch+x] & 1; // Unknown uchar4 pixel = image[y * image_pitch + x]; int alpha_min = alpha; float max_prob = GMMTerm(pixel, &gmm[(alpha_min) * gmm_pitch]); for (int i=alpha+2; i<gmmN; i+=2) { float prob = GMMTerm(pixel, &gmm[(i) * gmm_pitch]); if (prob > max_prob) { alpha_min = i; max_prob = prob; } } g_alpha[y*alpha_pitch+x] = alpha_min; } } cudaError_t GMMAssign(int gmmN, const float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 block(32,16); dim3 grid((width+block.x-1) / block.x, (height+block.y-1) / block.y); GMMAssignKernel<<<grid, block>>>(gmmN, gmm, gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height); return cudaGetLastError(); } __device__ float3 normalize(float3 v) { float norm = 1.0f / sqrtf(v.x * v.x + v.y * v.y + v.z * v.z); return make_float3(v.x * norm, v.y * norm, v.z * norm); } __device__ float3 mul_right(const float *M, float3 v) { return make_float3( M[0] * v.x + M[1] * v.y + M[2] * v.z, M[1] * v.x + M[3] * v.y + M[4] * v.z, M[2] * v.x + M[4] * v.y + M[5] * v.z); } __device__ float largest_eigenvalue(const float *M) { float norm = M[0] > M[3] ? M[0] : M[3]; norm = M[0] > M[5] ? 
M[0] : M[5]; norm = 1.0f / norm; float a00 = norm * M[0]; float a01 = norm * M[1]; float a02 = norm * M[2]; float a11 = norm * M[3]; float a12 = norm * M[4]; float a22 = norm * M[5]; float c0 = a00*a11*a22 + 2.0f*a01*a02*a12 - a00*a12*a12 - a11*a02*a02 - a22*a01*a01; float c1 = a00*a11 - a01*a01 + a00*a22 - a02*a02 + a11*a22 - a12*a12; float c2 = a00 + a11 + a22; const float inv3 = 1.0f / 3.0f; const float root3 = sqrtf(3.0f); float c2Div3 = c2*inv3; float aDiv3 = (c1 - c2*c2Div3)*inv3; if (aDiv3 > 0.0f) { aDiv3 = 0.0f; } float mbDiv2 = 0.5f*(c0 + c2Div3*(2.0f*c2Div3*c2Div3 - c1)); float q = mbDiv2*mbDiv2 + aDiv3*aDiv3*aDiv3; if (q > 0.0f) { q = 0.0f; } float magnitude = sqrtf(-aDiv3); float angle = atan2(sqrtf(-q),mbDiv2)*inv3; float cs = cos(angle); float sn = sin(angle); float largest_eigenvalue = c2Div3 + 2.0f*magnitude*cs; float eigenvalue = c2Div3 - magnitude*(cs + root3*sn); if (eigenvalue > largest_eigenvalue) { largest_eigenvalue = eigenvalue; } eigenvalue = c2Div3 - magnitude*(cs - root3*sn); if (eigenvalue > largest_eigenvalue) { largest_eigenvalue = eigenvalue; } return largest_eigenvalue / norm; } __device__ float3 cross_prod(float3 a, float3 b) { return make_float3((a.y*b.z)-(a.z*b.y), (a.z*b.x)-(a.x*b.z), (a.x*b.y)-(a.y*b.x)); } __device__ float3 compute_eigenvector(const float *M, float eigenvalue) { float3 r0 = make_float3(M[0] - eigenvalue, M[1], M[2]); float3 r1 = make_float3(M[2] , M[3]- eigenvalue, M[4]); float3 eigenvector = cross_prod(r0,r1); return normalize(eigenvector); } __device__ void largest_eigenvalue_eigenvector(const float *M, float3 &evec, float &eval) { eval = largest_eigenvalue(M); evec = compute_eigenvector(M, eval); } __device__ float scalar_prod(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } struct GMMSplit_t { int idx; float threshold; float3 eigenvector; }; // 1 Block, 32x2 __global__ void GMMFindSplit(GMMSplit_t *gmmSplit, int gmmK, float *gmm, int gmm_pitch) { __shared__ float s_eigenvalues[2][32]; int gmm_idx = (threadIdx.x << 1) + threadIdx.y; float eigenvalue = 0; float3 eigenvector; if (threadIdx.x < gmmK) { largest_eigenvalue_eigenvector(&gmm[gmm_idx * gmm_pitch + 4], eigenvector, eigenvalue); } // Warp Reduction float maxvalue = eigenvalue; s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+16) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+8) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+4) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+2) & 31]); s_eigenvalues[threadIdx.y][threadIdx.x] = maxvalue; maxvalue = max(maxvalue, s_eigenvalues[threadIdx.y][(threadIdx.x+1) & 31]); if (maxvalue == eigenvalue) { GMMSplit_t split; split.idx = threadIdx.x; split.threshold = scalar_prod(make_float3(gmm[gmm_idx * gmm_pitch + 1], gmm[gmm_idx * gmm_pitch + 2], gmm[gmm_idx * gmm_pitch + 3]), eigenvector); split.eigenvector = eigenvector; gmmSplit[threadIdx.y] = split; } } __global__ void GMMDoSplit(const GMMSplit_t *gmmSplit, int k, float *gmm, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { __shared__ GMMSplit_t s_gmmSplit[2]; int *s_linear = (int *) s_gmmSplit; int *g_linear = (int *) gmmSplit; if (threadIdx.y ==0 && threadIdx.x < 10) { s_linear[threadIdx.x] = 
g_linear[threadIdx.x]; } __syncthreads(); int x = blockIdx.x * 32 + threadIdx.x; int y0 = blockIdx.y * 32; for (int i = threadIdx.y; i < 32; i += blockDim.y) { int y = y0 + i; if (x < width && y < height) { unsigned char my_alpha = alpha[y * alpha_pitch + x]; int select = my_alpha & 1; int gmm_idx = my_alpha >> 1; if (gmm_idx == s_gmmSplit[select].idx) { // in the split cluster now uchar4 pixel = image[y * image_pitch + x]; float value = scalar_prod(s_gmmSplit[select].eigenvector, make_float3(pixel.x, pixel.y, pixel.z)); if (value > s_gmmSplit[select].threshold) { // assign pixel to new cluster alpha[y * alpha_pitch + x] = k + select; } } } } } cudaError_t GMMInitialize(int gmm_N, float *gmm, float *scratch_mem, int gmm_pitch, const uchar4 *image, int image_pitch, unsigned char *alpha, int alpha_pitch, int width, int height) { dim3 grid((width+31) / 32, (height+31) / 32); dim3 block(32,4); dim3 smallblock(32,2); for (int k = 2; k < gmm_N; k+=2) { GMMReductionKernel<4, true><<<grid, block>>>(0, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); for (int i=1; i < k; ++i) { GMMReductionKernel<4, false><<<grid, block>>>(i, &scratch_mem[grid.x *grid.y], gmm_pitch/4, image, image_pitch/4, alpha, alpha_pitch, width, height, (unsigned int *) scratch_mem); } GMMFinalizeKernel<4, false><<<k, 32 *4>>>(gmm, &scratch_mem[grid.x *grid.y], gmm_pitch/4, grid.x *grid.y); GMMFindSplit<<<1, smallblock>>>((GMMSplit_t *) scratch_mem, k / 2, gmm, gmm_pitch/4); GMMDoSplit<<<grid, block>>>((GMMSplit_t *) scratch_mem, (k/2) << 1, gmm, gmm_pitch/4, image, image_pitch / 4, alpha, alpha_pitch, width, height); } return cudaGetLastError(); }
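The GrabCut GMM file above only exposes four host wrappers (GMMInitialize, GMMAssign, GMMUpdate, GMMDataTerm). As a rough illustration of how they might be chained for one segmentation pass, here is a hedged CUDA C++ sketch; the driver name, the pre-allocated device buffers, and the pitch handling are my own assumptions and are not part of the sample.

// Hypothetical one-pass driver; assumes <nppi.h> plus the declarations from the file
// above are in scope, and that all device buffers/pitches were allocated by the caller.
cudaError_t runGMMPass(int gmm_N, float *d_gmm, float *d_scratch, int gmm_pitch,
                       const uchar4 *d_image, int image_pitch,
                       unsigned char *d_alpha, int alpha_pitch,
                       const unsigned char *d_trimap, int trimap_pitch,
                       Npp32s *d_terminals, int terminal_pitch,
                       int width, int height)
{
    // Grow the models from 2 components by repeatedly splitting along the largest eigenvector.
    cudaError_t err = GMMInitialize(gmm_N, d_gmm, d_scratch, gmm_pitch, d_image, image_pitch,
                                    d_alpha, alpha_pitch, width, height);
    if (err != cudaSuccess) return err;

    // Reassign each pixel to its most probable component within its current fg/bg model.
    err = GMMAssign(gmm_N, d_gmm, gmm_pitch, d_image, image_pitch,
                    d_alpha, alpha_pitch, width, height);
    if (err != cudaSuccess) return err;

    // Re-estimate the Gaussian parameters and the common scaling term.
    err = GMMUpdate(gmm_N, d_gmm, d_scratch, gmm_pitch, d_image, image_pitch,
                    d_alpha, alpha_pitch, width, height);
    if (err != cudaSuccess) return err;

    // Convert the per-pixel data term into fixed-point terminal capacities for the graph cut.
    return GMMDataTerm(d_terminals, terminal_pitch, gmm_N, d_gmm, gmm_pitch,
                       d_image, image_pitch, d_trimap, trimap_pitch, width, height);
}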
efbebeed4e32c952a95644d6d4868707d0460cf9.hip
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
        float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape,
        InstructionShape, EpilogueOp,
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
        const typename Gemm::ElementA* d_A, size_t lda,
        const typename Gemm::ElementB* d_B, size_t ldb,
        typename Gemm::ElementC* d_C, size_t ldc, int* workspace,
        cutlass::gemm::GemmCoord const& problem_size,
        typename Gemm::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
efbebeed4e32c952a95644d6d4868707d0460cf9.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
        float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape,
        InstructionShape, EpilogueOp,
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
        const typename Gemm::ElementA* d_A, size_t lda,
        const typename Gemm::ElementB* d_B, size_t ldb,
        typename Gemm::ElementC* d_C, size_t ldc, int* workspace,
        cutlass::gemm::GemmCoord const& problem_size,
        typename Gemm::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
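For context, a cutlass::gemm::device::Gemm instantiation like the Gemm alias above is normally launched through its Arguments struct. The sketch below assumes the CUTLASS 2.x device-level API (as used in the upstream 00_basic_gemm example); the problem size, pointers, and leading dimensions are placeholders, and this is not how MegDNN's cutlass_matrix_mul_wrapper is implemented internally.

#include <cutlass/gemm/device/gemm.h>

// Hedged sketch of running the Gemm type aliased above on the default stream.
cutlass::Status run_simt_sgemm(int M, int N, int K,
                               const float *d_A, int lda,   // column-major A, matching LayoutA above
                               const float *d_B, int ldb,   // row-major B, matching LayoutB above
                               float *d_C, int ldc,         // row-major C, also used as output D
                               float alpha, float beta)
{
    Gemm gemm_op;  // the type alias defined in the generated file above
    Gemm::Arguments args({M, N, K},
                         {d_A, lda},
                         {d_B, ldb},
                         {d_C, ldc},
                         {d_C, ldc},
                         {alpha, beta});
    return gemm_op(args);  // cutlass::Status::kSuccess on success
}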
054a926f84aad163f27a882137b363fed36ff0b8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * @file exercise2.cu
 * @author Navid Shamszadeh
 * @brief Constant memory version of the finite difference kernel in exercise1.cu
 * @details Write a constant memory version of the diff kernel where the array dev_u is put in constant memory.
 * Use cudaGetDeviceProperties or cudaDeviceGetAttribute to determine the maximum size of the array dev_u that will
 * fit in the constant memory.
 * @date 2021-04-14
 *
 */
#include <cmath>
#include <stdio.h>
#include <stdlib.h>
#include "../error_check_cuda.h"

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795 // fallback when <cmath> does not define M_PI
#endif

// set the threads per block as a constant multiple of the warp size
const int threadsPerBlock = 256;

// declare constant memory on the device
__constant__ double dev_u[8190]; // total constant memory on rtx 3070 is 65,536 bytes, ie 8192 doubles, save room for other variables
__constant__ int dev_N;
__constant__ double dev_dx;

// declare kernel
__global__ void diff(double* dev_du); // all the other variables are stored in constant memory

/**
 * @brief Demonstrate a simple example for implementing a parallel finite difference operator
 * using shared and constant device memory.
 * @param argc Should be 2.
 * @param argv[1] Length of the vector of data.
 * @return int.
 */
int main(int argc, char* argv[])
{
    int N = atoi(argv[1]);
    if (N > 8190)
    {
        printf("Error: N (%d) exceeds the 8190 doubles reserved for dev_u in constant memory (65,536 bytes total).\n", N);
        exit(-1);
    }
    const int blocksPerGrid = N / threadsPerBlock + (N % threadsPerBlock > 0 ? 1 : 0);

    // allocate host memory
    double* u = (double*)malloc(N * sizeof(double));
    double* du = (double*)malloc(N * sizeof(double));

    // initialize data on the host
    double dx = 2 * M_PI / N;
    for (int i = 0; i < N; i++)
    {
        u[i] = sin(i * dx);
    }

    // allocate device memory
    double* dev_du;
    CheckError(hipMalloc((void**)&dev_du, N * sizeof(double)));

    // copy data from host to device
    // hipMemcpyToSymbol copies data from host to symbol address (in this case somewhere in constant memory).
    CheckError(hipMemcpyToSymbol(dev_u, u, N * sizeof(double)));
    CheckError(hipMemcpyToSymbol(dev_N, &N, sizeof(int)));
    CheckError(hipMemcpyToSymbol(dev_dx, &dx, sizeof(double)));

    // kernel call no longer needs dev_N, dev_u, or dev_dx to be passed as parameters
    hipLaunchKernelGGL(( diff), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_du);

    // copy results from device to host
    CheckError(hipMemcpy(du, dev_du, N * sizeof(double), hipMemcpyDeviceToHost));

    // notice we don't need to free constant memory pointers
    CheckError(hipFree(dev_du));

    // print the results
    for (int i = 0; i < N; i++)
    {
        printf("%f\n", du[i]);
    }

    // free host memory
    free(u);
    free(du);
    exit(0);
}

__global__ void diff(double* du)
{
    /*
     * TODO: Obtain performance metrics of transferring constant memory to shared memory vs only using constant memory.
     */
    // __shared__ double local_u[threadsPerBlock + 2]; // for now, we don't move constant memory to shared memory
    __shared__ double local_du[threadsPerBlock];

    // define global index
    int g_i = (threadIdx.x + blockIdx.x * blockDim.x) % dev_N;

    // since we aren't copying any data from global to shared memory, we do not need to call __syncthreads()
    local_du[threadIdx.x] = (dev_u[(g_i + 1) % dev_N] - dev_u[(g_i + dev_N - 1) % dev_N]) / (2 * dev_dx);

    // copy result into global memory
    du[g_i] = local_du[threadIdx.x];
}
054a926f84aad163f27a882137b363fed36ff0b8.cu
/**
 * @file exercise2.cu
 * @author Navid Shamszadeh
 * @brief Constant memory version of the finite difference kernel in exercise1.cu
 * @details Write a constant memory version of the diff kernel where the array dev_u is put in constant memory.
 * Use cudaGetDeviceProperties or cudaDeviceGetAttribute to determine the maximum size of the array dev_u that will
 * fit in the constant memory.
 * @date 2021-04-14
 *
 */
#include <cmath>
#include <stdio.h>
#include <stdlib.h>
#include "../error_check_cuda.h"

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795 // fallback when <cmath> does not define M_PI
#endif

// set the threads per block as a constant multiple of the warp size
const int threadsPerBlock = 256;

// declare constant memory on the device
__constant__ double dev_u[8190]; // total constant memory on rtx 3070 is 65,536 bytes, ie 8192 doubles, save room for other variables
__constant__ int dev_N;
__constant__ double dev_dx;

// declare kernel
__global__ void diff(double* dev_du); // all the other variables are stored in constant memory

/**
 * @brief Demonstrate a simple example for implementing a parallel finite difference operator
 * using shared and constant device memory.
 * @param argc Should be 2.
 * @param argv[1] Length of the vector of data.
 * @return int.
 */
int main(int argc, char* argv[])
{
    int N = atoi(argv[1]);
    if (N > 8190)
    {
        printf("Error: N (%d) exceeds the 8190 doubles reserved for dev_u in constant memory (65,536 bytes total).\n", N);
        exit(-1);
    }
    const int blocksPerGrid = N / threadsPerBlock + (N % threadsPerBlock > 0 ? 1 : 0);

    // allocate host memory
    double* u = (double*)malloc(N * sizeof(double));
    double* du = (double*)malloc(N * sizeof(double));

    // initialize data on the host
    double dx = 2 * M_PI / N;
    for (int i = 0; i < N; i++)
    {
        u[i] = sin(i * dx);
    }

    // allocate device memory
    double* dev_du;
    CheckError(cudaMalloc((void**)&dev_du, N * sizeof(double)));

    // copy data from host to device
    // cudaMemcpyToSymbol copies data from host to symbol address (in this case somewhere in constant memory).
    CheckError(cudaMemcpyToSymbol(dev_u, u, N * sizeof(double)));
    CheckError(cudaMemcpyToSymbol(dev_N, &N, sizeof(int)));
    CheckError(cudaMemcpyToSymbol(dev_dx, &dx, sizeof(double)));

    // kernel call no longer needs dev_N, dev_u, or dev_dx to be passed as parameters
    diff<<<blocksPerGrid, threadsPerBlock>>>(dev_du);

    // copy results from device to host
    CheckError(cudaMemcpy(du, dev_du, N * sizeof(double), cudaMemcpyDeviceToHost));

    // notice we don't need to free constant memory pointers
    CheckError(cudaFree(dev_du));

    // print the results
    for (int i = 0; i < N; i++)
    {
        printf("%f\n", du[i]);
    }

    // free host memory
    free(u);
    free(du);
    exit(0);
}

__global__ void diff(double* du)
{
    /*
     * TODO: Obtain performance metrics of transferring constant memory to shared memory vs only using constant memory.
     */
    // __shared__ double local_u[threadsPerBlock + 2]; // for now, we don't move constant memory to shared memory
    __shared__ double local_du[threadsPerBlock];

    // define global index
    int g_i = (threadIdx.x + blockIdx.x * blockDim.x) % dev_N;

    // since we aren't copying any data from global to shared memory, we do not need to call __syncthreads()
    local_du[threadIdx.x] = (dev_u[(g_i + 1) % dev_N] - dev_u[(g_i + dev_N - 1) % dev_N]) / (2 * dev_dx);

    // copy result into global memory
    du[g_i] = local_du[threadIdx.x];
}
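The @details comment above asks for the constant-memory capacity to be queried rather than hard-coded at 8190 doubles. A minimal standalone sketch of that query follows; the device index 0 and the two-double reserve (mirroring the dev_N/dev_dx reserve above) are assumptions.

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);                 // device 0 assumed
    size_t const_bytes = prop.totalConstMem;           // reported in bytes (65,536 on the RTX 3070 mentioned above)

    int const_bytes_attr = 0;
    cudaDeviceGetAttribute(&const_bytes_attr, cudaDevAttrTotalConstantMemory, 0); // same value via the attribute API

    // Mirror the reserve used above: keep 2 doubles' worth of space free for dev_N and dev_dx.
    size_t max_doubles = const_bytes / sizeof(double) - 2;
    printf("totalConstMem = %zu bytes (attribute: %d), so dev_u can hold at most %zu doubles\n",
           const_bytes, const_bytes_attr, max_doubles);
    return 0;
}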
2b57fcd2ee2194ea82ee26c9f0ef39a473114ff1.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "block_sum.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const float *input = NULL;
            hipMalloc(&input, XSIZE*YSIZE*sizeof(float)); // allocate n floats, not n bytes
            float *per_block_results = NULL;
            hipMalloc(&per_block_results, XSIZE*YSIZE*sizeof(float));
            const size_t n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( block_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,per_block_results,n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( block_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,per_block_results,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( block_sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,per_block_results,n);
            }
            hipDeviceSynchronize(); // wait for the queued launches to finish so the measured time includes kernel execution, not just launch overhead
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
2b57fcd2ee2194ea82ee26c9f0ef39a473114ff1.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "block_sum.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const float *input = NULL;
            cudaMalloc(&input, XSIZE*YSIZE*sizeof(float)); // allocate n floats, not n bytes
            float *per_block_results = NULL;
            cudaMalloc(&per_block_results, XSIZE*YSIZE*sizeof(float));
            const size_t n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            block_sum<<<gridBlock,threadBlock>>>(input,per_block_results,n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                block_sum<<<gridBlock,threadBlock>>>(input,per_block_results,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                block_sum<<<gridBlock,threadBlock>>>(input,per_block_results,n);
            }
            cudaDeviceSynchronize(); // wait for the queued launches to finish so the measured time includes kernel execution, not just launch overhead
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
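Because kernel launches are asynchronous, the steady_clock loop above only measures launch overhead unless a synchronize is placed before the end timestamp (added above). The usual alternative is CUDA event timing; the standalone sketch below uses a placeholder kernel and launch configuration rather than block_sum and its arguments.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void dummyKernel() {}   // placeholder; block_sum would need its real arguments

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                  // enqueued in the same (default) stream as the launches
    for (int i = 0; i < 1000; ++i) {
        dummyKernel<<<128, 256>>>();
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);              // block the host until the last launch and the stop event complete

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // elapsed GPU time in milliseconds
    printf("1000 launches: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}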