hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
6d24580164978ae8e4340ce87278632e98b778ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RadixTree.hpp"
#include "CudaCommon.hpp"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include <memory>
#include <array>
#include <algorithm>
#include <limits>
#include <cstdint>
#include <type_traits>
#include <math.h>
using namespace RT;
using hipcub::DeviceReduce;
using hipcub::DeviceRadixSort;
using hipcub::DeviceScan;
template <typename T>
__global__ void makeCodes(
const T min_coord,
const T range,
const T* __restrict__ x_vals,
const T* __restrict__ y_vals,
const T* __restrict__ z_vals,
Code_t* codes,
const size_t N) {
// only supports 1-dimension blocks and grids
assert(threadIdx.y == 0 && threadIdx.z == 0);
assert(blockIdx.y == 0 && blockIdx.z == 0);
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// printf("Raw point %d = (%f, %f, %f)\n", idx, x_vals[idx], y_vals[idx], z_vals[idx]);
codes[idx] = pointToCode(x_vals[idx], y_vals[idx], z_vals[idx], min_coord, range);
}
}
// computes ceil(a / b)
template<typename T>
__device__ inline T ceil_div(T a, T b) {
// If a + b might overflow, do the following instead? (untested):
// 1 + ((x - 1) / y); // if x != 0
assert(!std::is_signed<decltype(a)>() || a >= 0);
assert(!std::is_signed<decltype(b)>() || b >= 0);
return (a + b - 1) / b;
}
__device__ inline int log2_ceil(Code_t x) {
static_assert(sizeof(x) == sizeof(long long int), "__clzll(x) is for long long int");
// Index of the highest set bit, counting from the LSB
// This is floor(log2(x))
int n_lower_bits = (8 * sizeof(x)) - __clzll(x) - 1;
// Add 1 if 2^n_lower_bits is less than x
// (i.e. we rounded down because x was not a power of 2)
return n_lower_bits + (x > ((Code_t)1 << n_lower_bits));
}
// delta(a, b) is the length of the longest common prefix of codes a and b
__device__ inline int_fast8_t delta(const Code_t a, const Code_t b) {
// Assuming first bit is 0. Asserts check that.
// Not necessary, so if want to store info in that bit in the future, requires a change
Code_t bit1_mask = (Code_t)1 << (sizeof(a) * 8 - 1);
assert((a & bit1_mask) == 0);
assert((b & bit1_mask) == 0);
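// XOR clears the bits of the common prefix, so the leading-zero count of
// (a ^ b) is the common prefix length; subtract 1 to ignore the always-zero top bit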
return __clzll(a ^ b) - 1;
}
__global__ void findDups(
const Code_t* codes,
int* contributes,
const int N
) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > 0 && idx < N) {
// a code contributes to the deduplicated output only if it differs from its left neighbor
contributes[idx] = (codes[idx] != codes[idx - 1]);
}
if (idx == 0) {
// set as 0 to make array 0-indexed (even though it contributes)
contributes[idx] = 0;
}
}
__global__ void moveDups(
const Code_t* in_codes,
Code_t* out_codes,
const int* out_idx,
const int N
) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
out_codes[out_idx[idx]] = in_codes[idx];
}
}
__global__ void constructTree(
const Code_t* codes,
bool* hasLeafLeft,
bool* hasLeafRight,
int* leftChild,
int* parent,
uint8_t* prefixN,
const size_t N) {
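// One thread per internal node: determine the direction of the node's range,
// bound and refine its length by exponential then binary search, and finally
// locate the split position that defines the two children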
assert(threadIdx.y == 0 && threadIdx.z == 0);
assert(blockIdx.y == 0 && blockIdx.z == 0);
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
auto code_i = codes[i];
// Determine direction of the range (+1 or -1)
int d;
if (i == 0) {
d = 1;
}
else {
auto delta_diff_right = delta(code_i, codes[i+1]);
auto delta_diff_left = delta(code_i, codes[i-1]);
int direction_difference = delta_diff_right - delta_diff_left;
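// branchless sign: d = +1 if the range extends toward the right neighbor, -1 toward the left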
d = (direction_difference > 0) - (direction_difference < 0);
}
// Compute upper bound for the length of the range
Code_t l = 0;
if (i == 0) {
// First node is root, covering whole tree
l = N - 1;
}
//else if (i == N - 1) {
// l = 1;
//}
else {
auto delta_min = delta(code_i, codes[i - d]);
Code_t l_max = 2;
// Cast to ptrdiff_t so in case the result is negative (since d is +/- 1), we can catch it and not index out of bounds
while (i + static_cast<ptrdiff_t>(l_max)*d >= 0 &&
i + l_max*d <= N &&
delta(code_i, codes[i + l_max * d]) > delta_min) {
l_max *= 2;
}
int l_cutoff = (d==-1) ? i : N - i;
int t;
int divisor;
// Find the other end using binary search
for (t = l_max / 2, divisor = 2; t >= 1; divisor *= 2, t = l_max / divisor) {
if (l + t <= l_cutoff &&
delta(code_i, codes[i + (l + t)*d]) > delta_min) {
l += t;
}
}
// for (t = l_max / 2, divisor = 2; t >= 1; divisor *= 2, t = l_max / divisor) {
// if (i + static_cast<ptrdiff_t>(l + t)*d >= 0 &&
// i + (l + t)*d < N &&
// delta(code_i, codes[i + (l + t)*d]) > delta_min) {
// l += t;
// }
// }
}
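// j is the index of the other end of this node's range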
int j = i + l*d;
// Find the split position using binary search
auto delta_node = delta(codes[i], codes[j]);
prefixN[i] = delta_node;
int s = 0;
int t;
int max_divisor = 1 << log2_ceil(l);
int divisor = 2;
int s_cutoff = (d == -1) ? i - 1 : N - i - 1;
for (t = ceil_div<Code_t>(l, 2); divisor <= max_divisor; divisor <<= 1, t = ceil_div<Code_t>(l, divisor)) {
// for (t = ceil_div<Code_t>(l, 2), divisor = 2; t >= 1; divisor *= 2, t = ceil_div<Code_t>(l, divisor)) {
if (s + t <= s_cutoff &&
delta(code_i, codes[i + (s + t)*d]) > delta_node) {
s += t;
}
}
// Split position
int gamma = i + s*d + min(d, 0);
leftChild[i] = gamma;
hasLeafLeft[i] = (min(i, j) == gamma);
hasLeafRight[i] = (max(i, j) == gamma+1);
// Set parents of left and right children, if they aren't leaves
// can't set this node as parent of its leaves, because the
// leaf also represents an internal node with a different parent
if (!hasLeafLeft[i]) {
parent[gamma] = i;
}
if (!hasLeafRight[i]) {
parent[gamma + 1] = i;
}
}
}
void RadixTree::encodePoints(const PointCloud<float>& cloud) {
// Allocate for raw data points
size_t data_size = n_pts * sizeof(cloud.x_vals[0]);
float *d_data_x, *d_data_y, *d_data_z;
CudaCheckCall(hipMalloc(&d_data_x, data_size));
CudaCheckCall(hipMalloc(&d_data_y, data_size));
CudaCheckCall(hipMalloc(&d_data_z, data_size));
// Copy points to GPU
CudaCheckCall(hipMemcpyAsync(d_data_x, &cloud.x_vals[0], data_size, hipMemcpyHostToDevice));
CudaCheckCall(hipMemcpyAsync(d_data_y, &cloud.y_vals[0], data_size, hipMemcpyHostToDevice));
CudaCheckCall(hipMemcpyAsync(d_data_z, &cloud.z_vals[0], data_size, hipMemcpyHostToDevice));
hipDeviceSynchronize();
// Find maximum and minimum values in data
// std::array<float, 3> mins, maxes;
float *mins, *maxes;
CudaCheckCall(hipMallocManaged(&mins, sizeof(float) * 3));
CudaCheckCall(hipMallocManaged(&maxes, sizeof(float) * 3));
// float *d_mins, *d_maxes;
// CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_mins, sizeof(float) * 3));
// CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_maxes, sizeof(float) * 3));
size_t temp_storage_reqd = 0;
void* d_temp_storage = nullptr;
// get amount of required memory
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_x, &maxes[0], n_pts);
// allocate temporary storage
CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_temp_storage, temp_storage_reqd));
// Find maximum
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_x, &maxes[0], n_pts);
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_y, &maxes[1], n_pts);
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_z, &maxes[2], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_x, &mins[0], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_y, &mins[1], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_z, &mins[2], n_pts);
hipDeviceSynchronize();
CudaCheckError();
// hipMemcpy(&mins[0], d_mins, sizeof(float) * mins.size(), hipMemcpyDeviceToHost);
// hipMemcpy(&maxes[0], d_maxes, sizeof(float) * maxes.size(), hipMemcpyDeviceToHost);
// g_allocator.DeviceFree(d_mins);
// g_allocator.DeviceFree(d_maxes);
g_allocator.DeviceFree(d_temp_storage);
hipDeviceSynchronize();
max_coord = *std::max_element(&maxes[0], &maxes[3]);
min_coord = *std::min_element(&mins[0], &mins[3]);
// std::cout << "range = [" << min_val << ", " << max_val << "]" << std::endl;
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_pts);
hipLaunchKernelGGL(( makeCodes), dim3(blocks), dim3(tpb), 0, 0, min_coord, max_coord - min_coord, d_data_x, d_data_y, d_data_z, d_tree.mortonCode, n_pts);
hipDeviceSynchronize();
CudaCheckError();
// Now that codes created, raw values not needed
CudaCheckCall(hipFree(d_data_x));
CudaCheckCall(hipFree(d_data_y));
CudaCheckCall(hipFree(d_data_z));
CudaCheckCall(hipFree(mins));
CudaCheckCall(hipFree(maxes));
}
void RadixTree::removeDuplicates(Code_t* d_codes_sorted) {
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_pts);
int* contributions;
CudaCheckCall(hipMallocManaged(&contributions, n_pts * sizeof(*contributions)));
hipLaunchKernelGGL(( findDups), dim3(blocks), dim3(tpb), 0, 0, d_codes_sorted, contributions, n_pts);
int* final_pt_idx;
CudaCheckCall(hipMallocManaged(&final_pt_idx, n_pts * sizeof(*final_pt_idx)));
// prefix sum to find output indices
void* d_temp_storage = nullptr;
size_t temp_storage_reqd = 0;
CudaCheckCall(
DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_reqd,
contributions,
final_pt_idx,
n_pts)
);
CudaCheckCall(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_reqd));
CudaCheckCall(
DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_reqd,
contributions,
final_pt_idx,
n_pts)
);
hipDeviceSynchronize();
CudaCheckError();
CudaCheckCall(g_allocator.DeviceFree(d_temp_storage));
// get number of unique codes (index of last element is 1 less than number of elements)
int n_old_pts = n_pts;
n_pts = final_pt_idx[n_pts - 1] + 1;
CudaCheckCall(hipFree(contributions));
// move points into their final positions
// allocate space for un-duplicated points
CudaCheckCall(hipMallocManaged(&d_tree.mortonCode, n_pts * sizeof(*d_tree.mortonCode)));
hipLaunchKernelGGL(( moveDups), dim3(blocks), dim3(tpb), 0, 0, d_codes_sorted, d_tree.mortonCode, final_pt_idx, n_old_pts);
hipDeviceSynchronize();
CudaCheckError();
CudaCheckCall(hipFree(final_pt_idx));
printf("%d duplicates removed\n", n_old_pts - n_pts);
}
RadixTree::RadixTree(const PointCloud<float>& cloud) {
// Check that the cast is okay
assert(cloud.x_vals.size() <= std::numeric_limits<decltype(n_pts)>::max());
n_pts = static_cast<decltype(n_pts)>(cloud.x_vals.size());
// allocate memory for codes in tree
CudaCheckCall(hipMallocManaged(&d_tree.mortonCode, sizeof(*d_tree.mortonCode) * n_pts));
// fill up d_tree.mortonCode
encodePoints(cloud);
// Sort the Morton codes in ascending order
Code_t* d_codes_sorted;
CudaCheckCall(hipMallocManaged(&d_codes_sorted, sizeof(*d_codes_sorted) * n_pts));
void* d_temp_storage = nullptr;
size_t temp_storage_reqd = 0;
CudaCheckCall(
// get storage requirements
DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_reqd,
d_tree.mortonCode, d_codes_sorted,
n_pts)
);
CudaCheckCall(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_reqd));
// sort the Morton codes (keys only, no attached values)
CudaCheckCall(
DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_reqd,
d_tree.mortonCode, d_codes_sorted,
n_pts)
);
hipDeviceSynchronize();
CudaCheckError();
g_allocator.DeviceFree(d_temp_storage);
// free these now that they have been sorted, will be re-allocated after duplicate removal
CudaCheckCall(hipFree(d_tree.mortonCode));
// Remove duplicates
removeDuplicates(d_codes_sorted);
CudaCheckCall(hipFree(d_codes_sorted));
// allocate memory for d_tree now that we know the true number of points
CudaCheckCall(hipMallocManaged(&d_tree.hasLeafLeft, sizeof(*d_tree.hasLeafLeft) * n_pts));
CudaCheckCall(hipMallocManaged(&d_tree.hasLeafRight, sizeof(*d_tree.hasLeafRight) * n_pts));
CudaCheckCall(hipMallocManaged(&d_tree.prefixN, sizeof(*d_tree.prefixN) * n_pts));
CudaCheckCall(hipMallocManaged(&d_tree.leftChild, sizeof(*d_tree.leftChild) * n_pts));
CudaCheckCall(hipMallocManaged(&d_tree.parent, sizeof(*d_tree.parent) * n_pts));
// Make tree
// number of nodes is one less than points
n_nodes = n_pts - 1;
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_nodes);
hipLaunchKernelGGL(( constructTree), dim3(blocks), dim3(tpb), 0, 0, d_tree.mortonCode,
d_tree.hasLeafLeft,
d_tree.hasLeafRight,
d_tree.leftChild,
d_tree.parent,
d_tree.prefixN,
n_nodes);
hipDeviceSynchronize();
CudaCheckError();
// for (int i = 0; i < n_nodes; ++i) {
// printf("idx = %d, code = %llx, prefixN = %d, left = %d, parent = %d, leftLeaf=%d, rightLeft=%d\n",
// i, d_tree.mortonCode[i], (int)d_tree.prefixN[i], d_tree.leftChild[i], d_tree.parent[i], (int)d_tree.hasLeafLeft[i], (int)d_tree.hasLeafRight[i]);
// }
// verify radix tree
// for (int i = 0; i < n_nodes; ++i) {
// int this_code_len = d_tree.prefixN[i];
// Code_t left_code = d_tree.mortonCode[d_tree.leftChild[i]];
// int left_code_len = d_tree.prefixN[d_tree.leftChild[i]];
// Code_t right_code = d_tree.mortonCode[d_tree.leftChild[i] + 1];
// int right_code_len = d_tree.prefixN[d_tree.leftChild[i] + 1];
// if (left_code >> (CODE_LEN - this_code_len) != right_code >> (CODE_LEN - this_code_len)) {
// assert(false);
// }
// }
}
RadixTree::~RadixTree() {
CudaCheckCall(hipFree(d_tree.mortonCode));
CudaCheckCall(hipFree(d_tree.hasLeafLeft));
CudaCheckCall(hipFree(d_tree.hasLeafRight));
CudaCheckCall(hipFree(d_tree.prefixN));
CudaCheckCall(hipFree(d_tree.leftChild));
CudaCheckCall(hipFree(d_tree.parent));
}
| 6d24580164978ae8e4340ce87278632e98b778ce.cu | #include "RadixTree.hpp"
#include "CudaCommon.hpp"
#include "cub/device/device_reduce.cuh"
#include "cub/device/device_radix_sort.cuh"
#include "cub/device/device_scan.cuh"
#include <memory>
#include <array>
#include <algorithm>
#include <limits>
#include <cstdint>
#include <type_traits>
#include <math.h>
using namespace RT;
using cub::DeviceReduce;
using cub::DeviceRadixSort;
using cub::DeviceScan;
template <typename T>
__global__ void makeCodes(
const T min_coord,
const T range,
const T* __restrict__ x_vals,
const T* __restrict__ y_vals,
const T* __restrict__ z_vals,
Code_t* codes,
const size_t N) {
// only supports 1-dimension blocks and grids
assert(threadIdx.y == 0 && threadIdx.z == 0);
assert(blockIdx.y == 0 && blockIdx.z == 0);
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// printf("Raw point %d = (%f, %f, %f)\n", idx, x_vals[idx], y_vals[idx], z_vals[idx]);
codes[idx] = pointToCode(x_vals[idx], y_vals[idx], z_vals[idx], min_coord, range);
}
}
// computes ceil(a / b)
template<typename T>
__device__ inline T ceil_div(T a, T b) {
// If a + b might overflow, do the following instead? (untested):
// 1 + ((x - 1) / y); // if x != 0
assert(!std::is_signed<decltype(a)>() || a >= 0);
assert(!std::is_signed<decltype(b)>() || b >= 0);
return (a + b - 1) / b;
}
__device__ inline int log2_ceil(Code_t x) {
static_assert(sizeof(x) == sizeof(long long int), "__clzll(x) is for long long int");
// Index of the highest set bit, counting from the LSB
// This is floor(log2(x))
int n_lower_bits = (8 * sizeof(x)) - __clzll(x) - 1;
// Add 1 if 2^n_lower_bits is less than x
// (i.e. we rounded down because x was not a power of 2)
return n_lower_bits + (x > ((Code_t)1 << n_lower_bits));
}
// delta(a, b) is the length of the longest common prefix of codes a and b
__device__ inline int_fast8_t delta(const Code_t a, const Code_t b) {
// Assuming first bit is 0. Asserts check that.
// Not necessary, so if want to store info in that bit in the future, requires a change
Code_t bit1_mask = (Code_t)1 << (sizeof(a) * 8 - 1);
assert((a & bit1_mask) == 0);
assert((b & bit1_mask) == 0);
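// XOR clears the bits of the common prefix, so the leading-zero count of
// (a ^ b) is the common prefix length; subtract 1 to ignore the always-zero top bit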
return __clzll(a ^ b) - 1;
}
__global__ void findDups(
const Code_t* codes,
int* contributes,
const int N
) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > 0 && idx < N) {
// a code contributes to the deduplicated output only if it differs from its left neighbor
contributes[idx] = (codes[idx] != codes[idx - 1]);
}
if (idx == 0) {
// set as 0 to make array 0-indexed (even though it contributes)
contributes[idx] = 0;
}
}
__global__ void moveDups(
const Code_t* in_codes,
Code_t* out_codes,
const int* out_idx,
const int N
) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
out_codes[out_idx[idx]] = in_codes[idx];
}
}
__global__ void constructTree(
const Code_t* codes,
bool* hasLeafLeft,
bool* hasLeafRight,
int* leftChild,
int* parent,
uint8_t* prefixN,
const size_t N) {
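// One thread per internal node: determine the direction of the node's range,
// bound and refine its length by exponential then binary search, and finally
// locate the split position that defines the two children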
assert(threadIdx.y == 0 && threadIdx.z == 0);
assert(blockIdx.y == 0 && blockIdx.z == 0);
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
auto code_i = codes[i];
// Determine direction of the range (+1 or -1)
int d;
if (i == 0) {
d = 1;
}
else {
auto delta_diff_right = delta(code_i, codes[i+1]);
auto delta_diff_left = delta(code_i, codes[i-1]);
int direction_difference = delta_diff_right - delta_diff_left;
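// branchless sign: d = +1 if the range extends toward the right neighbor, -1 toward the left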
d = (direction_difference > 0) - (direction_difference < 0);
}
// Compute upper bound for the length of the range
Code_t l = 0;
if (i == 0) {
// First node is root, covering whole tree
l = N - 1;
}
//else if (i == N - 1) {
// l = 1;
//}
else {
auto delta_min = delta(code_i, codes[i - d]);
Code_t l_max = 2;
// Cast to ptrdiff_t so in case the result is negative (since d is +/- 1), we can catch it and not index out of bounds
while (i + static_cast<ptrdiff_t>(l_max)*d >= 0 &&
i + l_max*d <= N &&
delta(code_i, codes[i + l_max * d]) > delta_min) {
l_max *= 2;
}
int l_cutoff = (d==-1) ? i : N - i;
int t;
int divisor;
// Find the other end using binary search
for (t = l_max / 2, divisor = 2; t >= 1; divisor *= 2, t = l_max / divisor) {
if (l + t <= l_cutoff &&
delta(code_i, codes[i + (l + t)*d]) > delta_min) {
l += t;
}
}
// for (t = l_max / 2, divisor = 2; t >= 1; divisor *= 2, t = l_max / divisor) {
// if (i + static_cast<ptrdiff_t>(l + t)*d >= 0 &&
// i + (l + t)*d < N &&
// delta(code_i, codes[i + (l + t)*d]) > delta_min) {
// l += t;
// }
// }
}
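// j is the index of the other end of this node's range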
int j = i + l*d;
// Find the split position using binary search
auto delta_node = delta(codes[i], codes[j]);
prefixN[i] = delta_node;
int s = 0;
int t;
int max_divisor = 1 << log2_ceil(l);
int divisor = 2;
int s_cutoff = (d == -1) ? i - 1 : N - i - 1;
for (t = ceil_div<Code_t>(l, 2); divisor <= max_divisor; divisor <<= 1, t = ceil_div<Code_t>(l, divisor)) {
// for (t = ceil_div<Code_t>(l, 2), divisor = 2; t >= 1; divisor *= 2, t = ceil_div<Code_t>(l, divisor)) {
if (s + t <= s_cutoff &&
delta(code_i, codes[i + (s + t)*d]) > delta_node) {
s += t;
}
}
// Split position
int gamma = i + s*d + min(d, 0);
leftChild[i] = gamma;
hasLeafLeft[i] = (min(i, j) == gamma);
hasLeafRight[i] = (max(i, j) == gamma+1);
// Set parents of left and right children, if they aren't leaves
// can't set this node as parent of its leaves, because the
// leaf also represents an internal node with a different parent
if (!hasLeafLeft[i]) {
parent[gamma] = i;
}
if (!hasLeafRight[i]) {
parent[gamma + 1] = i;
}
}
}
void RadixTree::encodePoints(const PointCloud<float>& cloud) {
// Allocate for raw data points
size_t data_size = n_pts * sizeof(cloud.x_vals[0]);
float *d_data_x, *d_data_y, *d_data_z;
CudaCheckCall(cudaMalloc(&d_data_x, data_size));
CudaCheckCall(cudaMalloc(&d_data_y, data_size));
CudaCheckCall(cudaMalloc(&d_data_z, data_size));
// Copy points to GPU
CudaCheckCall(cudaMemcpyAsync(d_data_x, &cloud.x_vals[0], data_size, cudaMemcpyHostToDevice));
CudaCheckCall(cudaMemcpyAsync(d_data_y, &cloud.y_vals[0], data_size, cudaMemcpyHostToDevice));
CudaCheckCall(cudaMemcpyAsync(d_data_z, &cloud.z_vals[0], data_size, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
// Find maximum and minimum values in data
// std::array<float, 3> mins, maxes;
float *mins, *maxes;
CudaCheckCall(cudaMallocManaged(&mins, sizeof(float) * 3));
CudaCheckCall(cudaMallocManaged(&maxes, sizeof(float) * 3));
// float *d_mins, *d_maxes;
// CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_mins, sizeof(float) * 3));
// CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_maxes, sizeof(float) * 3));
size_t temp_storage_reqd = 0;
void* d_temp_storage = nullptr;
// get amount of required memory
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_x, &maxes[0], n_pts);
// allocate temporary storage
CudaCheckCall(g_allocator.DeviceAllocate((void**)&d_temp_storage, temp_storage_reqd));
// Find maximum
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_x, &maxes[0], n_pts);
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_y, &maxes[1], n_pts);
DeviceReduce::Max(d_temp_storage, temp_storage_reqd, d_data_z, &maxes[2], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_x, &mins[0], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_y, &mins[1], n_pts);
DeviceReduce::Min(d_temp_storage, temp_storage_reqd, d_data_z, &mins[2], n_pts);
cudaDeviceSynchronize();
CudaCheckError();
// cudaMemcpy(&mins[0], d_mins, sizeof(float) * mins.size(), cudaMemcpyDeviceToHost);
// cudaMemcpy(&maxes[0], d_maxes, sizeof(float) * maxes.size(), cudaMemcpyDeviceToHost);
// g_allocator.DeviceFree(d_mins);
// g_allocator.DeviceFree(d_maxes);
g_allocator.DeviceFree(d_temp_storage);
cudaDeviceSynchronize();
max_coord = *std::max_element(&maxes[0], &maxes[3]);
min_coord = *std::min_element(&mins[0], &mins[3]);
// std::cout << "range = [" << min_val << ", " << max_val << "]" << std::endl;
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_pts);
makeCodes<<<blocks, tpb>>>(min_coord, max_coord - min_coord, d_data_x, d_data_y, d_data_z, d_tree.mortonCode, n_pts);
cudaDeviceSynchronize();
CudaCheckError();
// Now that codes created, raw values not needed
CudaCheckCall(cudaFree(d_data_x));
CudaCheckCall(cudaFree(d_data_y));
CudaCheckCall(cudaFree(d_data_z));
CudaCheckCall(cudaFree(mins));
CudaCheckCall(cudaFree(maxes));
}
void RadixTree::removeDuplicates(Code_t* d_codes_sorted) {
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_pts);
int* contributions;
CudaCheckCall(cudaMallocManaged(&contributions, n_pts * sizeof(*contributions)));
findDups<<<blocks, tpb>>>(d_codes_sorted, contributions, n_pts);
int* final_pt_idx;
CudaCheckCall(cudaMallocManaged(&final_pt_idx, n_pts * sizeof(*final_pt_idx)));
// prefix sum to find output indices
void* d_temp_storage = nullptr;
size_t temp_storage_reqd = 0;
CudaCheckCall(
DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_reqd,
contributions,
final_pt_idx,
n_pts)
);
CudaCheckCall(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_reqd));
CudaCheckCall(
DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_reqd,
contributions,
final_pt_idx,
n_pts)
);
cudaDeviceSynchronize();
CudaCheckError();
CudaCheckCall(g_allocator.DeviceFree(d_temp_storage));
// get number of unique codes (index of last element is 1 less than number of elements)
int n_old_pts = n_pts;
n_pts = final_pt_idx[n_pts - 1] + 1;
CudaCheckCall(cudaFree(contributions));
// move points into their final positions
// allocate space for un-duplicated points
CudaCheckCall(cudaMallocManaged(&d_tree.mortonCode, n_pts * sizeof(*d_tree.mortonCode)));
moveDups<<<blocks, tpb>>>(d_codes_sorted, d_tree.mortonCode, final_pt_idx, n_old_pts);
cudaDeviceSynchronize();
CudaCheckError();
CudaCheckCall(cudaFree(final_pt_idx));
printf("%d duplicates removed\n", n_old_pts - n_pts);
}
RadixTree::RadixTree(const PointCloud<float>& cloud) {
// Check that the cast is okay
assert(cloud.x_vals.size() <= std::numeric_limits<decltype(n_pts)>::max());
n_pts = static_cast<decltype(n_pts)>(cloud.x_vals.size());
// allocate memory for codes in tree
CudaCheckCall(cudaMallocManaged(&d_tree.mortonCode, sizeof(*d_tree.mortonCode) * n_pts));
// fill up d_tree.mortonCode
encodePoints(cloud);
// Sort the Morton codes in ascending order
Code_t* d_codes_sorted;
CudaCheckCall(cudaMallocManaged(&d_codes_sorted, sizeof(*d_codes_sorted) * n_pts));
void* d_temp_storage = nullptr;
size_t temp_storage_reqd = 0;
CudaCheckCall(
// get storage requirements
DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_reqd,
d_tree.mortonCode, d_codes_sorted,
n_pts)
);
CudaCheckCall(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_reqd));
// sort the Morton codes (keys only, no attached values)
CudaCheckCall(
DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_reqd,
d_tree.mortonCode, d_codes_sorted,
n_pts)
);
cudaDeviceSynchronize();
CudaCheckError();
g_allocator.DeviceFree(d_temp_storage);
// free these now that they have been sorted, will be re-allocated after duplicate removal
CudaCheckCall(cudaFree(d_tree.mortonCode));
// Remove duplicates
removeDuplicates(d_codes_sorted);
CudaCheckCall(cudaFree(d_codes_sorted));
// allocate memory for d_tree now that we know the true number of points
CudaCheckCall(cudaMallocManaged(&d_tree.hasLeafLeft, sizeof(*d_tree.hasLeafLeft) * n_pts));
CudaCheckCall(cudaMallocManaged(&d_tree.hasLeafRight, sizeof(*d_tree.hasLeafRight) * n_pts));
CudaCheckCall(cudaMallocManaged(&d_tree.prefixN, sizeof(*d_tree.prefixN) * n_pts));
CudaCheckCall(cudaMallocManaged(&d_tree.leftChild, sizeof(*d_tree.leftChild) * n_pts));
CudaCheckCall(cudaMallocManaged(&d_tree.parent, sizeof(*d_tree.parent) * n_pts));
// Make tree
// number of nodes is one less than points
n_nodes = n_pts - 1;
int blocks, tpb;
std::tie(blocks, tpb) = makeLaunchParams(n_nodes);
constructTree<<<blocks, tpb>>>(d_tree.mortonCode,
d_tree.hasLeafLeft,
d_tree.hasLeafRight,
d_tree.leftChild,
d_tree.parent,
d_tree.prefixN,
n_nodes);
cudaDeviceSynchronize();
CudaCheckError();
// for (int i = 0; i < n_nodes; ++i) {
// printf("idx = %d, code = %llx, prefixN = %d, left = %d, parent = %d, leftLeaf=%d, rightLeft=%d\n",
// i, d_tree.mortonCode[i], (int)d_tree.prefixN[i], d_tree.leftChild[i], d_tree.parent[i], (int)d_tree.hasLeafLeft[i], (int)d_tree.hasLeafRight[i]);
// }
// verify radix tree
// for (int i = 0; i < n_nodes; ++i) {
// int this_code_len = d_tree.prefixN[i];
// Code_t left_code = d_tree.mortonCode[d_tree.leftChild[i]];
// int left_code_len = d_tree.prefixN[d_tree.leftChild[i]];
// Code_t right_code = d_tree.mortonCode[d_tree.leftChild[i] + 1];
// int right_code_len = d_tree.prefixN[d_tree.leftChild[i] + 1];
// if (left_code >> (CODE_LEN - this_code_len) != right_code >> (CODE_LEN - this_code_len)) {
// assert(false);
// }
// }
}
RadixTree::~RadixTree() {
CudaCheckCall(cudaFree(d_tree.mortonCode));
CudaCheckCall(cudaFree(d_tree.hasLeafLeft));
CudaCheckCall(cudaFree(d_tree.hasLeafRight));
CudaCheckCall(cudaFree(d_tree.prefixN));
CudaCheckCall(cudaFree(d_tree.leftChild));
CudaCheckCall(cudaFree(d_tree.parent));
}
|
f9e73bf86c8631a1d84cb7187eb6ea599abe54d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define THREADS 32
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// all threads write their index into the array
input_array[my_index] = my_index;
__syncthreads();
// all threads write to the array from values in the array
// written by the second neighbor thread, causing a read-write race
// the mod is so the last even thread reads from the first
// even thread's index
//
// since the value never goes into an indexing position there is no
// flow
input_array[my_index] = input_array[(my_index+2) % THREADS];
}
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = THREADS;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
hipMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 777777;
}
// copy them to the GPU
hipMemcpy(device_array, host_array, num_bytes, hipMemcpyHostToDevice);
// define block and grid sizes
int block_size = THREADS;
int grid_size = (num_elements + block_size - 1) / block_size;
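// ceiling division: enough blocks to cover every element even if num_elements is not a multiple of block_size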
// run GPU code
hipLaunchKernelGGL(( device_global), dim3(grid_size), dim3(block_size), 0, 0, device_array, num_elements);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// copy output to host
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%6u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
printf("\n");
// free memory
free(host_array);
hipFree(device_array);
}
| f9e73bf86c8631a1d84cb7187eb6ea599abe54d6.cu | #include <stdio.h>
#define THREADS 32
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// all threads write their index into the array
input_array[my_index] = my_index;
__syncthreads();
// all threads write to the array from values in the array
// written by the second neighbor thread, causing a read-write race
// the mod is so the last even thread reads from the first
// even thread's index
//
// since the value never goes into an indexing position there is no
// flow
input_array[my_index] = input_array[(my_index+2) % THREADS];
}
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = THREADS;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 777777;
}
// copy them to the GPU
cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);
// define block and grid sizes
int block_size = THREADS;
int grid_size = (num_elements + block_size - 1) / block_size;
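// ceiling division: enough blocks to cover every element even if num_elements is not a multiple of block_size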
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// copy output to host
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%6u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
printf("\n");
// free memory
free(host_array);
cudaFree(device_array);
}
|
d2c70089f2eaed0161bd133063e7b872eb20c667.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/diagonal_grad_kernel.h"
#include "paddle/phi/kernels/funcs/diagonal.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename Context>
void DiagonalGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
int offset,
int axis1,
int axis2,
DenseTensor* in_grad) {
const auto* dout = &out_grad;
const auto* dout_data = dout->data<T>();
auto dout_dim = dout->dims().Get();
auto dout_dim_size = dout->dims().size();
std::vector<int64_t> res_dout = vectorize(phi::stride(dout->dims()));
DenseTensor dout_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_dout, dev_ctx, &dout_stride_tensor);
int64_t* dout_stride = dout_stride_tensor.data<int64_t>();
auto* dx = in_grad;
auto* dx_data = dev_ctx.template Alloc<T>(dx);
auto dx_dim = dx->dims().Get();
auto dx_dim_size = dx->dims().size();
std::vector<int64_t> res_dx = vectorize(phi::stride(dx->dims()));
DenseTensor dx_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_dx, dev_ctx, &dx_stride_tensor);
int64_t* dx_stride = dx_stride_tensor.data<int64_t>();
const int64_t offset_ = offset;
int64_t axis1_ = axis1 < 0 ? dx_dim_size + axis1 : axis1;
int64_t axis2_ = axis2 < 0 ? dx_dim_size + axis2 : axis2;
int64_t numel = dx->numel();
int threads = PADDLE_CUDA_NUM_THREADS;
int blocks = (numel + threads - 1) / threads;
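// funcs::DiagonalCuda is templated on the tensor rank, so dispatch on the runtime rank here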
switch (dx_dim_size) {
case 2:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 2, 1>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 3:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 3, 2>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 4:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 4, 3>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 5:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 5, 4>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 6:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 6, 5>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 7:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 7, 6>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 8:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 8, 7>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 9:
hipLaunchKernelGGL(( funcs::DiagonalCuda<T, 9, 8>), dim3(blocks), dim3(threads), 0, 0, dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
default:
PADDLE_THROW(errors::InvalidArgument(
"The rank of output(input@Grad) should be less than 10, but "
"received %d.",
dx_dim_size));
}
}
} // namespace phi
PD_REGISTER_KERNEL(diagonal_grad,
GPU,
ALL_LAYOUT,
phi::DiagonalGradKernel,
float,
double,
int,
int64_t) {}
| d2c70089f2eaed0161bd133063e7b872eb20c667.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/diagonal_grad_kernel.h"
#include "paddle/phi/kernels/funcs/diagonal.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T, typename Context>
void DiagonalGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
int offset,
int axis1,
int axis2,
DenseTensor* in_grad) {
const auto* dout = &out_grad;
const auto* dout_data = dout->data<T>();
auto dout_dim = dout->dims().Get();
auto dout_dim_size = dout->dims().size();
std::vector<int64_t> res_dout = vectorize(phi::stride(dout->dims()));
DenseTensor dout_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_dout, dev_ctx, &dout_stride_tensor);
int64_t* dout_stride = dout_stride_tensor.data<int64_t>();
auto* dx = in_grad;
auto* dx_data = dev_ctx.template Alloc<T>(dx);
auto dx_dim = dx->dims().Get();
auto dx_dim_size = dx->dims().size();
std::vector<int64_t> res_dx = vectorize(phi::stride(dx->dims()));
DenseTensor dx_stride_tensor;
paddle::framework::TensorFromVector<int64_t>(
res_dx, dev_ctx, &dx_stride_tensor);
int64_t* dx_stride = dx_stride_tensor.data<int64_t>();
const int64_t offset_ = offset;
int64_t axis1_ = axis1 < 0 ? dx_dim_size + axis1 : axis1;
int64_t axis2_ = axis2 < 0 ? dx_dim_size + axis2 : axis2;
int64_t numel = dx->numel();
int threads = PADDLE_CUDA_NUM_THREADS;
int blocks = (numel + threads - 1) / threads;
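// funcs::DiagonalCuda is templated on the tensor rank, so dispatch on the runtime rank here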
switch (dx_dim_size) {
case 2:
funcs::DiagonalCuda<T, 2, 1><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 3:
funcs::DiagonalCuda<T, 3, 2><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 4:
funcs::DiagonalCuda<T, 4, 3><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 5:
funcs::DiagonalCuda<T, 5, 4><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 6:
funcs::DiagonalCuda<T, 6, 5><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 7:
funcs::DiagonalCuda<T, 7, 6><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 8:
funcs::DiagonalCuda<T, 8, 7><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
case 9:
funcs::DiagonalCuda<T, 9, 8><<<blocks, threads>>>(dout_data,
dx_data,
offset_,
axis1_,
axis2_,
dx_stride,
dout_stride,
numel,
true);
break;
default:
PADDLE_THROW(errors::InvalidArgument(
"The rank of output(input@Grad) should be less than 10, but "
"received %d.",
dx_dim_size));
}
}
} // namespace phi
PD_REGISTER_KERNEL(diagonal_grad,
GPU,
ALL_LAYOUT,
phi::DiagonalGradKernel,
float,
double,
int,
int64_t) {}
|
16bf26e3ca175821db333d0a7de593d33d895f98.hip | // !!! This is a file automatically generated by hipify!!!
#include "Prerequisites.cuh"
#include "Helper.cuh"
namespace gtom
{
hipArray_t d_MallocArray(int2 dims)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<tfloat>();
hipArray_t a_input;
hipMallocArray(&a_input, &desc, dims.x, dims.y);
return a_input;
}
void d_MemcpyToArray(tfloat* d_input, hipArray_t a_output, int2 dims)
{
hipMemcpyToArray(a_output, 0, 0, d_input, dims.x * dims.y * sizeof(tfloat), hipMemcpyDeviceToDevice);
}
void d_BindTextureToArray(tfloat* d_input, hipArray_t &createdarray, cudaTex &createdtexture, int2 dims, hipTextureFilterMode filtermode, bool normalizedcoords)
{
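// copy the input into an array, then wrap it in a texture object with the
// requested filter mode, wrap addressing, and coordinate normalization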
hipChannelFormatDesc desc = hipCreateChannelDesc<tfloat>();
hipArray* a_input;
hipMallocArray(&a_input, &desc, dims.x, dims.y);
hipMemcpyToArray(a_input, 0, 0, d_input, dims.x * dims.y * sizeof(tfloat), hipMemcpyDeviceToDevice);
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = a_input;
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.addressMode[2] = hipAddressModeWrap;
cudaTex texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdarray = a_input;
createdtexture = texObj;
}
void d_BindTextureToArray(hipArray_t a_input, cudaTex& createdtexture, int2 dims, hipTextureFilterMode filtermode, bool normalizedcoords)
{
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = a_input;
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.addressMode[2] = hipAddressModeWrap;
cudaTex texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdtexture = texObj;
}
void d_BindTextureToArray(tfloat* d_input, hipArray_t* &h_createdarrays, cudaTex* &h_createdtextures, int2 dims, hipTextureFilterMode filtermode, bool normalizedcoords, int nimages)
{
for (int n = 0; n < nimages; n++)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<tfloat>();
hipArray* a_input;
hipMallocArray(&a_input, &desc, dims.x, dims.y);
hipMemcpyToArray(a_input, 0, 0, d_input + Elements2(dims) * n, dims.x * dims.y * sizeof(tfloat), hipMemcpyDeviceToDevice);
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = a_input;
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.addressMode[2] = hipAddressModeWrap;
cudaTex texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
h_createdarrays[n] = a_input;
h_createdtextures[n] = texObj;
}
}
void d_BindTextureTo3DArray(tfloat* d_input, hipArray_t &createdarray, cudaTex &createdtexture, int3 dims, hipTextureFilterMode filtermode, bool normalizedcoords)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<tfloat>();
hipArray* a_input;
hipMalloc3DArray(&a_input, &desc, make_hipExtent(dims.x, dims.y, dims.z));
hipMemcpy3DParms p = { 0 };
p.extent = make_hipExtent(dims.x, dims.y, dims.z);
p.srcPtr = make_hipPitchedPtr(d_input, dims.x * sizeof(tfloat), dims.x, dims.y);
p.dstArray = a_input;
p.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(&p);
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = a_input;
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.addressMode[2] = hipAddressModeWrap;
cudaTex texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdarray = a_input;
createdtexture = texObj;
}
void d_BindTextureTo3DArray(tfloat* d_input, hipArray_t* &h_createdarrays, cudaTex* &h_createdtextures, int3 dims, hipTextureFilterMode filtermode, bool normalizedcoords, int nvolumes)
{
for (int n = 0; n < nvolumes; n++)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<tfloat>();
hipArray* a_input;
hipMalloc3DArray(&a_input, &desc, make_hipExtent(dims.x, dims.y, dims.z));
hipMemcpy3DParms p = { 0 };
p.extent = make_hipExtent(dims.x, dims.y, dims.z);
p.srcPtr = make_hipPitchedPtr(d_input + Elements(dims) * n, dims.x * sizeof(tfloat), dims.x, dims.y);
p.dstArray = a_input;
p.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(&p);
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = a_input;
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.addressMode[2] = hipAddressModeWrap;
cudaTex texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
h_createdarrays[n] = a_input;
h_createdtextures[n] = texObj;
}
}
} | 16bf26e3ca175821db333d0a7de593d33d895f98.cu | #include "Prerequisites.cuh"
#include "Helper.cuh"
namespace gtom
{
cudaArray_t d_MallocArray(int2 dims)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<tfloat>();
cudaArray_t a_input;
cudaMallocArray(&a_input, &desc, dims.x, dims.y);
return a_input;
}
void d_MemcpyToArray(tfloat* d_input, cudaArray_t a_output, int2 dims)
{
cudaMemcpyToArray(a_output, 0, 0, d_input, dims.x * dims.y * sizeof(tfloat), cudaMemcpyDeviceToDevice);
}
void d_BindTextureToArray(tfloat* d_input, cudaArray_t &createdarray, cudaTex &createdtexture, int2 dims, cudaTextureFilterMode filtermode, bool normalizedcoords)
{
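// copy the input into an array, then wrap it in a texture object with the
// requested filter mode, wrap addressing, and coordinate normalization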
cudaChannelFormatDesc desc = cudaCreateChannelDesc<tfloat>();
cudaArray* a_input;
cudaMallocArray(&a_input, &desc, dims.x, dims.y);
cudaMemcpyToArray(a_input, 0, 0, d_input, dims.x * dims.y * sizeof(tfloat), cudaMemcpyDeviceToDevice);
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = a_input;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeWrap;
cudaTex texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdarray = a_input;
createdtexture = texObj;
}
void d_BindTextureToArray(cudaArray_t a_input, cudaTex& createdtexture, int2 dims, cudaTextureFilterMode filtermode, bool normalizedcoords)
{
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = a_input;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeWrap;
cudaTex texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdtexture = texObj;
}
void d_BindTextureToArray(tfloat* d_input, cudaArray_t* &h_createdarrays, cudaTex* &h_createdtextures, int2 dims, cudaTextureFilterMode filtermode, bool normalizedcoords, int nimages)
{
for (int n = 0; n < nimages; n++)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<tfloat>();
cudaArray* a_input;
cudaMallocArray(&a_input, &desc, dims.x, dims.y);
cudaMemcpyToArray(a_input, 0, 0, d_input + Elements2(dims) * n, dims.x * dims.y * sizeof(tfloat), cudaMemcpyDeviceToDevice);
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = a_input;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeWrap;
cudaTex texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
h_createdarrays[n] = a_input;
h_createdtextures[n] = texObj;
}
}
void d_BindTextureTo3DArray(tfloat* d_input, cudaArray_t &createdarray, cudaTex &createdtexture, int3 dims, cudaTextureFilterMode filtermode, bool normalizedcoords)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<tfloat>();
cudaArray* a_input;
cudaMalloc3DArray(&a_input, &desc, make_cudaExtent(dims.x, dims.y, dims.z));
cudaMemcpy3DParms p = { 0 };
p.extent = make_cudaExtent(dims.x, dims.y, dims.z);
p.srcPtr = make_cudaPitchedPtr(d_input, dims.x * sizeof(tfloat), dims.x, dims.y);
p.dstArray = a_input;
p.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(&p);
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = a_input;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeWrap;
cudaTex texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
createdarray = a_input;
createdtexture = texObj;
}
void d_BindTextureTo3DArray(tfloat* d_input, cudaArray_t* &h_createdarrays, cudaTex* &h_createdtextures, int3 dims, cudaTextureFilterMode filtermode, bool normalizedcoords, int nvolumes)
{
for (int n = 0; n < nvolumes; n++)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<tfloat>();
cudaArray* a_input;
cudaMalloc3DArray(&a_input, &desc, make_cudaExtent(dims.x, dims.y, dims.z));
cudaMemcpy3DParms p = { 0 };
p.extent = make_cudaExtent(dims.x, dims.y, dims.z);
p.srcPtr = make_cudaPitchedPtr(d_input + Elements(dims) * n, dims.x * sizeof(tfloat), dims.x, dims.y);
p.dstArray = a_input;
p.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(&p);
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = a_input;
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.filterMode = filtermode;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = normalizedcoords;
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeWrap;
cudaTex texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
h_createdarrays[n] = a_input;
h_createdtextures[n] = texObj;
}
}
} |
ac184426ed123afe655496bdc41acea41c9fe3cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
__device__ float generate(
hiprandState_t * globalState,
int idx )
{
idx = threadIdx.x + blockIdx.x * blockDim.x;
hiprandState_t localState = globalState[ idx ];
float RANDOM = hiprand_uniform( &localState );
globalState[ idx ] = localState;
return RANDOM;
}
__global__ void setup_kernel(
hiprandState_t * state,
unsigned long seed )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init( seed, id, 0, &state[ id ] );
return;
}
__global__ void InitializeTemperature(
float * const ioArray,
hiprandState_t * globalState,
const int inArrSize )
{
// generate random numbers
for ( int i = 0; i < inArrSize; i++ )
{
float k = generate( globalState, i );
ioArray[ i ] = k;
}
return;
}
/* ========================================================================== */
/* HeatConduction */
/* -------------------------------------------------------------------------- */
/*!
* @function HeatConduction2D
*
* @abstract function to calculate the 2D heat conduction in a body
*
* @ Initial conditions:
T(0,y,t) = T(Lx,y,t) = 0
T(x,0,t) = T(x,Ly,t) = 0
T(x,y,0) = initial temperature
0 <= x <= Lx
0 <= y <= Ly
0 <= t <= T
We assume a square body and divide it into small squares, each
having a temperature.
Temperature flows from warmer squares to colder ones.
Temperature can flow to the neighbor squares (left, right, top, bottom).
We use the appropriate offsets to address these neighbors, and apply
the boundary conditions when a neighbor index would fall outside the
body.
* @param inWidth [ input ] The width of the body (cm)
*
* @param inHeight [ input ] The height of the body (cm)
*
* @param inTemp [ input ] The initial temperature of body
*
* @param ouTemp [ output ] The temperature of body after solving the system
*/
/* ========================================================================== */
__global__ void HeatConduction2D(
const int inWidth,
const int inHeight,
const float * const inTemp,
float * const ouTemp )
{
int rowIdx = threadIdx.y + blockIdx.y * blockDim.y;
int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
int offset = rowIdx * inWidth + colIdx;
if ( rowIdx >= inHeight || colIdx >= inWidth ) return;
// new offsets
int left = offset - 1;
int right = offset + 1;
int top = offset + inWidth;
int bottom = offset - inWidth;
//boundary conditions
if ( 0 == colIdx ) left += inWidth;
if ( inWidth - 1 == colIdx ) right -= inWidth;
if ( 0 == rowIdx ) bottom += inWidth * inHeight;
if ( inHeight - 1 == rowIdx ) top -= inWidth * inHeight;
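// explicit (Jacobi-style) update: new T = old T + 1/4 of the discrete Laplacian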
ouTemp[ offset ] = inTemp[ offset ] + (1.f/4.f) * ( inTemp[ left ] + inTemp[ right ] + inTemp[ top ] + inTemp[ bottom ] - 4 * inTemp[ offset ] );
}
| ac184426ed123afe655496bdc41acea41c9fe3cb.cu | #include <curand_kernel.h>
__device__ float generate(
curandState * globalState,
int idx )
{
idx = threadIdx.x + blockIdx.x * blockDim.x;
curandState localState = globalState[ idx ];
float RANDOM = curand_uniform( &localState );
globalState[ idx ] = localState;
return RANDOM;
}
__global__ void setup_kernel(
curandState * state,
unsigned long seed )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init( seed, id, 0, &state[ id ] );
return;
}
__global__ void InitializeTemperature(
float * const ioArray,
curandState * globalState,
const int inArrSize )
{
// generate random numbers
for ( int i = 0; i < inArrSize; i++ )
{
float k = generate( globalState, i );
ioArray[ i ] = k;
}
return;
}
/* ========================================================================== */
/* HeatConduction */
/* -------------------------------------------------------------------------- */
/*!
* @function HeatConduction2D
*
* @abstract function to calculate the 2D heat conduction in a body
*
* @ Initial conditions:
T(0,y,t) = T(Lx,y,t) = 0
T(x,0,t) = T(x,Ly,t) = 0
T(x,y,0) = initial temperature
0 <= x <= Lx
0 <= y <= Ly
0 <= t <= T
We assume a square body and divide it into small squares, each
having a temperature.
Temperature flows from warmer squares to colder ones.
Temperature can flow to the neighbor squares (left, right, top, bottom).
We use the appropriate offsets to address these neighbors, and apply
the boundary conditions when a neighbor index would fall outside the
body.
* @param inWidth [ input ] The width of the body (cm)
*
* @param inHeight [ input ] The height of the body (cm)
*
* @param inTemp [ input ] The initial temperature of body
*
* @param ouTemp [ output ] The temperature of body after solving the system
*/
/* ========================================================================== */
__global__ void HeatConduction2D(
const int inWidth,
const int inHeight,
const float * const inTemp,
float * const ouTemp )
{
int rowIdx = threadIdx.y + blockIdx.y * blockDim.y;
int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
int offset = rowIdx * inWidth + colIdx;
if ( rowIdx >= inHeight || colIdx >= inWidth ) return;
// new offsets
int left = offset - 1;
int right = offset + 1;
int top = offset + inWidth;
int bottom = offset - inWidth;
//boundary conditions
if ( 0 == colIdx ) left += inWidth;
if ( inWidth - 1 == colIdx ) right -= inWidth;
if ( 0 == rowIdx ) bottom += inWidth * inHeight;
if ( inHeight - 1 == rowIdx ) top -= inWidth * inHeight;
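// explicit (Jacobi-style) update: new T = old T + 1/4 of the discrete Laplacian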
ouTemp[ offset ] = inTemp[ offset ] + (1.f/4.f) * ( inTemp[ left ] + inTemp[ right ] + inTemp[ top ] + inTemp[ bottom ] - 4 * inTemp[ offset ] );
}
|
44ea1a5f3530f8bc5d8ff571ad168ace377cbf87.hip | // !!! This is a file automatically generated by hipify!!!
// fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "jf,thread1.h"
#include "fdk-gpu.h"
typedef struct {
float *image; // [nz nx ny] <- trick!
const cbct_ig *ig; // image geometry
const cbct_cg *cg; // cone-beam CT system geometry
int na; // # of views
cfloat *proj; // [nt ns na] <- trick! projection views
cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
//
// fdk_ts_back_init()
// interface routine for threaded versions
//
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
fdk_ts_s *pa = (fdk_ts_s *) in;
const cbct_ig *ig = pa->ig;
const cbct_cg *cg = pa->cg;
cint na = pa->na;
cfloat *proj = pa->proj;
cdouble *beta = pa->beta;
cint nst = cg->ns * cg->nt;
(void) nthread;
printf("nx: %d, ny: %d, nz: %d\n", ig->nx, ig->ny, ig->nz);
printf("nt: %d, ns: %d, na: %d\n", cg->nt, cg->ns, na);
#ifdef fdk_gpu
cint nxyz = ig->nx * ig->ny * ig->nz;
float *dev_img;
jf_gpu_malloc(dev_img, nxyz) // image memory on device
jf_gpu_memset(dev_img, 0, nxyz) // initialize device image to 0
hipBindTexture( 0, tex_img, dev_img, nxyz*sizeof(float) );
float *dev_proj;
int proj_pitch = cg->nt * sizeof(float);
jf_gpu_malloc(dev_proj, nst) // one projection view on device
byte *dev_mask2;
cint nxy = ig->nx * ig->ny;
jf_gpu_malloc(dev_mask2, nxy) // 2D mask
jf_gpu_put(dev_mask2, ig->mask2, nxy)
hipBindTexture( 0, tex_mask2, dev_mask2, nxy*sizeof(byte));
#ifdef tex_1d
//set 1D texture settings
tex_proj.normalized = 0;
tex_proj.filterMode = hipFilterModeLinear;
tex_proj.addressMode[0] = hipAddressModeClamp;
tex_proj.addressMode[1] = hipAddressModeClamp;
tex_proj.addressMode[2] = hipAddressModeClamp;
#else
//set 2D texture settings
tex_proj2d.normalized = 0;
tex_proj2d.filterMode = hipFilterModeLinear;
tex_proj2d.addressMode[0] = hipAddressModeClamp;
tex_proj2d.addressMode[1] = hipAddressModeClamp;
tex_proj2d.addressMode[2] = hipAddressModeClamp;
#endif
#endif
for (int ia=0; ia < na; ++ia, proj += nst) { // each view
#ifdef fdk_gpu
// copy this view to gpu and bind to texture
jf_gpu_put(dev_proj, proj, nst)
#ifdef tex_1d
hipBindTexture( 0, tex_proj, dev_proj, nst*sizeof(float) );
#else
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
if(
hipBindTexture2D(0, tex_proj2d, dev_proj, channelDesc, cg->nt, cg->ns, proj_pitch)
!= hipSuccess)
Fail("proj2D bind fail")
#endif
#else
float *dev_img = pa->image; // already zeroed
cfloat *dev_proj = proj;
cbyte *dev_mask2 = ig->mask2;
#endif
if (!fdk_ts_back1_gpu(dev_img,
ig->nx, ig->ny, ig->nz,
ig->dx, ig->dy, ig->dz,
ig->offset_x, ig->offset_y, ig->offset_z,
dev_mask2, id + 1, // each thread does some voxels only
cg->dso, cg->dsd, cg->dfs,
cg->ns, cg->nt,
cg->ds, cg->dt, cg->offset_s, cg->offset_t,
dev_proj, beta[ia]))
Fail("fdk_ts_back1_gpu()")
}
#ifdef fdk_gpu
hipUnbindTexture( tex_img );
hipUnbindTexture( tex_proj );
hipUnbindTexture( tex_mask2 );
hipUnbindTexture( tex_proj2d );
Note("Copying image to host")
jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread!
Note("freeing dev_img memory")
jf_gpu_free(dev_img)
Note("freeing dev_proj memory\n")
jf_gpu_free(dev_proj)
#endif
Ok
}
//
// fdk_ts_back_t()
// entry point for threaded FDK back-projector
//
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
fdk_ts_s st;
#define put(arg) st.arg = arg;
put(image)
put(ig)
put(cg)
put(na)
put(proj)
put(beta)
#undef put
Bzero(image, ig->nx * ig->ny * ig->nz) // initialize image volume to 0
Call(jf_thread1_top, (fdk_ts_back_init,
NULL /* wrap up */, &st, nthread, Chat))
Ok
}
| 44ea1a5f3530f8bc5d8ff571ad168ace377cbf87.cu | // fdk-ts-t.cu
// Threaded versions of FDK back-projection
// For detector index (t,s).
// Copyright 2008-10-09, Jeff Fessler, University of Michigan
#include "jf-cuda.h"
#include "def,fdk.h"
#include "jf,thread1.h"
#include "fdk-gpu.h"
typedef struct {
float *image; // [nz nx ny] <- trick!
const cbct_ig *ig; // image geometry
const cbct_cg *cg; // cone-beam CT system geometry
int na; // # of views
cfloat *proj; // [nt ns na] <- trick! projection views
cdouble *beta; // [na] source angles [radians]
} fdk_ts_s;
//
// fdk_ts_back_init()
// interface routine for threaded versions
//
static sof fdk_ts_back_init(void *in, cint id, cint nthread)
{
fdk_ts_s *pa = (fdk_ts_s *) in;
const cbct_ig *ig = pa->ig;
const cbct_cg *cg = pa->cg;
cint na = pa->na;
cfloat *proj = pa->proj;
cdouble *beta = pa->beta;
cint nst = cg->ns * cg->nt;
(void) nthread;
printf("nx: %d, ny: %d, nz: %d\n", ig->nx, ig->ny, ig->nz);
printf("nt: %d, ns: %d, na: %d\n", cg->nt, cg->ns, na);
#ifdef fdk_gpu
cint nxyz = ig->nx * ig->ny * ig->nz;
float *dev_img;
jf_gpu_malloc(dev_img, nxyz) // image memory on device
jf_gpu_memset(dev_img, 0, nxyz) // initialize device image to 0
cudaBindTexture( 0, tex_img, dev_img, nxyz*sizeof(float) );
float *dev_proj;
int proj_pitch = cg->nt * sizeof(float);
jf_gpu_malloc(dev_proj, nst) // one projection view on device
byte *dev_mask2;
cint nxy = ig->nx * ig->ny;
jf_gpu_malloc(dev_mask2, nxy) // 2D mask
jf_gpu_put(dev_mask2, ig->mask2, nxy)
cudaBindTexture( 0, tex_mask2, dev_mask2, nxy*sizeof(byte));
#ifdef tex_1d
//set 1D texture settings
tex_proj.normalized = 0;
tex_proj.filterMode = cudaFilterModeLinear;
tex_proj.addressMode[0] = cudaAddressModeClamp;
tex_proj.addressMode[1] = cudaAddressModeClamp;
tex_proj.addressMode[2] = cudaAddressModeClamp;
#else
//set 2D texture settings
tex_proj2d.normalized = 0;
tex_proj2d.filterMode = cudaFilterModeLinear;
tex_proj2d.addressMode[0] = cudaAddressModeClamp;
tex_proj2d.addressMode[1] = cudaAddressModeClamp;
tex_proj2d.addressMode[2] = cudaAddressModeClamp;
#endif
#endif
for (int ia=0; ia < na; ++ia, proj += nst) { // each view
#ifdef fdk_gpu
// copy this view to gpu and bind to texture
jf_gpu_put(dev_proj, proj, nst)
#ifdef tex_1d
cudaBindTexture( 0, tex_proj, dev_proj, nst*sizeof(float) );
#else
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
if(
cudaBindTexture2D(0, tex_proj2d, dev_proj, channelDesc, cg->nt, cg->ns, proj_pitch)
!= cudaSuccess)
Fail("proj2D bind fail")
#endif
#else
float *dev_img = pa->image; // already zeroed
cfloat *dev_proj = proj;
cbyte *dev_mask2 = ig->mask2;
#endif
if (!fdk_ts_back1_gpu(dev_img,
ig->nx, ig->ny, ig->nz,
ig->dx, ig->dy, ig->dz,
ig->offset_x, ig->offset_y, ig->offset_z,
dev_mask2, id + 1, // each thread does some voxels only
cg->dso, cg->dsd, cg->dfs,
cg->ns, cg->nt,
cg->ds, cg->dt, cg->offset_s, cg->offset_t,
dev_proj, beta[ia]))
Fail("fdk_ts_back1_gpu()")
}
#ifdef fdk_gpu
cudaUnbindTexture( tex_img );
cudaUnbindTexture( tex_proj );
cudaUnbindTexture( tex_mask2 );
cudaUnbindTexture( tex_proj2d );
Note("Copying image to host")
jf_gpu_get(pa->image, dev_img, nxyz) // caution: works only for 1 thread!
Note("freeing dev_img memory")
jf_gpu_free(dev_img)
Note("freeing dev_proj memory\n")
jf_gpu_free(dev_proj)
#endif
Ok
}
//
// fdk_ts_back_t()
// entry point for threaded FDK back-projector
//
sof fdk_ts_back_t(
float *image, // [nz nx ny] <- trick!
const cbct_ig *ig,
const cbct_cg *cg,
cint na, // # of views
cfloat *proj, // [nt ns na] <- trick! projection views
cdouble *beta, // [na] source angles [radians]
cint nthread, // # of threads
cint chat)
{
fdk_ts_s st;
#define put(arg) st.arg = arg;
put(image)
put(ig)
put(cg)
put(na)
put(proj)
put(beta)
#undef put
Bzero(image, ig->nx * ig->ny * ig->nz) // initialize image volume to 0
Call(jf_thread1_top, (fdk_ts_back_init,
NULL /* wrap up */, &st, nthread, Chat))
Ok
}
|
ed4eb8c87446d1443387be34a832608fb3c9191a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mat_mul_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec_one = NULL;
hipMalloc(&vec_one, XSIZE*YSIZE);
float *vec_two = NULL;
hipMalloc(&vec_two, XSIZE*YSIZE);
float *ret_vec = NULL;
hipMalloc(&ret_vec, XSIZE*YSIZE);
int vec_one_row = 1;
int vec_one_col = 1;
int vec_two_col = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mat_mul_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mat_mul_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mat_mul_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ed4eb8c87446d1443387be34a832608fb3c9191a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mat_mul_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec_one = NULL;
cudaMalloc(&vec_one, XSIZE*YSIZE);
float *vec_two = NULL;
cudaMalloc(&vec_two, XSIZE*YSIZE);
float *ret_vec = NULL;
cudaMalloc(&ret_vec, XSIZE*YSIZE);
int vec_one_row = 1;
int vec_one_col = 1;
int vec_two_col = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mat_mul_gpu<<<gridBlock,threadBlock>>>(vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mat_mul_gpu<<<gridBlock,threadBlock>>>(vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mat_mul_gpu<<<gridBlock,threadBlock>>>(vec_one,vec_two,ret_vec,vec_one_row,vec_one_col,vec_two_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7673c860f936ce9ea2cfd034abfc4f85413bb9c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addBias.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Z = NULL;
hipMalloc(&Z, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int Z_x_dim = 1;
int Z_y_dim = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
addBias), dim3(gridBlock),dim3(threadBlock), 0, 0, Z,b,Z_x_dim,Z_y_dim);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
addBias), dim3(gridBlock),dim3(threadBlock), 0, 0, Z,b,Z_x_dim,Z_y_dim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
addBias), dim3(gridBlock),dim3(threadBlock), 0, 0, Z,b,Z_x_dim,Z_y_dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7673c860f936ce9ea2cfd034abfc4f85413bb9c0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addBias.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Z = NULL;
cudaMalloc(&Z, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int Z_x_dim = 1;
int Z_y_dim = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
addBias<<<gridBlock,threadBlock>>>(Z,b,Z_x_dim,Z_y_dim);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addBias<<<gridBlock,threadBlock>>>(Z,b,Z_x_dim,Z_y_dim);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addBias<<<gridBlock,threadBlock>>>(Z,b,Z_x_dim,Z_y_dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e96f883766bcdc74e99e57b13bdfe5648dbf141b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2018, NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <simt/cstddef.hxx>
#include <simt/cstdint.hxx>
#include <simt/atomic.hxx>
template<class T> static constexpr T minimum(T a, T b) { return a < b ? a : b; }
struct trie {
struct ref {
simt::atomic<trie*, simt::thread_scope_device> ptr = ATOMIC_VAR_INIT(nullptr);
// the flag will protect against multiple pointer updates
simt::std::atomic_flag flag = ATOMIC_FLAG_INIT;
} next[26];
simt::std::atomic<short> count = ATOMIC_VAR_INIT(0);
};
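// Note on the ref pattern above (a reading of make_trie below, not a claim
// from the original author): the thread whose flag.test_and_set() returns
// false wins the race, takes a node from the bump allocator and publishes it
// with a release store followed by ptr.notify_all(); every thread that loses
// the race wait()s on ptr until it becomes non-null. Each ref therefore gets
// exactly one allocation without taking a lock.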
__host__ __device__
int index_of(char c) {
if(c >= 'a' && c <= 'z') return c - 'a';
if(c >= 'A' && c <= 'Z') return c - 'A';
return -1;
};
__host__ __device__
void make_trie(/* trie to insert word counts into */ trie& root,
/* bump allocator to get new nodes*/ simt::std::atomic<trie*>& bump,
/* input */ const char* begin, const char* end,
/* thread this invocation is for */ unsigned index,
/* how many threads there are */ unsigned domain) {
auto const size = end - begin;
auto const stride = (size / domain + 1);
auto off = minimum(size, stride * index);
auto const last = minimum(size, off + stride);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) != -1; ++off, c = begin[off]);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) == -1; ++off, c = begin[off]);
trie *n = &root;
for(char c = begin[off]; ; ++off, c = begin[off]) {
auto const index = off >= size ? -1 : index_of(c);
if(index == -1) {
if(n != &root) {
n->count.fetch_add(1, simt::std::memory_order_relaxed);
n = &root;
}
//end of last word?
if(off >= size || off > last)
break;
else
continue;
}
if(n->next[index].ptr.load(simt::memory_order_acquire) == nullptr) {
if(n->next[index].flag.test_and_set(simt::std::memory_order_relaxed))
n->next[index].ptr.wait(nullptr, simt::std::memory_order_acquire);
else {
auto next = bump.fetch_add(1, simt::std::memory_order_relaxed);
n->next[index].ptr.store(next, simt::std::memory_order_release);
n->next[index].ptr.notify_all();
}
}
n = n->next[index].ptr.load(simt::std::memory_order_relaxed);
}
}
__global__ // __launch_bounds__(1024, 1)
void call_make_trie(trie* t, simt::std::atomic<trie*>* bump, const char* begin, const char* end) {
auto const index = blockDim.x * blockIdx.x + threadIdx.x;
auto const domain = gridDim.x * blockDim.x;
make_trie(*t, *bump, begin, end, index, domain);
}
__global__ void do_nothing() { }
#include <iostream.hxx>
#include <cassert.hxx>
#include <fstream.hxx>
#include <utility.hxx>
#include <chrono.hxx>
#include <thread.hxx>
#include <memory.hxx>
#include <vector.hxx>
#include <string.hxx>
#define check(ans) { assert_((ans), __FILE__, __LINE__); }
inline void assert_(hipError_t code, const char *file, int line) {
if (code == hipSuccess) return;
std::cerr << "check failed: " << hipGetErrorString(code) << " : " << file << '@' << line << std::endl;
abort();
}
template <class T>
struct managed_allocator {
typedef simt::std::size_t size_type;
typedef simt::std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;// (deprecated in C++17)(removed in C++20) T*
typedef const T* const_pointer;// (deprecated in C++17)(removed in C++20) const T*
typedef T& reference;// (deprecated in C++17)(removed in C++20) T&
typedef const T& const_reference;// (deprecated in C++17)(removed in C++20) const T&
template< class U > struct rebind { typedef managed_allocator<U> other; };
managed_allocator() = default;
template <class U> constexpr managed_allocator(const managed_allocator<U>&) noexcept {}
T* allocate(std::size_t n) {
void* out = nullptr;
check(hipMallocManaged(&out, n*sizeof(T)));
return static_cast<T*>(out);
}
void deallocate(T* p, std::size_t) noexcept {
check(hipFree(p));
}
};
template<class T, class... Args>
T* make_(Args &&... args) {
managed_allocator<T> ma;
return new (ma.allocate(1)) T(std::forward<Args>(args)...);
}
template<class String>
void do_trie(String const& input, bool use_simt, int blocks, int threads) {
std::vector<trie, managed_allocator<trie>> nodes(1<<17);
if(use_simt) check(hipMemset(nodes.data(), 0, nodes.size()*sizeof(trie)));
auto t = nodes.data();
auto b = make_<simt::std::atomic<trie*>>(nodes.data()+1);
auto const begin = std::chrono::steady_clock::now();
std::atomic_signal_fence(std::memory_order_seq_cst);
if(use_simt) {
hipLaunchKernelGGL(( call_make_trie), dim3(blocks),dim3(threads), 0, 0, t, b, input.data(), input.data() + input.size());
check(hipDeviceSynchronize());
}
else {
assert(blocks == 1);
std::vector<std::thread> tv(threads);
for(auto count = threads; count; --count)
tv[count - 1] = std::thread([&, count]() {
make_trie(*t, *b, input.data(), input.data() + input.size(), count - 1, threads);
});
for(auto& t : tv)
t.join();
}
std::atomic_signal_fence(std::memory_order_seq_cst);
auto const end = std::chrono::steady_clock::now();
auto const time = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();
auto const count = b->load() - nodes.data();
std::cout << "Assembled " << count << " nodes on " << blocks << "x" << threads << " " << (use_simt ? "simt" : "cpu") << " threads in " << time << "ms." << std::endl;
}
int main() {
std::basic_string<char, std::char_traits<char>, managed_allocator<char>> input;
char const* files[] = {
"2600-0.txt", "2701-0.txt", "35-0.txt", "84-0.txt", "8800.txt",
"pg1727.txt", "pg55.txt", "pg6130.txt", "pg996.txt", "1342-0.txt"
};
for(auto* ptr : files) {
auto const cur = input.size();
std::ifstream in(ptr);
in.seekg(0, std::ios_base::end);
auto const pos = in.tellg();
input.resize(cur + pos);
in.seekg(0, std::ios_base::beg);
in.read((char*)input.data() + cur, pos);
}
do_trie(input, false, 1, 1);
do_trie(input, false, 1, 1);
do_trie(input, false, 1, std::thread::hardware_concurrency());
do_trie(input, false, 1, std::thread::hardware_concurrency());
assert(hipSuccess == hipSetDevice(0));
hipDeviceProp_t deviceProp;
assert(hipSuccess == hipGetDeviceProperties(&deviceProp, 0));
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
return 0;
}
| e96f883766bcdc74e99e57b13bdfe5648dbf141b.cu | /*
Copyright (c) 2018, NVIDIA Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <simt/cstddef.hxx>
#include <simt/cstdint.hxx>
#include <simt/atomic.hxx>
template<class T> static constexpr T minimum(T a, T b) { return a < b ? a : b; }
struct trie {
struct ref {
simt::atomic<trie*, simt::thread_scope_device> ptr = ATOMIC_VAR_INIT(nullptr);
// the flag will protect against multiple pointer updates
simt::std::atomic_flag flag = ATOMIC_FLAG_INIT;
} next[26];
simt::std::atomic<short> count = ATOMIC_VAR_INIT(0);
};
__host__ __device__
int index_of(char c) {
if(c >= 'a' && c <= 'z') return c - 'a';
if(c >= 'A' && c <= 'Z') return c - 'A';
return -1;
};
__host__ __device__
void make_trie(/* trie to insert word counts into */ trie& root,
/* bump allocator to get new nodes*/ simt::std::atomic<trie*>& bump,
/* input */ const char* begin, const char* end,
/* thread this invocation is for */ unsigned index,
/* how many threads there are */ unsigned domain) {
auto const size = end - begin;
auto const stride = (size / domain + 1);
auto off = minimum(size, stride * index);
auto const last = minimum(size, off + stride);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) != -1; ++off, c = begin[off]);
for(char c = begin[off]; off < size && off != last && c != 0 && index_of(c) == -1; ++off, c = begin[off]);
trie *n = &root;
for(char c = begin[off]; ; ++off, c = begin[off]) {
auto const index = off >= size ? -1 : index_of(c);
if(index == -1) {
if(n != &root) {
n->count.fetch_add(1, simt::std::memory_order_relaxed);
n = &root;
}
//end of last word?
if(off >= size || off > last)
break;
else
continue;
}
if(n->next[index].ptr.load(simt::memory_order_acquire) == nullptr) {
if(n->next[index].flag.test_and_set(simt::std::memory_order_relaxed))
n->next[index].ptr.wait(nullptr, simt::std::memory_order_acquire);
else {
auto next = bump.fetch_add(1, simt::std::memory_order_relaxed);
n->next[index].ptr.store(next, simt::std::memory_order_release);
n->next[index].ptr.notify_all();
}
}
n = n->next[index].ptr.load(simt::std::memory_order_relaxed);
}
}
__global__ // __launch_bounds__(1024, 1)
void call_make_trie(trie* t, simt::std::atomic<trie*>* bump, const char* begin, const char* end) {
auto const index = blockDim.x * blockIdx.x + threadIdx.x;
auto const domain = gridDim.x * blockDim.x;
make_trie(*t, *bump, begin, end, index, domain);
}
__global__ void do_nothing() { }
#include <iostream.hxx>
#include <cassert.hxx>
#include <fstream.hxx>
#include <utility.hxx>
#include <chrono.hxx>
#include <thread.hxx>
#include <memory.hxx>
#include <vector.hxx>
#include <string.hxx>
#define check(ans) { assert_((ans), __FILE__, __LINE__); }
inline void assert_(cudaError_t code, const char *file, int line) {
if (code == cudaSuccess) return;
std::cerr << "check failed: " << cudaGetErrorString(code) << " : " << file << '@' << line << std::endl;
abort();
}
template <class T>
struct managed_allocator {
typedef simt::std::size_t size_type;
typedef simt::std::ptrdiff_t difference_type;
typedef T value_type;
typedef T* pointer;// (deprecated in C++17)(removed in C++20) T*
typedef const T* const_pointer;// (deprecated in C++17)(removed in C++20) const T*
typedef T& reference;// (deprecated in C++17)(removed in C++20) T&
typedef const T& const_reference;// (deprecated in C++17)(removed in C++20) const T&
template< class U > struct rebind { typedef managed_allocator<U> other; };
managed_allocator() = default;
template <class U> constexpr managed_allocator(const managed_allocator<U>&) noexcept {}
T* allocate(std::size_t n) {
void* out = nullptr;
check(cudaMallocManaged(&out, n*sizeof(T)));
return static_cast<T*>(out);
}
void deallocate(T* p, std::size_t) noexcept {
check(cudaFree(p));
}
};
template<class T, class... Args>
T* make_(Args &&... args) {
managed_allocator<T> ma;
return new (ma.allocate(1)) T(std::forward<Args>(args)...);
}
template<class String>
void do_trie(String const& input, bool use_simt, int blocks, int threads) {
std::vector<trie, managed_allocator<trie>> nodes(1<<17);
if(use_simt) check(cudaMemset(nodes.data(), 0, nodes.size()*sizeof(trie)));
auto t = nodes.data();
auto b = make_<simt::std::atomic<trie*>>(nodes.data()+1);
auto const begin = std::chrono::steady_clock::now();
std::atomic_signal_fence(std::memory_order_seq_cst);
if(use_simt) {
call_make_trie<<<blocks,threads>>>(t, b, input.data(), input.data() + input.size());
check(cudaDeviceSynchronize());
}
else {
assert(blocks == 1);
std::vector<std::thread> tv(threads);
for(auto count = threads; count; --count)
tv[count - 1] = std::thread([&, count]() {
make_trie(*t, *b, input.data(), input.data() + input.size(), count - 1, threads);
});
for(auto& t : tv)
t.join();
}
std::atomic_signal_fence(std::memory_order_seq_cst);
auto const end = std::chrono::steady_clock::now();
auto const time = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();
auto const count = b->load() - nodes.data();
std::cout << "Assembled " << count << " nodes on " << blocks << "x" << threads << " " << (use_simt ? "simt" : "cpu") << " threads in " << time << "ms." << std::endl;
}
int main() {
std::basic_string<char, std::char_traits<char>, managed_allocator<char>> input;
char const* files[] = {
"2600-0.txt", "2701-0.txt", "35-0.txt", "84-0.txt", "8800.txt",
"pg1727.txt", "pg55.txt", "pg6130.txt", "pg996.txt", "1342-0.txt"
};
for(auto* ptr : files) {
auto const cur = input.size();
std::ifstream in(ptr);
in.seekg(0, std::ios_base::end);
auto const pos = in.tellg();
input.resize(cur + pos);
in.seekg(0, std::ios_base::beg);
in.read((char*)input.data() + cur, pos);
}
do_trie(input, false, 1, 1);
do_trie(input, false, 1, 1);
do_trie(input, false, 1, std::thread::hardware_concurrency());
do_trie(input, false, 1, std::thread::hardware_concurrency());
assert(cudaSuccess == cudaSetDevice(0));
cudaDeviceProp deviceProp;
assert(cudaSuccess == cudaGetDeviceProperties(&deviceProp, 0));
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
do_trie(input, true, deviceProp.multiProcessorCount * deviceProp.maxThreadsPerMultiProcessor >> 10, 1<<10);
return 0;
}
|
b7ef6914a3dc32d81cef594abf4e1ad992447b3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3.h"
#include <cstdio>
#include "iostream"
using namespace std;
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void backgroundCopy(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht && xt < wt ) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
output[curt*3+0] = background[curb*3+0];
output[curt*3+1] = background[curb*3+1];
output[curt*3+2] = background[curb*3+2];
}
}
}
__global__ void SimpleClone(
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht && xt < wt && mask[curt] > 127.0f) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
output[curb*3+0] = target[curt*3+0];
output[curb*3+1] = target[curt*3+1];
output[curb*3+2] = target[curt*3+2];
}
}
}
__host__ __device__ float saturator(float num){
if(num>=255) return 255.0;
else if (num<=0) return 0;
else
return num;
}
__global__ void PoissonImageEditing(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox,int i
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
int curt = yt*wt+xt;
const int yb = yt+oy;
const int xb = xt+ox;
const int curb = yb*wb+xb;
if (yt>=0 &&yt < ht && xt>=0 && xt < wt){
if(mask[curt] > 127.0f) {
float count =4;
float countb =4;
float tmpTN1 = (yt-1>=0)?target[(curt-wt)*3+0]:0;
float tmpTN2 = (yt-1>=0)?target[(curt-wt)*3+1]:0;
float tmpTN3 = (yt-1>=0)?target[(curt-wt)*3+2]:0;
count = (yt-1>=0)?count:count-1;
float tmpTS1 = (yt+1<ht)?target[(curt+wt)*3+0]:0;
float tmpTS2 = (yt+1<ht)?target[(curt+wt)*3+1]:0;
float tmpTS3 = (yt+1<ht)?target[(curt+wt)*3+2]:0;
count = (yt+1<ht)?count:count-1;
float tmpTW1 = (xt-1>=0)?target[(curt-1)*3+0]:0;
float tmpTW2 = (xt-1>=0)?target[(curt-1)*3+1]:0;
float tmpTW3 = (xt-1>=0)?target[(curt-1)*3+2]:0;
count = (xt-1>=0)?count:count-1;
float tmpTE1 = (xt+1<wt)?target[(curt+1)*3+0]:0;
float tmpTE2 = (xt+1<wt)?target[(curt+1)*3+1]:0;
float tmpTE3 = (xt+1<wt)?target[(curt+1)*3+2]:0;
count = (xt+1<wt)?count:count-1;
float tmpBN1 = (yb-1>=0)?background[(curb-wb)*3+0]:0;
float tmpBN2 = (yb-1>=0)?background[(curb-wb)*3+1]:0;
float tmpBN3 = (yb-1>=0)?background[(curb-wb)*3+2]:0;
countb = (yb-1>=0)?countb:countb-1;
float tmpBS1 = (yb+1<hb)?background[(curb+wb)*3+0]:0;
float tmpBS2 = (yb+1<hb)?background[(curb+wb)*3+1]:0;
float tmpBS3 = (yb+1<hb)?background[(curb+wb)*3+2]:0;
countb = (yb+1<hb)?countb:countb-1;
float tmpBW1 = (xb-1>=0)?background[(curb-1)*3+0]:0;
float tmpBW2 = (xb-1>=0)?background[(curb-1)*3+1]:0;
float tmpBW3 = (xb-1>=0)?background[(curb-1)*3+2]:0;
countb = (xb-1>=0)?countb:countb-1;
float tmpBE1 = (xb+1<wb)?background[(curb+1)*3+0]:0;
float tmpBE2 = (xb+1<wb)?background[(curb+1)*3+1]:0;
float tmpBE3 = (xb+1<wb)?background[(curb+1)*3+2]:0;
countb = (xb+1<wb)?countb:countb-1;
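// One Jacobi sweep of the Poisson blending system (as read from the lines
// below): out = ( sum of background neighbours + count*target - sum of
// target neighbours ) / 4, i.e. relax toward the background while trying to
// preserve the target's discrete Laplacian (its gradient field) inside the
// mask.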
output[(curb)*3+0] = (count*target[curt*3+0]-(tmpTN1+tmpTS1+tmpTW1+tmpTE1)+(tmpBE1+tmpBW1+tmpBS1+tmpBN1))/4;
output[(curb)*3+1] = (count*target[curt*3+1]-(tmpTN2+tmpTS2+tmpTW2+tmpTE2)+(tmpBE2+tmpBW2+tmpBS2+tmpBN2))/4;
output[(curb)*3+2] = (count*target[curt*3+2]-(tmpTN3+tmpTS3+tmpTW3+tmpTE3)+(tmpBE3+tmpBW3+tmpBS3+tmpBN3))/4;
}
}
}
__global__ void upSampling2(
float* output,float* bufferOutput2,
const int wb, const int hb,
int wb2,int hb2,
bool flagx,bool flagy
)
{
const int yb = blockIdx.y * blockDim.y + threadIdx.y;
const int xb = blockIdx.x * blockDim.x + threadIdx.x;
const int xb2 = xb / 2;
const int yb2 = yb / 2;
int curb = yb*wb+xb;
int curb2 = yb2*wb2+xb2;
if (yb < hb && xb < wb){
output[curb*3+0] = bufferOutput2[curb2*3+0];
output[curb*3+1] = bufferOutput2[curb2*3+1];
output[curb*3+2] = bufferOutput2[curb2*3+2];
}
}
__global__ void downSampling2(
const float *oriImg,
float *downSampleImg,
const int w2, const int h2,
const int w, const int h
)
{
const int y2 = blockIdx.y * blockDim.y + threadIdx.y;
const int x2 = blockIdx.x * blockDim.x + threadIdx.x;
const int cur2 = w2*y2+x2;
if (y2 < h2 && x2 < w2){
downSampleImg[cur2*3+0] = (oriImg[(w*(y2*2)+x2*2)*3+0] + oriImg[(w*(y2*2)+x2*2+1)*3+0] + oriImg[(w*(y2*2+1)+x2*2)*3+0] +oriImg[(w*(y2*2+1)+x2*2+1)*3+0])/4;
downSampleImg[cur2*3+1] = (oriImg[(w*(y2*2)+x2*2)*3+1] + oriImg[(w*(y2*2)+x2*2+1)*3+1] + oriImg[(w*(y2*2+1)+x2*2)*3+1] +oriImg[(w*(y2*2+1)+x2*2+1)*3+1])/4;
downSampleImg[cur2*3+2] = (oriImg[(w*(y2*2)+x2*2)*3+2] + oriImg[(w*(y2*2)+x2*2+1)*3+2] + oriImg[(w*(y2*2+1)+x2*2)*3+2] +oriImg[(w*(y2*2+1)+x2*2+1)*3+2])/4;
}
}
__global__ void maskDownSampling2(
const float *oriImg,
float *downSampleImg,
const int w2, const int h2,
const int w, const int h
)
{
const int y2 = blockIdx.y * blockDim.y + threadIdx.y;
const int x2 = blockIdx.x * blockDim.x + threadIdx.x;
const int cur2 = w2*y2+x2;
if (y2 < h2 && x2 < w2){
downSampleImg[cur2] = (oriImg[(w*(y2*2)+x2*2)] + oriImg[(w*(y2*2)+x2*2+1)] + oriImg[(w*(y2*2+1)+x2*2)] +oriImg[(w*(y2*2+1)+x2*2+1)])/4;
}
}
__global__ void copy2output(
float *output,
const float *tmp,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
const int yb = oy+yt;
const int xb = ox+xt;
const int curb = wb*yb+xb;
if (yt < ht && xt < wt){
output[curb*3+0] = tmp[curt*3+0];
output[curb*3+1] = tmp[curt*3+1];
output[curb*3+2] = tmp[curt*3+2];
}
}
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
bool flagx = (wb%2==0)?false:true;
bool flagy = (hb%2==0)?false:true;
//hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice);
int wb2 = wb/2;
int hb2 = hb/2;
int wt2 = wt/2;
int ht2 = ht/2;
int ox2 = ox/2;
int oy2 = oy/2;
int compleW = wb2+2;
int compleH = hb2+2;
float * background2;
float * newbackground;
float * newComplementbackground;
float * complebackground2;
float * target2;
float * mask2;
float * bufferOutput2;
float * bufferOutput2_second;
//hipMalloc((void**)&bufferOutput,wt*ht*sizeof(float)*3);
hipMalloc((void**)&bufferOutput2,wb2*hb2*sizeof(float)*3);
hipMalloc((void**)&bufferOutput2_second,wb2*hb2*sizeof(float)*3);
hipMalloc((void**)&background2,hb2*wb2*sizeof(float)*3);
hipMalloc((void**)&newComplementbackground,compleW*compleH*sizeof(float)*3);
hipMalloc((void**)&complebackground2,compleW/2*compleH/2*sizeof(float)*3);
hipMalloc((void**)&target2,wt2*ht2*sizeof(float)*3);
hipMalloc((void**)&mask2,wt2*ht2*sizeof(float));
// down sample or upsample wrong
hipLaunchKernelGGL(( downSampling2), dim3(dim3(CeilDiv(wb2,32), CeilDiv(hb2,16))), dim3(dim3(32,16)), 0, 0,
background,background2,wb2,hb2,wb,hb
);
hipLaunchKernelGGL(( maskDownSampling2), dim3(dim3(CeilDiv(wt2,32), CeilDiv(ht2,16))), dim3(dim3(32,16)), 0, 0,
mask,mask2,wt2,ht2,wt,ht
);
hipLaunchKernelGGL(( downSampling2), dim3(dim3(CeilDiv(wt2,32), CeilDiv(ht2,16))), dim3(dim3(32,16)), 0, 0,
target,target2,wt2,ht2,wt,ht
);
//hipMemcpy(output,target2,ht2*wt2*sizeof(float)*3, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( SimpleClone), dim3(dim3(CeilDiv(wt/2,32), CeilDiv(ht/2,16))), dim3(dim3(32,16)), 0, 0,
target2, mask2,background2,
wb2, hb2, wt2, ht2,oy2, ox2
);
hipMemcpy(bufferOutput2,background2,hb2*wb2*sizeof(float)*3, hipMemcpyDeviceToDevice);
hipMemcpy(bufferOutput2_second,background2,hb2*wb2*sizeof(float)*3, hipMemcpyDeviceToDevice);
int iteration = 6000;
for(int i=0;i<iteration;i++){
if(i%2 ==0){
hipLaunchKernelGGL(( PoissonImageEditing), dim3(dim3(CeilDiv(wt2,32), CeilDiv(ht2,16))), dim3(dim3(32,16)), 0, 0,
bufferOutput2, target2, mask2, bufferOutput2_second,
wb2,hb2,
wt2,ht2,
oy2,ox2,i
);
}
else{
hipLaunchKernelGGL(( PoissonImageEditing), dim3(dim3(CeilDiv(wt2,32), CeilDiv(ht2,16))), dim3(dim3(32,16)), 0, 0,
bufferOutput2_second, target2,mask2,bufferOutput2,
wb2,hb2,
wt2,ht2,
oy2,ox2,i
);
}
}
hipMemcpy(output,mask,ht*wt*sizeof(float),hipMemcpyDeviceToDevice);
if(iteration%2 == 0){
hipLaunchKernelGGL(( upSampling2), dim3(dim3(CeilDiv(wb,32), CeilDiv(hb,16))), dim3(dim3(32,16)), 0, 0,
output,bufferOutput2,wb,hb,wb2,hb2,flagx,flagy
);
}
else{
float* tmp;
hipMalloc((void**)&tmp,wt*ht*sizeof(float)*3);
hipLaunchKernelGGL(( upSampling2), dim3(dim3(CeilDiv(wb,32), CeilDiv(hb,16))), dim3(dim3(32,16)), 0, 0,
output,bufferOutput2,wb,hb,wb2,hb2,flagx,flagy
);
}
hipFree(bufferOutput2);
hipFree(bufferOutput2_second);
}
| b7ef6914a3dc32d81cef594abf4e1ad992447b3a.cu | #include "lab3.h"
#include <cstdio>
#include "iostream"
using namespace std;
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void backgroundCopy(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht && xt < wt ) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
output[curt*3+0] = background[curb*3+0];
output[curt*3+1] = background[curb*3+1];
output[curt*3+2] = background[curb*3+2];
}
}
}
__global__ void SimpleClone(
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
if (yt < ht && xt < wt && mask[curt] > 127.0f) {
const int yb = oy+yt, xb = ox+xt;
const int curb = wb*yb+xb;
if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
output[curb*3+0] = target[curt*3+0];
output[curb*3+1] = target[curt*3+1];
output[curb*3+2] = target[curt*3+2];
}
}
}
__host__ __device__ float saturator(float num){
if(num>=255) return 255.0;
else if (num<=0) return 0;
else
return num;
}
__global__ void PoissonImageEditing(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox,int i
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
int curt = yt*wt+xt;
const int yb = yt+oy;
const int xb = xt+ox;
const int curb = yb*wb+xb;
if (yt>=0 &&yt < ht && xt>=0 && xt < wt){
if(mask[curt] > 127.0f) {
float count =4;
float countb =4;
float tmpTN1 = (yt-1>=0)?target[(curt-wt)*3+0]:0;
float tmpTN2 = (yt-1>=0)?target[(curt-wt)*3+1]:0;
float tmpTN3 = (yt-1>=0)?target[(curt-wt)*3+2]:0;
count = (yt-1>=0)?count:count-1;
float tmpTS1 = (yt+1<ht)?target[(curt+wt)*3+0]:0;
float tmpTS2 = (yt+1<ht)?target[(curt+wt)*3+1]:0;
float tmpTS3 = (yt+1<ht)?target[(curt+wt)*3+2]:0;
count = (yt+1<ht)?count:count-1;
float tmpTW1 = (xt-1>=0)?target[(curt-1)*3+0]:0;
float tmpTW2 = (xt-1>=0)?target[(curt-1)*3+1]:0;
float tmpTW3 = (xt-1>=0)?target[(curt-1)*3+2]:0;
count = (xt-1>=0)?count:count-1;
float tmpTE1 = (xt+1<wt)?target[(curt+1)*3+0]:0;
float tmpTE2 = (xt+1<wt)?target[(curt+1)*3+1]:0;
float tmpTE3 = (xt+1<wt)?target[(curt+1)*3+2]:0;
count = (xt+1<wt)?count:count-1;
float tmpBN1 = (yb-1>=0)?background[(curb-wb)*3+0]:0;
float tmpBN2 = (yb-1>=0)?background[(curb-wb)*3+1]:0;
float tmpBN3 = (yb-1>=0)?background[(curb-wb)*3+2]:0;
countb = (yb-1>=0)?countb:countb-1;
float tmpBS1 = (yb+1<hb)?background[(curb+wb)*3+0]:0;
float tmpBS2 = (yb+1<hb)?background[(curb+wb)*3+1]:0;
float tmpBS3 = (yb+1<hb)?background[(curb+wb)*3+2]:0;
countb = (yb+1<hb)?countb:countb-1;
float tmpBW1 = (xb-1>=0)?background[(curb-1)*3+0]:0;
float tmpBW2 = (xb-1>=0)?background[(curb-1)*3+1]:0;
float tmpBW3 = (xb-1>=0)?background[(curb-1)*3+2]:0;
countb = (xb-1>=0)?countb:countb-1;
float tmpBE1 = (xb+1<wb)?background[(curb+1)*3+0]:0;
float tmpBE2 = (xb+1<wb)?background[(curb+1)*3+1]:0;
float tmpBE3 = (xb+1<wb)?background[(curb+1)*3+2]:0;
countb = (xb+1<wb)?countb:countb-1;
output[(curb)*3+0] = (count*target[curt*3+0]-(tmpTN1+tmpTS1+tmpTW1+tmpTE1)+(tmpBE1+tmpBW1+tmpBS1+tmpBN1))/4;
output[(curb)*3+1] = (count*target[curt*3+1]-(tmpTN2+tmpTS2+tmpTW2+tmpTE2)+(tmpBE2+tmpBW2+tmpBS2+tmpBN2))/4;
output[(curb)*3+2] = (count*target[curt*3+2]-(tmpTN3+tmpTS3+tmpTW3+tmpTE3)+(tmpBE3+tmpBW3+tmpBS3+tmpBN3))/4;
}
}
}
__global__ void upSampling2(
float* output,float* bufferOutput2,
const int wb, const int hb,
int wb2,int hb2,
bool flagx,bool flagy
)
{
const int yb = blockIdx.y * blockDim.y + threadIdx.y;
const int xb = blockIdx.x * blockDim.x + threadIdx.x;
const int xb2 = xb / 2;
const int yb2 = yb / 2;
int curb = yb*wb+xb;
int curb2 = yb2*wb2+xb2;
if (yb < hb && xb < wb){
output[curb*3+0] = bufferOutput2[curb2*3+0];
output[curb*3+1] = bufferOutput2[curb2*3+1];
output[curb*3+2] = bufferOutput2[curb2*3+2];
}
}
__global__ void downSampling2(
const float *oriImg,
float *downSampleImg,
const int w2, const int h2,
const int w, const int h
)
{
const int y2 = blockIdx.y * blockDim.y + threadIdx.y;
const int x2 = blockIdx.x * blockDim.x + threadIdx.x;
const int cur2 = w2*y2+x2;
if (y2 < h2 && x2 < w2){
downSampleImg[cur2*3+0] = (oriImg[(w*(y2*2)+x2*2)*3+0] + oriImg[(w*(y2*2)+x2*2+1)*3+0] + oriImg[(w*(y2*2+1)+x2*2)*3+0] +oriImg[(w*(y2*2+1)+x2*2+1)*3+0])/4;
downSampleImg[cur2*3+1] = (oriImg[(w*(y2*2)+x2*2)*3+1] + oriImg[(w*(y2*2)+x2*2+1)*3+1] + oriImg[(w*(y2*2+1)+x2*2)*3+1] +oriImg[(w*(y2*2+1)+x2*2+1)*3+1])/4;
downSampleImg[cur2*3+2] = (oriImg[(w*(y2*2)+x2*2)*3+2] + oriImg[(w*(y2*2)+x2*2+1)*3+2] + oriImg[(w*(y2*2+1)+x2*2)*3+2] +oriImg[(w*(y2*2+1)+x2*2+1)*3+2])/4;
}
}
__global__ void maskDownSampling2(
const float *oriImg,
float *downSampleImg,
const int w2, const int h2,
const int w, const int h
)
{
const int y2 = blockIdx.y * blockDim.y + threadIdx.y;
const int x2 = blockIdx.x * blockDim.x + threadIdx.x;
const int cur2 = w2*y2+x2;
if (y2 < h2 && x2 < w2){
downSampleImg[cur2] = (oriImg[(w*(y2*2)+x2*2)] + oriImg[(w*(y2*2)+x2*2+1)] + oriImg[(w*(y2*2+1)+x2*2)] +oriImg[(w*(y2*2+1)+x2*2+1)])/4;
}
}
__global__ void copy2output(
float *output,
const float *tmp,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
const int yt = blockIdx.y * blockDim.y + threadIdx.y;
const int xt = blockIdx.x * blockDim.x + threadIdx.x;
const int curt = wt*yt+xt;
const int yb = oy+yt;
const int xb = ox+xt;
const int curb = wb*yb+xb;
if (yt < ht && xt < wt){
output[curb*3+0] = tmp[curt*3+0];
output[curb*3+1] = tmp[curt*3+1];
output[curb*3+2] = tmp[curt*3+2];
}
}
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
)
{
bool flagx = (wb%2==0)?false:true;
bool flagy = (hb%2==0)?false:true;
//cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice);
int wb2 = wb/2;
int hb2 = hb/2;
int wt2 = wt/2;
int ht2 = ht/2;
int ox2 = ox/2;
int oy2 = oy/2;
int compleW = wb2+2;
int compleH = hb2+2;
float * background2;
float * newbackground;
float * newComplementbackground;
float * complebackground2;
float * target2;
float * mask2;
float * bufferOutput2;
float * bufferOutput2_second;
//cudaMalloc((void**)&bufferOutput,wt*ht*sizeof(float)*3);
cudaMalloc((void**)&bufferOutput2,wb2*hb2*sizeof(float)*3);
cudaMalloc((void**)&bufferOutput2_second,wb2*hb2*sizeof(float)*3);
cudaMalloc((void**)&background2,hb2*wb2*sizeof(float)*3);
cudaMalloc((void**)&newComplementbackground,compleW*compleH*sizeof(float)*3);
cudaMalloc((void**)&complebackground2,compleW/2*compleH/2*sizeof(float)*3);
cudaMalloc((void**)&target2,wt2*ht2*sizeof(float)*3);
cudaMalloc((void**)&mask2,wt2*ht2*sizeof(float));
// down sample or upsample wrong
downSampling2<<<dim3(CeilDiv(wb2,32), CeilDiv(hb2,16)), dim3(32,16)>>>(
background,background2,wb2,hb2,wb,hb
);
maskDownSampling2<<<dim3(CeilDiv(wt2,32), CeilDiv(ht2,16)), dim3(32,16)>>>(
mask,mask2,wt2,ht2,wt,ht
);
downSampling2<<<dim3(CeilDiv(wt2,32), CeilDiv(ht2,16)), dim3(32,16)>>>(
target,target2,wt2,ht2,wt,ht
);
//cudaMemcpy(output,target2,ht2*wt2*sizeof(float)*3, cudaMemcpyDeviceToDevice);
SimpleClone<<<dim3(CeilDiv(wt/2,32), CeilDiv(ht/2,16)), dim3(32,16)>>>(
target2, mask2,background2,
wb2, hb2, wt2, ht2,oy2, ox2
);
cudaMemcpy(bufferOutput2,background2,hb2*wb2*sizeof(float)*3, cudaMemcpyDeviceToDevice);
cudaMemcpy(bufferOutput2_second,background2,hb2*wb2*sizeof(float)*3, cudaMemcpyDeviceToDevice);
int iteration = 6000;
for(int i=0;i<iteration;i++){
if(i%2 ==0){
PoissonImageEditing<<<dim3(CeilDiv(wt2,32), CeilDiv(ht2,16)), dim3(32,16)>>>(
bufferOutput2, target2, mask2, bufferOutput2_second,
wb2,hb2,
wt2,ht2,
oy2,ox2,i
);
}
else{
PoissonImageEditing<<<dim3(CeilDiv(wt2,32), CeilDiv(ht2,16)), dim3(32,16)>>>(
bufferOutput2_second, target2,mask2,bufferOutput2,
wb2,hb2,
wt2,ht2,
oy2,ox2,i
);
}
}
cudaMemcpy(output,mask,ht*wt*sizeof(float),cudaMemcpyDeviceToDevice);
if(iteration%2 == 0){
upSampling2<<<dim3(CeilDiv(wb,32), CeilDiv(hb,16)), dim3(32,16)>>>(
output,bufferOutput2,wb,hb,wb2,hb2,flagx,flagy
);
}
else{
float* tmp;
cudaMalloc((void**)&tmp,wt*ht*sizeof(float)*3);
upSampling2<<<dim3(CeilDiv(wb,32), CeilDiv(hb,16)), dim3(32,16)>>>(
output,bufferOutput2,wb,hb,wb2,hb2,flagx,flagy
);
}
cudaFree(bufferOutput2);
cudaFree(bufferOutput2_second);
}
|
e1618a1e733f3b26a68bd3c96be83c9fd000f105.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <rmm/rmm.h>
#include <mpi.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
#include "../src/communicator.h"
#include "../src/error.cuh"
#define SIZE 800'000'000LL
#define BUFFER_SIZE 25'000'000LL
#define REPEAT 4
int main(int argc, char *argv[])
{
UCXBufferCommunicator communicator;
communicator.initialize(argc, argv);
int mpi_rank = communicator.mpi_rank;
int mpi_size = communicator.mpi_size;
communicator.setup_cache(2 * mpi_size, BUFFER_SIZE);
communicator.warmup_cache();
/* Allocate data buffers */
std::vector<void *> send_buffer(mpi_size, nullptr);
std::vector<void *> recv_buffer(mpi_size, nullptr);
for (int irank = 0; irank < mpi_size; irank ++) {
RMM_CALL(RMM_ALLOC(&send_buffer[irank], SIZE / mpi_size, 0));
}
std::vector<comm_handle_t> send_reqs(mpi_size, nullptr);
std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr);
/* Communication */
UCX_CALL(ucp_worker_flush(communicator.ucp_worker));
MPI_Barrier(MPI_COMM_WORLD);
hipProfilerStart();
double start = MPI_Wtime();
for (int icol = 0; icol < REPEAT; icol ++)
{
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank)
send_reqs[irank] = communicator.send(send_buffer[irank], SIZE / mpi_size, 1, irank, 20);
else
send_reqs[irank] = nullptr;
}
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank)
recv_reqs[irank] = communicator.recv(&recv_buffer[irank], nullptr, 1, irank, 20);
else
recv_reqs[irank] = nullptr;
}
communicator.waitall(send_reqs);
communicator.waitall(recv_reqs);
for (int irank = 0; irank < mpi_rank; irank ++)
RMM_CALL(RMM_FREE(recv_buffer[irank], 0));
}
double stop = MPI_Wtime();
hipProfilerStop();
MPI_Barrier(MPI_COMM_WORLD);
if (mpi_rank == 0) {
std::cerr << "Elapsed time (s) " << stop - start << std::endl;
std::cerr << "Bandwidth (GB/s) " << (double)SIZE * (mpi_size - 5) * REPEAT / (stop - start) / 1e9 << std::endl;
}
/* Cleanup */
for(int irank = 0; irank < mpi_rank; irank++) {
RMM_CALL(RMM_FREE(send_buffer[irank], 0));
}
communicator.finalize();
return 0;
}
| e1618a1e733f3b26a68bd3c96be83c9fd000f105.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <rmm/rmm.h>
#include <mpi.h>
#include <iostream>
#include <cuda_profiler_api.h>
#include "../src/communicator.h"
#include "../src/error.cuh"
#define SIZE 800'000'000LL
#define BUFFER_SIZE 25'000'000LL
#define REPEAT 4
int main(int argc, char *argv[])
{
UCXBufferCommunicator communicator;
communicator.initialize(argc, argv);
int mpi_rank = communicator.mpi_rank;
int mpi_size = communicator.mpi_size;
communicator.setup_cache(2 * mpi_size, BUFFER_SIZE);
communicator.warmup_cache();
/* Allocate data buffers */
std::vector<void *> send_buffer(mpi_size, nullptr);
std::vector<void *> recv_buffer(mpi_size, nullptr);
for (int irank = 0; irank < mpi_size; irank ++) {
RMM_CALL(RMM_ALLOC(&send_buffer[irank], SIZE / mpi_size, 0));
}
std::vector<comm_handle_t> send_reqs(mpi_size, nullptr);
std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr);
/* Communication */
UCX_CALL(ucp_worker_flush(communicator.ucp_worker));
MPI_Barrier(MPI_COMM_WORLD);
cudaProfilerStart();
double start = MPI_Wtime();
for (int icol = 0; icol < REPEAT; icol ++)
{
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank)
send_reqs[irank] = communicator.send(send_buffer[irank], SIZE / mpi_size, 1, irank, 20);
else
send_reqs[irank] = nullptr;
}
for (int irank = 0; irank < mpi_size; irank++) {
if (irank != mpi_rank)
recv_reqs[irank] = communicator.recv(&recv_buffer[irank], nullptr, 1, irank, 20);
else
recv_reqs[irank] = nullptr;
}
communicator.waitall(send_reqs);
communicator.waitall(recv_reqs);
for (int irank = 0; irank < mpi_rank; irank ++)
RMM_CALL(RMM_FREE(recv_buffer[irank], 0));
}
double stop = MPI_Wtime();
cudaProfilerStop();
MPI_Barrier(MPI_COMM_WORLD);
if (mpi_rank == 0) {
std::cerr << "Elapsed time (s) " << stop - start << std::endl;
std::cerr << "Bandwidth (GB/s) " << (double)SIZE * (mpi_size - 5) * REPEAT / (stop - start) / 1e9 << std::endl;
}
/* Cleanup */
for(int irank = 0; irank < mpi_rank; irank++) {
RMM_CALL(RMM_FREE(send_buffer[irank], 0));
}
communicator.finalize();
return 0;
}
|
8c3bb2135ee2e8203a1e84b01f24d3c8dbd57bab.hip | // !!! This is a file automatically generated by hipify!!!
// build with command nvcc -lcublas -lcudart -lcurand -arch=sm_70 mixed.cu
// use max clock
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
// Must be multiples of 16 to fit TensorCore
#define SIZE 8192 // 4096 8192 10240 16384 24576
#define MATRIX_M SIZE
#define MATRIX_N SIZE
#define MATRIX_K SIZE
#define num_clock 1530
#define num_SM 80
#define num_TC 8
#define num_FMA 2
#define num_mma 64
#define FP16_OP num_clock*num_SM*num_TC*num_FMA*num_mma
#define TOTAL_OP MATRIX_M * MATRIX_N * MATRIX_K * 2
#define TOTAL_OP2 (MATRIX_M*MATRIX_N) * (2*MATRIX_K+2)
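// Worked numbers for the peak estimate above (the identification of these
// defines with a V100 at max boost clock is an assumption, not stated in the
// file): with num_clock in MHz, FP16_OP = 1530 * 80 * 8 * 2 * 64
// = 125,337,600 MFLOP/s, so the RPeak printf below divides by 1e6 and
// reports ~125.3 TFLOP/s of FP16 Tensor Core peak. TOTAL_OP = 2*M*N*K is the
// standard GEMM FLOP count used for the RMax figure.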
__global__ void convertFp32ToFp16 (half *out, float *in, int n);
int main(int argc, char* argv[]) {
printf("FP32 Matrix Memory Size : %f \n", (float) (sizeof(float) * (float) (MATRIX_M*MATRIX_M) / ( 1024 * 1024 ) ) );
printf("FP16 Matrix Memory Size : %f \n", (float) (sizeof(half) * (float) (MATRIX_M*MATRIX_M) / ( 1024 * 1024 ) ) );
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_host_cublas;
printf(" Step1. Initialize GPU API handles...\n");
hiprandGenerator_t gen;
hipblasHandle_t cublasHandle;
hipblasCreate(&cublasHandle);
hipEvent_t startcublas;
hipEvent_t stopcublas;
hipEventCreate(&startcublas);
hipEventCreate(&stopcublas);
// Use tensor cores
cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH);
printf(" Step2. Memory Mallocation ...\n");
hipMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float));
hipMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float));
hipMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half));
hipMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half));
hipMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float));
hipMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
printf(" Step3. Data init with cuRAND ...\n");
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1337ULL);
hiprandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K);
hiprandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N);
hiprandGenerateUniform(gen, c, MATRIX_M * MATRIX_N);
hipMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToDevice);
printf(" Step4. convert FP32 to FP16 for FP16 benchmark...\n");
// hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_fp16, a_fp32, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_fp16, b_fp32, MATRIX_K * MATRIX_N);
hiprandDestroyGenerator(gen);
float alpha = 2.0f;
float beta = 2.0f;
printf(" Step5. Ready to Run...\n");
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// Now using cuBLAS
printf("warm up...");
hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, HIP_R_16F, MATRIX_M,
b_fp16, HIP_R_16F, MATRIX_K,
&beta,
c, HIP_R_32F, MATRIX_M,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP);
printf(" Step6. Running with cuBLAS...\n");
hipEventRecord(startcublas);
hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, HIP_R_16F, MATRIX_M,
b_fp16, HIP_R_16F, MATRIX_K,
&beta,
c_cublas, HIP_R_32F, MATRIX_M,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP);
hipEventRecord(stopcublas);
printf(" Step7. Download results...\n");
hipMemcpy( c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToHost);
float cublasTime;
hipEventSynchronize(stopcublas);
hipEventElapsedTime(&cublasTime, startcublas, stopcublas);
printf("cublas took %fms", cublasTime);
printf(" with Operation %.2f\n", (double) TOTAL_OP );
printf(" RPeak FP16 TFLOPS: %.2f with max clock %d Mhz \n", (double) FP16_OP /(1000000) , num_clock );
printf(" RMax FP16 TFLOPS %.2f\n", (double) ( ((double)TOTAL_OP / (double) (1000000) ) / ((double) cublasTime)/1000 ) );
hipEventDestroy(startcublas);
hipEventDestroy(stopcublas);
hipFree(a_fp32);
hipFree(b_fp32);
hipFree(a_fp16);
hipFree(b_fp16);
hipFree(c);
hipFree(c_cublas);
free(c_host_cublas);
hipDeviceReset();
return 0;
}
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
| 8c3bb2135ee2e8203a1e84b01f24d3c8dbd57bab.cu | // build with command nvcc -lcublas -lcudart -lcurand -arch=sm_70 mixed.cu
// use max clock
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
// Must be multiples of 16 to fit TensorCore
#define SIZE 8192 // 4096 8192 10240 16384 24576
#define MATRIX_M SIZE
#define MATRIX_N SIZE
#define MATRIX_K SIZE
#define num_clock 1530
#define num_SM 80
#define num_TC 8
#define num_FMA 2
#define num_mma 64
#define FP16_OP num_clock*num_SM*num_TC*num_FMA*num_mma
#define TOTAL_OP MATRIX_M * MATRIX_N * MATRIX_K * 2
#define TOTAL_OP2 (MATRIX_M*MATRIX_N) * (2*MATRIX_K+2)
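// TOTAL_OP counts 2*M*N*K floating-point operations for the GEMM (one multiply + one add per MAC).
// FP16_OP appears to model peak Tensor Core FP16 throughput for an sm_70 GPU:
// clock (MHz) * SMs * Tensor Cores per SM * 64 FMAs per Tensor Core per clock * 2 FLOPs per FMA.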
__global__ void convertFp32ToFp16 (half *out, float *in, int n);
int main(int argc, char* argv[]) {
printf("FP32 Matrix Memory Size : %f \n", (float) (sizeof(float) * (float) (MATRIX_M*MATRIX_M) / ( 1024 * 1024 ) ) );
printf("FP16 Matrix Memory Size : %f \n", (float) (sizeof(half) * (float) (MATRIX_M*MATRIX_M) / ( 1024 * 1024 ) ) );
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_host_cublas;
printf(" Step1. Initialize GPU API handles...\n");
curandGenerator_t gen;
cublasHandle_t cublasHandle;
cublasCreate(&cublasHandle);
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaEventCreate(&startcublas);
cudaEventCreate(&stopcublas);
// Use tensor cores
cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH);
printf(" Step2. Memory Mallocation ...\n");
cudaMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float));
cudaMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float));
cudaMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half));
cudaMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half));
cudaMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float));
cudaMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
printf(" Step3. Data init with cuRAND ...\n");
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1337ULL);
curandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K);
curandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N);
curandGenerateUniform(gen, c, MATRIX_M * MATRIX_N);
cudaMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToDevice);
printf(" Step4. convert FP32 to FP16 for FP16 benchmark...\n");
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
convertFp32ToFp16 <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_fp16, a_fp32, MATRIX_M * MATRIX_K);
convertFp32ToFp16 <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_fp16, b_fp32, MATRIX_K * MATRIX_N);
curandDestroyGenerator(gen);
float alpha = 2.0f;
float beta = 2.0f;
printf(" Step5. Ready to Run...\n");
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// Now using cuBLAS
printf("warm up...");
cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, CUDA_R_16F, MATRIX_M,
b_fp16, CUDA_R_16F, MATRIX_K,
&beta,
c, CUDA_R_32F, MATRIX_M,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP);
printf(" Step6. Running with cuBLAS...\n");
cudaEventRecord(startcublas);
cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, CUDA_R_16F, MATRIX_M,
b_fp16, CUDA_R_16F, MATRIX_K,
&beta,
c_cublas, CUDA_R_32F, MATRIX_M,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP);
cudaEventRecord(stopcublas);
printf(" Step7. Download results...\n");
cudaMemcpy( c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost);
float cublasTime;
cudaEventSynchronize(stopcublas);
cudaEventElapsedTime(&cublasTime, startcublas, stopcublas);
printf("cublas took %fms", cublasTime);
printf(" with Operation %.2f\n", (double) TOTAL_OP );
printf(" RPeak FP16 TFLOPS: %.2f with max clock %d Mhz \n", (double) FP16_OP /(1000000) , num_clock );
printf(" RMax FP16 TFLOPS %.2f\n", (double) ( ((double)TOTAL_OP / (double) (1000000) ) / ((double) cublasTime)/1000 ) );
cudaEventDestroy(startcublas);
cudaEventDestroy(stopcublas);
cudaFree(a_fp32);
cudaFree(b_fp32);
cudaFree(a_fp16);
cudaFree(b_fp16);
cudaFree(c);
cudaFree(c_cublas);
free(c_host_cublas);
cudaDeviceReset();
return 0;
}
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
|
714c577e20c9794864c76d606ffa4e5984ae139d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_t2 [7][1];
static int dims_update_halo_kernel1_t2_h [7][1] = {0};
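// dims_* caches the leading (x) dimension of each dat argument; the _h array is the host-side copy
// used to detect when the device-side __constant__ copy must be refreshed.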
//user function
__device__
inline void update_halo_kernel1_t2_gpu(ACC<double> &density0,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &u,
ACC<double> &p,
ACC<double> &sd,
const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,-3);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,-3);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,-3);
if(fields[FIELD_U] == 1) u(0,0) = u(0,-3);
if(fields[FIELD_P] == 1) p(0,0) = p(0,-3);
if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,-3);
}
__global__ void ops_update_halo_kernel1_t2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_update_halo_kernel1_t2[0][0], arg0);
ACC<double> argp1(dims_update_halo_kernel1_t2[1][0], arg1);
ACC<double> argp2(dims_update_halo_kernel1_t2[2][0], arg2);
ACC<double> argp3(dims_update_halo_kernel1_t2[3][0], arg3);
ACC<double> argp4(dims_update_halo_kernel1_t2[4][0], arg4);
ACC<double> argp5(dims_update_halo_kernel1_t2[5][0], arg5);
update_halo_kernel1_t2_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel1_t2");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_update_halo_kernel1_t2_h[0][0] || xdim1 != dims_update_halo_kernel1_t2_h[1][0] || xdim2 != dims_update_halo_kernel1_t2_h[2][0] || xdim3 != dims_update_halo_kernel1_t2_h[3][0] || xdim4 != dims_update_halo_kernel1_t2_h[4][0] || xdim5 != dims_update_halo_kernel1_t2_h[5][0]) {
dims_update_halo_kernel1_t2_h[0][0] = xdim0;
dims_update_halo_kernel1_t2_h[1][0] = xdim1;
dims_update_halo_kernel1_t2_h[2][0] = xdim2;
dims_update_halo_kernel1_t2_h[3][0] = xdim3;
dims_update_halo_kernel1_t2_h[4][0] = xdim4;
dims_update_halo_kernel1_t2_h[5][0] = xdim5;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_t2, dims_update_halo_kernel1_t2_h, sizeof(dims_update_halo_kernel1_t2)));
}
int *arg6h = (int *)arg6.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
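// arg6 (the fields flag array) is staged through the OPS constants buffer so the kernel can read it from device memory.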
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_t2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel1_t2");
}
ops_enqueue_kernel(desc);
}
#endif
| 714c577e20c9794864c76d606ffa4e5984ae139d.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_t2 [7][1];
static int dims_update_halo_kernel1_t2_h [7][1] = {0};
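// dims_* caches the leading (x) dimension of each dat argument; the _h array is the host-side copy
// used to detect when the device-side __constant__ copy must be refreshed.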
//user function
__device__
inline void update_halo_kernel1_t2_gpu(ACC<double> &density0,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &u,
ACC<double> &p,
ACC<double> &sd,
const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,-3);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,-3);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,-3);
if(fields[FIELD_U] == 1) u(0,0) = u(0,-3);
if(fields[FIELD_P] == 1) p(0,0) = p(0,-3);
if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,-3);
}
__global__ void ops_update_halo_kernel1_t2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t2[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_update_halo_kernel1_t2[0][0], arg0);
ACC<double> argp1(dims_update_halo_kernel1_t2[1][0], arg1);
ACC<double> argp2(dims_update_halo_kernel1_t2[2][0], arg2);
ACC<double> argp3(dims_update_halo_kernel1_t2[3][0], arg3);
ACC<double> argp4(dims_update_halo_kernel1_t2[4][0], arg4);
ACC<double> argp5(dims_update_halo_kernel1_t2[5][0], arg5);
update_halo_kernel1_t2_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel1_t2");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_update_halo_kernel1_t2_h[0][0] || xdim1 != dims_update_halo_kernel1_t2_h[1][0] || xdim2 != dims_update_halo_kernel1_t2_h[2][0] || xdim3 != dims_update_halo_kernel1_t2_h[3][0] || xdim4 != dims_update_halo_kernel1_t2_h[4][0] || xdim5 != dims_update_halo_kernel1_t2_h[5][0]) {
dims_update_halo_kernel1_t2_h[0][0] = xdim0;
dims_update_halo_kernel1_t2_h[1][0] = xdim1;
dims_update_halo_kernel1_t2_h[2][0] = xdim2;
dims_update_halo_kernel1_t2_h[3][0] = xdim3;
dims_update_halo_kernel1_t2_h[4][0] = xdim4;
dims_update_halo_kernel1_t2_h[5][0] = xdim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_t2, dims_update_halo_kernel1_t2_h, sizeof(dims_update_halo_kernel1_t2)));
}
int *arg6h = (int *)arg6.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
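// arg6 (the fields flag array) is staged through the OPS constants buffer so the kernel can read it from device memory.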
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_update_halo_kernel1_t2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel1_t2");
}
ops_enqueue_kernel(desc);
}
#endif
|
1a579ac8cafe637c2f0fd4d24437642b4426561e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include <hip/hip_fp16.h>
#include "torch_gather.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
using amirstan::cuda::TensorSize;
using amirstan::cuda::TensorStride;
template <typename T>
__global__ void torch_gather_kernel(T* dst, const T* src, const int* gather_table,
int dim, TensorStride input_stride, TensorStride output_stride, int nb_dims,
int num_output){
size_t* src_stride = &(input_stride.size[0]);
size_t* dst_stride = &(output_stride.size[0]);
CUDA_KERNEL_LOOP(index, num_output){
size_t dst_index = index;
size_t src_index = 0;
const int gather_value = gather_table[dst_index];
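// Peel the flat output index into per-dimension coordinates using the output strides, and rebuild the
// source offset from the input strides, substituting the gathered index along dimension `dim`.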
for (int i = 0; i < nb_dims; ++i)
{
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
if(i!=dim){
src_index+=dim_index*src_stride[i];
}else{
src_index+=gather_value*src_stride[i];
}
}
dst[index] = src[src_index];
}
}
template <typename T>
void torch_gather(T *output, const T* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
hipStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
memcpy(&ts_input_size.size[0], input_dims, sizeof(int)*nb_dims);
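// Build row-major strides: the innermost dimension has stride 1; each outer stride is the product of the inner sizes.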
input_stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
input_stride.size[i] = input_stride.size[i+1] * ts_input_size.size[i+1];
}
TensorSize ts_output_size;
TensorStride output_stride;
memcpy(&ts_output_size.size[0], index_dims, sizeof(int)*nb_dims);
output_stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
output_stride.size[i] = output_stride.size[i+1] * ts_output_size.size[i+1];
}
size_t num_output = output_stride.size[0]*ts_output_size.size[0];
hipLaunchKernelGGL(( torch_gather_kernel<T>), dim3(GET_BLOCKS(num_output)), dim3(CUDA_NUM_THREADS), 0, stream,
output, input, index, dim,
input_stride, output_stride, nb_dims,
num_output
);
}
template void torch_gather<float>(float *output, const float* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
hipStream_t stream);
template void torch_gather<half>(half *output, const half* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
hipStream_t stream);
template void torch_gather<int>(int *output, const int* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
hipStream_t stream);
}
} | 1a579ac8cafe637c2f0fd4d24437642b4426561e.cu | #include <cmath>
#include <algorithm>
#include <stdio.h>
#include <cuda_fp16.h>
#include "torch_gather.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
using amirstan::cuda::TensorSize;
using amirstan::cuda::TensorStride;
template <typename T>
__global__ void torch_gather_kernel(T* dst, const T* src, const int* gather_table,
int dim, TensorStride input_stride, TensorStride output_stride, int nb_dims,
int num_output){
size_t* src_stride = &(input_stride.size[0]);
size_t* dst_stride = &(output_stride.size[0]);
CUDA_KERNEL_LOOP(index, num_output){
size_t dst_index = index;
size_t src_index = 0;
const int gather_value = gather_table[dst_index];
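// Peel the flat output index into per-dimension coordinates using the output strides, and rebuild the
// source offset from the input strides, substituting the gathered index along dimension `dim`.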
for (int i = 0; i < nb_dims; ++i)
{
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
if(i!=dim){
src_index+=dim_index*src_stride[i];
}else{
src_index+=gather_value*src_stride[i];
}
}
dst[index] = src[src_index];
}
}
template <typename T>
void torch_gather(T *output, const T* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
cudaStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
memcpy(&ts_input_size.size[0], input_dims, sizeof(int)*nb_dims);
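// Build row-major strides: the innermost dimension has stride 1; each outer stride is the product of the inner sizes.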
input_stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
input_stride.size[i] = input_stride.size[i+1] * ts_input_size.size[i+1];
}
TensorSize ts_output_size;
TensorStride output_stride;
memcpy(&ts_output_size.size[0], index_dims, sizeof(int)*nb_dims);
output_stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
output_stride.size[i] = output_stride.size[i+1] * ts_output_size.size[i+1];
}
size_t num_output = output_stride.size[0]*ts_output_size.size[0];
torch_gather_kernel<T><<<GET_BLOCKS(num_output), CUDA_NUM_THREADS, 0, stream>>>(
output, input, index, dim,
input_stride, output_stride, nb_dims,
num_output
);
}
template void torch_gather<float>(float *output, const float* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
cudaStream_t stream);
template void torch_gather<half>(half *output, const half* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
cudaStream_t stream);
template void torch_gather<int>(int *output, const int* input, const int* index,
int dim, int* input_dims, int *index_dims, int nb_dims,
cudaStream_t stream);
}
} |
4deb3462e02f1131e14e32e30a8479d5ee3487a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/ultinous/projective_transformer_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void copy_values(const int nthreads, int size_src, int k,
const Dtype* src, int size_dst, int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size_dst + i] = src[index * size_src + k];
}
}
template <typename Dtype>
__global__ void ProjectiveTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* U, Dtype* V, bool grid) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i;
const int row_idx = output_W_ * s + t;
// homogeneous coordinates
const Dtype hom_x = coordinates[row_idx * 3];
const Dtype hom_y = coordinates[row_idx * 3 + 1];
const Dtype hom_h = coordinates[row_idx * 3 + 2];
const Dtype px = hom_x / hom_h; // NOTE: check for div by zero
const Dtype py = hom_y / hom_h;
const int V_offset = index;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
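// Bilinear interpolation: accumulate the four integer neighbours of (x, y), each weighted by
// (1 - |x - m|) * (1 - |y - n|); neighbours that fall outside the H x W image contribute nothing.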
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
if( grid && ( ((int(round(px*64))%7)==0) || ((int(round(py*64))%7)==0) ) )
V[V_offset] = (Dtype)0.;
}
}
template <typename Dtype>
void ProjectiveTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "ProjectiveTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid.gpu_data();
Dtype* input_grid_data = input_grid.mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute input_grid_data: map the output sampling grid through theta to homogeneous input coordinates
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 3, 3, (Dtype)1.,
output_grid_data, theta + 9 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 3) * i);
}
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( ProjectiveTransformerForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V, this->layer_param_.proj_trans_param().grid());
}
template <typename Dtype>
__global__ void ProjectiveTransformerBackwardGPU_dTheta(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dTheta_tmp_diff, const Dtype* theta) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
Dtype t11=theta[9*i], t12=theta[9*i+1], t13=theta[9*i+2];
Dtype t21=theta[9*i+3], t22=theta[9*i+4], t23=theta[9*i+5];
Dtype t31=theta[9*i+6], t32=theta[9*i+7], t33=theta[9*i+8];
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i;
const int row_idx = output_W_ * s + t;
const Dtype hom_x = coordinates[row_idx * 3];
const Dtype hom_y = coordinates[row_idx * 3 + 1];
const Dtype hom_h = coordinates[row_idx * 3 + 2];
const Dtype px = hom_x / hom_h; // NOTE: check for div by zero
const Dtype py = hom_y / hom_h;
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = index;
const Dtype dV = dV_array[dV_offset];
int m, n;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
Dtype outX = (s * 1.0 / output_H_ * 2 - 1);
Dtype outY = (t * 1.0 / output_W_ * 2 - 1);
Dtype nomX = t11*outX + t12*outY + t13;
Dtype nomY = t21*outX + t22*outY + t23;
Dtype denom = t31*outX + t32*outY + t33;
Dtype denom2 = denom*denom;
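// Chain rule through the projective division p = nom / denom: entries feeding the numerators get a
// 1/denom factor, while the third row of theta picks up -nom/denom^2 from differentiating the denominator.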
dTheta_tmp_diff[(9 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * outX/denom;
dTheta_tmp_diff[(9 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * outY/denom;
dTheta_tmp_diff[(9 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx/denom;
dTheta_tmp_diff[(9 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * outX/denom;
dTheta_tmp_diff[(9 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * outY/denom;
dTheta_tmp_diff[(9 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy/denom;
dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2)*outX );
dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2)*outX );
dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2)*outY );
dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2)*outY );
dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2) );
dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2) );
}
}
template <typename Dtype>
void ProjectiveTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "ProjectiveTransformerLayer::Backward_GPU::\t";
//caffe_gpu_set(bottom[0]->count(), (Dtype)0., bottom[0]->mutable_gpu_diff());
//caffe_gpu_set(bottom[1]->count(), (Dtype)0., bottom[1]->mutable_gpu_diff());
//return;
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid.gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff();
caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
hipLaunchKernelGGL(( ProjectiveTransformerBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS),0,Caffe::cuda_stream(), nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dTheta_tmp_diff, bottom[1]->gpu_data());
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
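// Reduce the per-pixel contributions: multiplying dTheta_tmp (9*N rows of length output_H_*output_W_*C)
// by an all-ones vector sums each row into the corresponding entry of dTheta.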
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[1]->count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dTheta);
/*const Dtype* db_dfull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
std::cout<<std::endl;*/
/*int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads,
6, i, dFull_theta, 6, k, dTheta);
++ k;
}*/
/*const Dtype* db_dtheta = bottom[1]->cpu_diff();
for(int i=0; i<bottom[1]->count(); ++i) {
std::cout << db_dtheta[i] << " ";
}
std::cout<<std::endl;*/
}
INSTANTIATE_LAYER_GPU_FUNCS(ProjectiveTransformerLayer);
} // namespace caffe
| 4deb3462e02f1131e14e32e30a8479d5ee3487a8.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/ultinous/projective_transformer_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
__global__ void copy_values(const int nthreads, int size_src, int k,
const Dtype* src, int size_dst, int i, Dtype* dst) {
CUDA_KERNEL_LOOP(index, nthreads) {
dst[index * size_dst + i] = src[index * size_src + k];
}
}
template <typename Dtype>
__global__ void ProjectiveTransformerForwardGPU(const int nthreads, int N, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* U, Dtype* V, bool grid) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i;
const int row_idx = output_W_ * s + t;
// homogeneous coordinates
const Dtype hom_x = coordinates[row_idx * 3];
const Dtype hom_y = coordinates[row_idx * 3 + 1];
const Dtype hom_h = coordinates[row_idx * 3 + 2];
const Dtype px = hom_x / hom_h; // NOTE: check for div by zero
const Dtype py = hom_y / hom_h;
const int V_offset = index;
V[V_offset] = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
int m, n; Dtype w;
const Dtype* pic = U + i * (C * H * W) + j * (H * W);
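// Bilinear interpolation: accumulate the four integer neighbours of (x, y), each weighted by
// (1 - |x - m|) * (1 - |y - n|); neighbours that fall outside the H x W image contribute nothing.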
m = floor(x); n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y); w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (y - n));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x); n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (x - m)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
m = floor(x) + 1; n = floor(y) + 1; w = 0;
if(m >= 0 && m < H && n >= 0 && n < W) {
w = (1 - (m - x)) * (1 - (n - y));
V[V_offset] += w * pic[m * W + n];
}
if( grid && ( ((int(round(px*64))%7)==0) || ((int(round(py*64))%7)==0) ) )
V[V_offset] = (Dtype)0.;
}
}
template <typename Dtype>
void ProjectiveTransformerLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
string prefix = "ProjectiveTransformerLayer::Forward_gpu::\t";
const Dtype* U = bottom[0]->gpu_data();
const Dtype* theta = bottom[1]->gpu_data();
const Dtype* output_grid_data = output_grid.gpu_data();
Dtype* input_grid_data = input_grid.mutable_gpu_data();
Dtype* V = top[0]->mutable_gpu_data();
caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data);
caffe_gpu_set(top[0]->count(), (Dtype)0, V);
// compute input_grid_data: map the output sampling grid through theta to homogeneous input coordinates
for(int i = 0; i < N; ++i) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 3, 3, (Dtype)1.,
output_grid_data, theta + 9 * i, (Dtype)0.,
input_grid_data + (output_H_ * output_W_ * 3) * i);
}
const int nthreads = N * C * output_H_ * output_W_;
ProjectiveTransformerForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V, this->layer_param_.proj_trans_param().grid());
}
template <typename Dtype>
__global__ void ProjectiveTransformerBackwardGPU_dTheta(const int nthreads, int C,
int output_H_, int output_W_, int H, int W,
const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array,
Dtype* dTheta_tmp_diff, const Dtype* theta) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int t = index % output_W_;
const int s = (index / output_W_) % output_H_;
const int j = (index / (output_W_ * output_H_)) % C;
const int i = index / (output_W_ * output_H_ * C);
Dtype t11=theta[9*i], t12=theta[9*i+1], t13=theta[9*i+2];
Dtype t21=theta[9*i+3], t22=theta[9*i+4], t23=theta[9*i+5];
Dtype t31=theta[9*i+6], t32=theta[9*i+7], t33=theta[9*i+8];
const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i;
const int row_idx = output_W_ * s + t;
const Dtype hom_x = coordinates[row_idx * 3];
const Dtype hom_y = coordinates[row_idx * 3 + 1];
const Dtype hom_h = coordinates[row_idx * 3 + 2];
const Dtype px = hom_x / hom_h; // NOTE: check for div by zero
const Dtype py = hom_y / hom_h;
Dtype delta_dpx = (Dtype)0.;
Dtype delta_dpy = (Dtype)0.;
const Dtype x = (px + 1) / 2 * H;
const Dtype y = (py + 1) / 2 * W;
const int dV_offset = index;
const Dtype dV = dV_array[dV_offset];
int m, n;
const Dtype* U = U_array + i * (C * H * W) + j * (H * W);
// left-bottom neighbor
m = floor(x); n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// left-top neighbor
m = floor(x); n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2;
}
// right-bottom neighbor
m = floor(x) + 1; n = floor(y);
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2;
delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
// right-top neighbor
m = floor(x) + 1; n = floor(y) + 1;
if(m >= 0 && m < H && n >= 0 && n < W) {
delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2;
delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2;
}
int idx = j * (output_H_ * output_W_) + s * output_W_ + t;
Dtype outX = (s * 1.0 / output_H_ * 2 - 1);
Dtype outY = (t * 1.0 / output_W_ * 2 - 1);
Dtype nomX = t11*outX + t12*outY + t13;
Dtype nomY = t21*outX + t22*outY + t23;
Dtype denom = t31*outX + t32*outY + t33;
Dtype denom2 = denom*denom;
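// Chain rule through the projective division p = nom / denom: entries feeding the numerators get a
// 1/denom factor, while the third row of theta picks up -nom/denom^2 from differentiating the denominator.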
dTheta_tmp_diff[(9 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * outX/denom;
dTheta_tmp_diff[(9 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * outY/denom;
dTheta_tmp_diff[(9 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx/denom;
dTheta_tmp_diff[(9 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * outX/denom;
dTheta_tmp_diff[(9 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * outY/denom;
dTheta_tmp_diff[(9 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy/denom;
dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2)*outX );
dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2)*outX );
dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2)*outY );
dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2)*outY );
dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpx * ( nomX*(-1)*(1.0/denom2) );
dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpy * ( nomY*(-1)*(1.0/denom2) );
}
}
template <typename Dtype>
void ProjectiveTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
string prefix = "ProjectiveTransformerLayer::Backward_GPU::\t";
//caffe_gpu_set(bottom[0]->count(), (Dtype)0., bottom[0]->mutable_gpu_diff());
//caffe_gpu_set(bottom[1]->count(), (Dtype)0., bottom[1]->mutable_gpu_diff());
//return;
const Dtype* dV = top[0]->gpu_diff();
const Dtype* input_grid_data = input_grid.gpu_data();
const Dtype* U = bottom[0]->gpu_data();
Dtype* dTheta = bottom[1]->mutable_gpu_diff();
Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff();
caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff);
const int nthreads = N * C * output_H_ * output_W_;
ProjectiveTransformerBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS,0,Caffe::cuda_stream()>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data,
dV, U, dTheta_tmp_diff, bottom[1]->gpu_data());
Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data();
caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data);
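// Reduce the per-pixel contributions: multiplying dTheta_tmp (9*N rows of length output_H_*output_W_*C)
// by an all-ones vector sums each row into the corresponding entry of dTheta.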
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, bottom[1]->count(), 1, output_H_ * output_W_ * C,
(Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dTheta);
/*const Dtype* db_dfull_theta = full_theta.cpu_diff();
for(int i=0; i<full_theta.count(); ++i) {
std::cout << db_dFull_theta[i] << " ";
}
std::cout<<std::endl;*/
/*int k = 0;
const int num_threads = N;
for(int i=0; i<6; ++i) {
copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads,
6, i, dFull_theta, 6, k, dTheta);
++ k;
}*/
/*const Dtype* db_dtheta = bottom[1]->cpu_diff();
for(int i=0; i<bottom[1]->count(); ++i) {
std::cout << db_dtheta[i] << " ";
}
std::cout<<std::endl;*/
}
INSTANTIATE_LAYER_GPU_FUNCS(ProjectiveTransformerLayer);
} // namespace caffe
|
1358f416f7d09462702e29b15ceab4c67a4e8a56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
__global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] * in2[tid];
} | 1358f416f7d09462702e29b15ceab4c67a4e8a56.cu | #include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
__global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] * in2[tid];
} |
b52b93f9435e28ad16c91a1bdf214c53a48de7da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_helper.h"
#include "model.h"
__global__
void scale_kernel(float* ptr, coord_t size, float a, float b)
{
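// Affine rescale: maps each value (e.g. a uniform sample in [0, 1)) onto the interval [a, b).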
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = (b - a) * ptr[i] + a;
}
}
__global__
void ones_kernel(float* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 1.0f;
}
}
template<typename DT>
__global__
void assign_kernel(DT* ptr, coord_t size, DT value)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = value;
}
}
template<typename DT>
__global__
void copy_kernel(DT* dst, const DT* src, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
dst[i] = src[i];
}
}
__global__
void reluBackward(float *grad_ptr, const float *output, int n)
{
CUDA_KERNEL_LOOP(i, n)
{
grad_ptr[i] = (output[i] > 0.0f) ? grad_ptr[i] : 0;
}
}
__global__
void apply_add(float *data_ptr, const float *replica_ptr, size_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
data_ptr[i] += replica_ptr[i];
}
}
__global__
void apply_add_with_scale(float *data_ptr, const float *grad_ptr,
size_t size, float scale)
{
CUDA_KERNEL_LOOP(i, size)
{
data_ptr[i] += grad_ptr[i] * scale;
}
}
__global__
void add_with_stride(float* output,
const float* input,
int num_blocks,
int output_blk_size,
int input_blk_size)
{
int min_blk_size = min(output_blk_size, input_blk_size);
CUDA_KERNEL_LOOP(i, num_blocks * min_blk_size)
{
int blk_idx = i / min_blk_size;
int blk_offset = i % min_blk_size;
int input_offset = blk_idx * input_blk_size + blk_offset;
int output_offset = blk_idx * output_blk_size + blk_offset;
output[output_offset] += input[input_offset];
}
}
__global__
void copy_with_stride(float* output,
const float* input,
int num_blocks,
int output_blk_size,
int input_blk_size)
{
int min_blk_size = min(output_blk_size, input_blk_size);
CUDA_KERNEL_LOOP(i, num_blocks * min_blk_size)
{
int blk_idx = i / min_blk_size;
int blk_offset = i % min_blk_size;
int input_offset = blk_idx * input_blk_size + blk_offset;
int output_offset = blk_idx * output_blk_size + blk_offset;
output[output_offset] = input[input_offset];
}
}
__host__
void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size,
int num_replica, float learning_rate)
{
// Step 1: gather gradients into the first replica
for (int i = 1; i < num_replica; i++) {
const float *replica = grad_ptr + i * replica_size;
hipLaunchKernelGGL(( apply_add), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0,
(float*)grad_ptr, replica, replica_size);
}
// Step 2: scale the aggregated gradient by -learning_rate / num_replica and add it to the parameters
float scale_factor = 1.0f / num_replica * (-learning_rate);
hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0,
para_ptr, grad_ptr, replica_size, scale_factor);
}
template<unsigned DIM, typename T>
__host__
void print_tensor(const T* ptr, Rect<DIM> rect, const char* prefix)
{
// device synchronize to make sure the data are ready
checkCUDA(hipDeviceSynchronize());
T* host_ptr;
checkCUDA(hipHostMalloc(&host_ptr, sizeof(T) * rect.volume(),
hipHostMallocPortable | hipHostMallocMapped));
checkCUDA(hipMemcpy(host_ptr, ptr, sizeof(T) * rect.volume(),
hipMemcpyDeviceToHost));
checkCUDA(hipDeviceSynchronize());
int idx = 0;
printf("%s", prefix);
for (PointInRectIterator<DIM> it(rect); it(); it++, idx++) {
printf(" %.4lf", (float)host_ptr[idx]);
if (idx >= 16) break;
}
printf("\n");
checkCUDA(hipHostFree(host_ptr));
}
cudnnStatus_t cudnnSetTensorDescriptorFromDomain(cudnnTensorDescriptor_t tensor, Domain domain)
{
int dims[MAX_TENSOR_DIM];
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[0], 1, 1, 1);
}
case 2:
{
Rect<2> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[1], dims[0], 1, 1);
}
case 3:
{
Rect<3> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
dims[2] = rect.hi[2] - rect.lo[2] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[2], dims[1], dims[0], 1);
}
case 4:
{
Rect<4> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
dims[2] = rect.hi[2] - rect.lo[2] + 1;
dims[3] = rect.hi[3] - rect.lo[3] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[3], dims[2], dims[1], dims[0]);
}
default:
assert(false && "Unsupported dim number");
}
return CUDNN_STATUS_BAD_PARAM;
}
template __global__ void assign_kernel<float>(float* ptr, coord_t size, float value);
template __global__ void assign_kernel<int32_t>(int32_t* ptr, coord_t size, int32_t value);
template __global__ void assign_kernel<int64_t>(int64_t* ptr, coord_t size, int64_t value);
template __global__ void copy_kernel<float>(float* dst, const float* src, coord_t size);
template __global__ void copy_kernel<int>(int* dst, const int* src, coord_t size);
template __host__ void print_tensor<1, float>(const float* ptr, Rect<1> rect, const char* prefix);
template __host__ void print_tensor<2, float>(const float* ptr, Rect<2> rect, const char* prefix);
template __host__ void print_tensor<3, float>(const float* ptr, Rect<3> rect, const char* prefix);
template __host__ void print_tensor<4, float>(const float* ptr, Rect<4> rect, const char* prefix);
template __host__ void print_tensor<2, long>(const long* ptr, Rect<2> rect, const char* prefix);
| b52b93f9435e28ad16c91a1bdf214c53a48de7da.cu | #include "cuda_helper.h"
#include "model.h"
__global__
void scale_kernel(float* ptr, coord_t size, float a, float b)
{
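// Affine rescale: maps each value (e.g. a uniform sample in [0, 1)) onto the interval [a, b).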
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = (b - a) * ptr[i] + a;
}
}
__global__
void ones_kernel(float* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 1.0f;
}
}
template<typename DT>
__global__
void assign_kernel(DT* ptr, coord_t size, DT value)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = value;
}
}
template<typename DT>
__global__
void copy_kernel(DT* dst, const DT* src, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
dst[i] = src[i];
}
}
__global__
void reluBackward(float *grad_ptr, const float *output, int n)
{
CUDA_KERNEL_LOOP(i, n)
{
grad_ptr[i] = (output[i] > 0.0f) ? grad_ptr[i] : 0;
}
}
__global__
void apply_add(float *data_ptr, const float *replica_ptr, size_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
data_ptr[i] += replica_ptr[i];
}
}
__global__
void apply_add_with_scale(float *data_ptr, const float *grad_ptr,
size_t size, float scale)
{
CUDA_KERNEL_LOOP(i, size)
{
data_ptr[i] += grad_ptr[i] * scale;
}
}
__global__
void add_with_stride(float* output,
const float* input,
int num_blocks,
int output_blk_size,
int input_blk_size)
{
int min_blk_size = min(output_blk_size, input_blk_size);
CUDA_KERNEL_LOOP(i, num_blocks * min_blk_size)
{
int blk_idx = i / min_blk_size;
int blk_offset = i % min_blk_size;
int input_offset = blk_idx * input_blk_size + blk_offset;
int output_offset = blk_idx * output_blk_size + blk_offset;
output[output_offset] += input[input_offset];
}
}
__global__
void copy_with_stride(float* output,
const float* input,
int num_blocks,
int output_blk_size,
int input_blk_size)
{
int min_blk_size = min(output_blk_size, input_blk_size);
CUDA_KERNEL_LOOP(i, num_blocks * min_blk_size)
{
int blk_idx = i / min_blk_size;
int blk_offset = i % min_blk_size;
int input_offset = blk_idx * input_blk_size + blk_offset;
int output_offset = blk_idx * output_blk_size + blk_offset;
output[output_offset] = input[input_offset];
}
}
__host__
void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size,
int num_replica, float learning_rate)
{
// Step 1: gather gradients into the first replica
for (int i = 1; i < num_replica; i++) {
const float *replica = grad_ptr + i * replica_size;
apply_add<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>(
(float*)grad_ptr, replica, replica_size);
}
// Step 2: scale the aggregated gradient by -learning_rate / num_replica and add it to the parameters
float scale_factor = 1.0f / num_replica * (-learning_rate);
apply_add_with_scale<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>(
para_ptr, grad_ptr, replica_size, scale_factor);
}
template<unsigned DIM, typename T>
__host__
void print_tensor(const T* ptr, Rect<DIM> rect, const char* prefix)
{
// device synchronize to make sure the data are ready
checkCUDA(cudaDeviceSynchronize());
T* host_ptr;
checkCUDA(cudaHostAlloc(&host_ptr, sizeof(T) * rect.volume(),
cudaHostAllocPortable | cudaHostAllocMapped));
checkCUDA(cudaMemcpy(host_ptr, ptr, sizeof(T) * rect.volume(),
cudaMemcpyDeviceToHost));
checkCUDA(cudaDeviceSynchronize());
int idx = 0;
printf("%s", prefix);
for (PointInRectIterator<DIM> it(rect); it(); it++, idx++) {
printf(" %.4lf", (float)host_ptr[idx]);
if (idx >= 16) break;
}
printf("\n");
checkCUDA(cudaFreeHost(host_ptr));
}
cudnnStatus_t cudnnSetTensorDescriptorFromDomain(cudnnTensorDescriptor_t tensor, Domain domain)
{
int dims[MAX_TENSOR_DIM];
switch (domain.get_dim()) {
case 1:
{
Rect<1> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[0], 1, 1, 1);
}
case 2:
{
Rect<2> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[1], dims[0], 1, 1);
}
case 3:
{
Rect<3> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
dims[2] = rect.hi[2] - rect.lo[2] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[2], dims[1], dims[0], 1);
}
case 4:
{
Rect<4> rect = domain;
dims[0] = rect.hi[0] - rect.lo[0] + 1;
dims[1] = rect.hi[1] - rect.lo[1] + 1;
dims[2] = rect.hi[2] - rect.lo[2] + 1;
dims[3] = rect.hi[3] - rect.lo[3] + 1;
return cudnnSetTensor4dDescriptor(tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, dims[3], dims[2], dims[1], dims[0]);
}
default:
assert(false && "Unsupported dim number");
}
return CUDNN_STATUS_BAD_PARAM;
}
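/*
 * Host-side sketch of binding a Legion domain to cuDNN through the helper above.
 * The runtime/ctx/region names are illustrative and assume the usual Legion task
 * accessors; the descriptor must have been created beforehand.
 *
 *   cudnnTensorDescriptor_t desc;
 *   cudnnCreateTensorDescriptor(&desc);
 *   Domain domain = runtime->get_index_space_domain(ctx, region.get_index_space());
 *   cudnnSetTensorDescriptorFromDomain(desc, domain); // supports 1D-4D domains
 */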
template __global__ void assign_kernel<float>(float* ptr, coord_t size, float value);
template __global__ void assign_kernel<int32_t>(int32_t* ptr, coord_t size, int32_t value);
template __global__ void assign_kernel<int64_t>(int64_t* ptr, coord_t size, int64_t value);
template __global__ void copy_kernel<float>(float* dst, const float* src, coord_t size);
template __global__ void copy_kernel<int>(int* dst, const int* src, coord_t size);
template __host__ void print_tensor<1, float>(const float* ptr, Rect<1> rect, const char* prefix);
template __host__ void print_tensor<2, float>(const float* ptr, Rect<2> rect, const char* prefix);
template __host__ void print_tensor<3, float>(const float* ptr, Rect<3> rect, const char* prefix);
template __host__ void print_tensor<4, float>(const float* ptr, Rect<4> rect, const char* prefix);
template __host__ void print_tensor<2, long>(const long* ptr, Rect<2> rect, const char* prefix);
|
692e35eefea8628b2df4f841d8e7d282882c4f7f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <set>
#include <vector>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <typeinfo>
#include <map>
#include <hip/hip_runtime.h>
#include <signal.h>
#include "../include/nvmatrix.cuh"
#include "../include/nvmatrix_operators.cuh"
using namespace std;
/*
* Device random number generator pointers.
*/
//map<int,hiprandGenerator_t> NVMatrix::rndGen;
map<int,MemorySegment*> NVMatrix::_rndDevStates;
map<int,int> NVMatrix::_rndDevThreads;
pthread_mutex_t* NVMatrix::_rndMutex = makeMutex();
pthread_mutex_t* NVMatrix::_cublasMutex = makeMutex();
pthread_mutex_t* NVMatrix::_streamMutex = makeMutex();
std::map<int,hipblasHandle_t> NVMatrix::_cublasHandles;
std::map<int,hipStream_t> NVMatrix::_defaultStreams;
pthread_mutex_t* NVMatrix::makeMutex() {
pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));
pthread_mutex_init(m, NULL);
return m;
}
/*
Do not call resize in _init because resize is a virtual function
that is overridden in derived classes. Since C++ does not dispatch
virtual calls to derived-class overrides from within constructors,
we call resize separately from every constructor after calling _init.
*/
void NVMatrix::_init(bool isTrans) {
_numRows = 0;
_numCols = 0;
_numElements = 0;
_ownsData = true;
_isTrans = isTrans;
_memSegment = NULL;
_stride = 0;
_texObj = 0;
}
NVMatrix::NVMatrix() : _deleted(false) {
_init(false);
}
NVMatrix::NVMatrix(bool isTrans) : _deleted(false) {
_init(isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) : _deleted(false) {
_init(isTrans);
resize(numRows, numCols);
}
NVMatrix::NVMatrix(const Matrix& like, bool copy) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
copyFromHost(like);
}
}
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
like.copy(*this);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const Matrix& like) : _deleted(false) {
_init(false);
resize(like.getNumRows(), like.getNumCols());
}
NVMatrix::NVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) :
_numRows(numRows),
_numCols(numCols),
_numElements(numRows*numCols),
_ownsData(false),
_memSegment(mem),
_isTrans(isTrans),
_deleted(false),
_texObj(0) {
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::~NVMatrix() {
if (!_deleted) {
deallocTexture();
if(_ownsData && _numElements > 0) {
dealloc();
} else {
// dealloc deletes the mem segment. But if this is a view,
// then we still need to delete the mem segment object.
// assert(_memSegment == NULL || _memSegment->getSize() == 0);
delete _memSegment;
}
}
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
copyFromHost(hostMatrix, false, getDefaultStream());
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget) {
copyFromHost(hostMatrix, resizeTarget, getDefaultStream());
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget, hipStream_t stream) {
if (resizeTarget) {
resize(hostMatrix);
} else {
assert(isSameDims(hostMatrix));
}
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
CUBLAS_CALL(hipblasSetMatrixAsync(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float),
hostMatrix.getData(), hostMatrix.getLeadingDim(), getDevData(), _stride, stream));
syncStream(stream);
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
copyToHost(hostMatrix, false, getDefaultStream());
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
copyToHost(hostMatrix, resizeTarget, getDefaultStream());
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget, hipStream_t stream) const {
if (resizeTarget) {
hostMatrix.resize(_numRows, _numCols);
} else {
assert(isSameDims(hostMatrix));
}
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
// printf("gpu.stride: %d, host.stride: %d\n", getStride(), hostMatrix.getLeadingDim());
CUBLAS_CALL(hipblasGetMatrixAsync(getLeadingDim(),getFollowingDim(), sizeof(float),
getDevData(), getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim(), stream));
syncStream(stream);
}
}
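/*
 * Host<->device round-trip sketch using the copy helpers above; sizes are
 * illustrative and Matrix is the host-side class from matrix.h.
 *
 *   Matrix hostA(128, 64); // host buffer
 *   NVMatrix devA(hostA, true); // allocate on the device and copy the contents
 *   devA.scale(2.0f); // some device-side work
 *   Matrix hostB(128, 64);
 *   devA.copyToHost(hostB); // synchronous: syncStream is called internally
 */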
void NVMatrix::copy(NVMatrix& dest) const {
copy(dest, getDefaultStream());
}
void NVMatrix::copy(NVMatrix& dest, hipStream_t stream) const {
if (&dest != this) {
if (!isSameDims(dest)) {
dest.resize(*this);
}
copy(dest, 0, -1, 0, -1, 0, 0, stream);
}
}
NVMatrix& NVMatrix::copy() const {
NVMatrix& c = construct();
copy(c);
return c;
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB, NVMatrix &target) {
rightMult(b, scaleAB, target, getDefaultStream());
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB, NVMatrix &target, hipStream_t stream) {
// if(&target != this && &target != &b) {
// target.resize(_numRows, b.getNumCols());
// target.setTrans(true);
// }
target.addProduct(*this, b, 0, scaleAB, stream);
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(NVMatrix &b, NVMatrix& target) {
rightMult(b, 1, target);
}
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB) {
addProduct(a, b, scaleThis, scaleAB, getDefaultStream());
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB, hipStream_t stream) {
assert(a.getNumCols() == b.getNumRows());
if (scaleThis == 0) {
resize(a.getNumRows(), b.getNumCols());
setTrans(true);
}
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
CUBLAS_CALL(hipblasSetStream(getCublasHandle(), stream));
CUBLAS_CALL(hipblasSgemm(getCublasHandle(), a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
&scaleAB, a.getDevData(), a.getStride(), b.getDevData(), b.getStride(),
&scaleThis, getDevData(), getStride()));
}
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b) {
addProduct(a, b, 1, 1);
}
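/*
 * GEMM sketch: with scaleThis == 0, addProduct resizes the target and makes it
 * column-major itself, so a freshly constructed matrix works as the destination.
 * Dimensions are illustrative.
 *
 *   NVMatrix a(256, 128, false);
 *   NVMatrix b(128, 64, false);
 *   NVMatrix c;
 *   c.addProduct(a, b, 0, 1); // c = a * b, resized to 256 x 64, isTrans() == true
 *   a.rightMult(b, 1, c); // equivalent formulation
 */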
void NVMatrix::assertSame(NVMatrixV& a) {
for (int i = 1; i < a.size(); ++i) {
assert(a[i]->isSameDims(*a[0]));
assert(a[i]->isTrans() == a[0]->isTrans());
assert(a[i]->getStride() == a[0]->getStride());
assert(a[i]->getDataDeviceID() == a[0]->getDataDeviceID());
}
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB,
const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev) {
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, getDefaultStream(), aPtrsDev, bPtrsDev, tgtPtrsDev);
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB) {
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, getDefaultStream());
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, hipStream_t stream,
const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev) {
assert(a.size() == b.size());
assert(a.size() == target.size());
assertSame(a);
assertSame(b);
assertSame(target);
const int batch = a.size();
if (batch > 0) {
const int rows = a[0]->getNumRows(), inner = a[0]->getNumCols(), cols = b[0]->getNumCols();
assert(inner == b[0]->getNumRows());
assert(target[0]->getNumRows() == rows);
assert(target[0]->getNumCols() == cols);
const int lda = a[0]->getStride(), ldb = b[0]->getStride(), ldc = target[0]->getStride();
hipblasOperation_t atrans = a[0]->getTransChar(), btrans = b[0]->getTransChar();
CUBLAS_CALL(hipblasSetStream(getCublasHandle(), stream));
CUBLAS_CALL(hipblasSgemmBatched(getCublasHandle(), atrans, btrans, rows, cols, inner, &scaleAB, aPtrsDev, lda, bPtrsDev, ldb, &scaleTarget, tgtPtrsDev, ldc, batch));
}
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, hipStream_t stream) {
assert(a.size() == b.size());
assert(a.size() == target.size() || target.size() == 0);
const int batch = a.size();
if (batch > 0) {
const int rows = a[0]->getNumRows(), cols = b[0]->getNumCols();
const float* aPtrs[batch], *bPtrs[batch], *tgtPtrs[batch];
for (int i = 0; i < batch; ++i) {
if (target.size() <= i) {
target.push_back(new NVMatrix(rows, cols, true));
}
aPtrs[i] = a[i]->getDevData();
bPtrs[i] = b[i]->getDevData();
tgtPtrs[i] = target[i]->getDevData();
}
// const float** aPtrsDev, **bPtrsDev;
// float **tgtPtrsDev;
// checkCudaErrors(hipMalloc(&aPtrsDev, batch * sizeof(float*)));
// checkCudaErrors(hipMalloc(&bPtrsDev, batch * sizeof(float*)));
// checkCudaErrors(hipMalloc(&tgtPtrsDev, batch * sizeof(float*)));
MemorySegment* aPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
MemorySegment* bPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
MemorySegment* tgtPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
checkCudaErrors(hipMemcpyAsync(aPtrsDev, aPtrs, batch * sizeof(float*), hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(bPtrsDev, bPtrs, batch * sizeof(float*), hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(tgtPtrsDev, tgtPtrs, batch * sizeof(float*), hipMemcpyHostToDevice, stream));
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, stream, const_cast<const float**>(aPtrsDev->getData<float*>()),
const_cast<const float**>(bPtrsDev->getData<float*>()),
tgtPtrsDev->getData<float*>());
// checkCudaErrors(hipFree(aPtrsDev));
// checkCudaErrors(hipFree(bPtrsDev));
// checkCudaErrors(hipFree(tgtPtrsDev));
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(aPtrsDev);
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(bPtrsDev);
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(tgtPtrsDev);
}
}
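/*
 * Batched GEMM sketch: each target[i] receives a[i] * b[i]; when the target
 * vector is empty, column-major result matrices are allocated on the fly.
 * Assumes NVMatrixV is the std::vector<NVMatrix*> typedef from nvmatrix.cuh and
 * that batchedMatrixMultiply is declared static there.
 *
 *   NVMatrixV as, bs, cs;
 *   for (int i = 0; i < 8; ++i) {
 *       as.push_back(new NVMatrix(64, 32, true));
 *       bs.push_back(new NVMatrix(32, 16, true));
 *   }
 *   NVMatrix::batchedMatrixMultiply(as, bs, cs, 0, 1); // cs[i] = as[i] * bs[i]
 */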
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) {
_unaryRandomize(target, rnd, getDefaultStream());
}
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd, hipStream_t stream) {
assert(isRndInitialized());
assert(isContiguous() && target.isContiguous());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
hipLaunchKernelGGL(( kUnaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, stream, getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
getLastCudaError("kUnaryRandomize: Kernel execution failed");
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) {
_binaryRandomize(data2, target, rnd, getDefaultStream());
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd, hipStream_t stream) {
assert(isRndInitialized());
assert(isContiguous() && data2.isContiguous() && target.isContiguous());
assert(isSameDims(data2));
assert(isTrans() == data2.isTrans());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
hipLaunchKernelGGL(( kBinaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, stream, getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
getLastCudaError("kBinaryRandomize: Kernel execution failed");
}
void NVMatrix::initRandom(unsigned long long seed, int numStreams) {
NVMatrix::initRandom(seed, numStreams, NVMatrix::getDefaultStream());
}
void NVMatrix::initRandom(unsigned long long seed, int numStreams, hipStream_t stream) {
// printf("init random on device %d\n", getDeviceID());
pthread_mutex_lock(_rndMutex);
assert(!isRndInitialized(true));
int d = getDeviceID();
// _rndDevStates[d] = NULL;
_rndDevThreads[d] = numStreams;
_rndDevStates[d] = DEVICE_MEMORY_MANAGER::getInstance(d).malloc(numStreams * sizeof(hiprandState_t));
// checkCudaErrors(hipMalloc((void **)&_rndDevStates[d], numStreams * sizeof(hiprandState_t)));
pthread_mutex_unlock(_rndMutex);
hipLaunchKernelGGL(( kSetupCurand), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, stream, getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one
getLastCudaError("kSetupCurand: Kernel execution failed");
}
void NVMatrix::initRandom(unsigned long long seed) {
initRandom(seed, NUM_RND_STREAMS);
}
void NVMatrix::initRandom() {
NVMatrix::initRandom(time(0));
}
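/*
 * Random-fill sketch: the per-device curand state must be initialized exactly
 * once per device before any of the randomize or noise calls; the seed is
 * illustrative.
 *
 *   NVMatrix m(1024, 1024, false);
 *   m.initRandom(42); // allocates NUM_RND_STREAMS curand states for this device
 *   m.randomizeUniform(); // m ~ U(0, 1)
 *   m.addGaussianNoise(0.1f); // adds N(0, 0.1^2) noise in place
 *   m.destroyRandom(); // release the per-device state when done
 */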
void NVMatrix::initCublas() {
int d = getDeviceID();
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(d) == 0);
CUBLAS_CALL(hipblasCreate(&_cublasHandles[d]));
// It appears that hipblasCreate causes a host -> device copy on stream 0,
// so we synchronize with it because we run everything else on other
// streams.
syncDevice();
pthread_mutex_unlock(_cublasMutex);
}
void NVMatrix::destroyCublas() {
int d = getDeviceID();
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(d) > 0);
CUBLAS_CALL(hipblasDestroy(_cublasHandles[d]));
_cublasHandles.erase(d);
pthread_mutex_unlock(_cublasMutex);
}
hipblasHandle_t NVMatrix::getCublasHandle() {
return getCublasHandle(getDeviceID());
}
hipblasHandle_t NVMatrix::getCublasHandle(int deviceID) {
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(deviceID) > 0);
hipblasHandle_t h = _cublasHandles[deviceID];
pthread_mutex_unlock(_cublasMutex);
return h;
}
hipStream_t NVMatrix::getDefaultStream() {
return getDefaultStream(NVMatrix::getDeviceID());
}
hipStream_t NVMatrix::getDefaultStream(int deviceID) {
if (deviceID >= 0) {
pthread_mutex_lock(_streamMutex);
if (_defaultStreams.count(deviceID) == 0) {
int oldDeviceID = getDeviceID();
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(hipStreamCreateWithFlags(&_defaultStreams[deviceID], hipStreamNonBlocking));
NVMatrix::setDeviceID(oldDeviceID);
}
hipStream_t s = _defaultStreams[deviceID];
pthread_mutex_unlock(_streamMutex);
return s;
}
return 0;
}
void NVMatrix::syncDevice() {
checkCudaErrors(hipDeviceSynchronize());
}
void NVMatrix::syncStream(hipStream_t stream) {
checkCudaErrors(hipStreamSynchronize(stream));
}
void NVMatrix::syncStream() {
syncStream(getDefaultStream());
}
hiprandState_t* NVMatrix::getCurandState() {
/*
* Even though we're only reading from the map here, it's important to grab
* the mutex because another thread may be writing to it.
*/
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
assert(isRndInitialized(true));
hiprandState_t* r = _rndDevStates[d]->getData<hiprandState_t>();
pthread_mutex_unlock(_rndMutex);
return r;
}
hiprandState_t* NVMatrix::getCurandState(int numStreams) {
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
assert(isRndInitialized(true));
bool realloc = numStreams > _rndDevThreads[d];
pthread_mutex_unlock(_rndMutex);
if (realloc) {
destroyRandom();
initRandom(time(0), numStreams);
}
return getCurandState();
}
int NVMatrix::getDataDeviceID() const {
if (getDevData() == NULL) {
return DEVICE_NULL;
}
struct hipPointerAttribute_t atts;
checkCudaErrors(hipPointerGetAttributes(&atts, getDevData()));
return atts.memoryType == hipMemoryTypeDevice ? atts.device : DEVICE_HOST;
}
int NVMatrix::getDeviceID() {
int d;
checkCudaErrors(hipGetDevice(&d));
// if (d == 0) {
// raise(SIGABRT);
// }
return d;
}
void NVMatrix::setDeviceID(int d) {
assert(d >= 0);
// printf("Setting device to %d\n", d);
// if (d == 0) {
// raise(SIGABRT);
// }
checkCudaErrors(hipSetDevice(d));
}
bool NVMatrix::canAccessPeer(int srcDevice, int tgtDevice) {
if (srcDevice == tgtDevice) {
return true;
}
int canAccess;
checkCudaErrors(hipDeviceCanAccessPeer(&canAccess, srcDevice, tgtDevice));
return canAccess;
}
bool NVMatrix::isRndInitialized(bool haveLock) {
if (!haveLock) {
pthread_mutex_lock(_rndMutex);
}
bool b = _rndDevStates.count(getDeviceID()) != 0;
if (!haveLock) {
pthread_mutex_unlock(_rndMutex);
}
return b;
}
bool NVMatrix::isRndInitialized() {
return isRndInitialized(false);
}
void NVMatrix::destroyRandom() {
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
assert(isRndInitialized(true));
// checkCudaErrors(hipFree(_rndDevStates[d]));
DEVICE_MEMORY_MANAGER::getInstance(d).free(_rndDevStates[d]);
_rndDevStates.erase(d);
_rndDevThreads.erase(d);
pthread_mutex_unlock(_rndMutex);
}
void NVMatrix::binarizeProbs() {
binarizeProbs(*this);
}
void NVMatrix::binarizeProbs(NVMatrix& target) {
_unaryRandomize(target, BinarizeUnaryRandomizer());
}
void NVMatrix::randomizeUniform() {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements()));
_unaryRandomize(*this, UniformUnaryRandomizer());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
randomizeGaussian(0, stdev);
}
void NVMatrix::randomizeGaussian(float mean, float stdev) {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(hiprandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev));
_unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev));
}
/*
* Kind of a hack since we don't actually need the contents of this matrix for it,
* so we don't really need a binary randomizer.
*/
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
randomizeGaussian(0, stdevs);
}
void NVMatrix::randomizeGaussian(float mean, NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, GaussianBinaryRandomizer(mean));
}
void NVMatrix::randomizeGaussian(float mean, float stdevMult, NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, ScaledGaussianBinaryRandomizer(mean, stdevMult));
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
addGaussianNoise(stdev, *this);
}
void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) {
_unaryRandomize(target, AddGaussianUnaryRandomizer(stdev));
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) {
addGaussianNoise(stdevs, var, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
addGaussianNoise(stdevs, false, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) {
if (var) {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>());
} else {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>());
}
}
void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target);
}
void NVMatrix::biggerThan(NVMatrix& b) {
biggerThan(b, *this);
}
void NVMatrix::equals(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Equals(), b, target);
}
void NVMatrix::equals(NVMatrix& m) {
equals(m, *this);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target);
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow <= _numRows);
assert(endRow >= startRow && endRow <= _numRows);
assert(startCol >= 0 && startCol <= _numCols);
assert(endCol >= startCol && endCol <= _numCols);
}
/*
* The only place where stride is supported for now!
* Will ALWAYS return a view of the original data, sometimes non-contiguous.
*/
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans()) {
return construct(new MemorySegment(this->getDevData() + startRow * _stride + startCol), endRow - startRow, endCol - startCol, _stride, false);
}
return construct(new MemorySegment(this->getDevData() + startCol * _stride + startRow), endRow - startRow, endCol - startCol, _stride, true);
}
/* this will NEVER return a view */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
int sliceRows = endRow - startRow, sliceCols = endCol - startCol;
if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) {
target.resize(sliceRows, sliceCols);
}
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
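/*
 * Slicing sketch: the reference-returning slice variants give a view onto the
 * same device memory and the caller owns (and must delete) the returned wrapper;
 * the target-taking variants always copy. Dimensions are illustrative.
 *
 *   NVMatrix m(100, 80, false);
 *   NVMatrix& view = m.sliceRows(10, 20); // rows 10..19, no copy
 *   view.scale(0.5f); // scales those rows of m in place
 *   delete &view; // frees only the wrapper, not m's data
 *
 *   NVMatrix cols;
 *   m.sliceCols(0, 40, cols); // independent copy of columns 0..39
 */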
NVMatrixV& NVMatrix::splitRows(int numParts) {
assert(getNumRows() % numParts == 0);
NVMatrixV& v = *new NVMatrixV();
int partSize = getNumRows() / numParts;
for (int p = 0; p < numParts; ++p) {
v.push_back(&sliceRows(p * partSize, (p+1) * partSize));
}
return v;
}
NVMatrixV& NVMatrix::splitCols(int numParts) {
assert(getNumCols() % numParts == 0);
NVMatrixV& v = *new NVMatrixV();
int partSize = getNumCols() / numParts;
for (int p = 0; p < numParts; ++p) {
v.push_back(&sliceCols(p * partSize, (p+1) * partSize));
}
return v;
}
/*
* Guaranteed to not change the data if the number of elements doesn't change.
* So you can use this to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols, bool trans) {
setTrans(trans);
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
assert(_ownsData || (_numElements == numRows * numCols && isContiguous()));
if (_numElements != numRows * numCols) {
if (_numElements > 0) { // free old memory
dealloc();
}
if (numRows * numCols > 0) { // allocate new memory
alloc(numCols * numRows);
} else {
_memSegment = NULL;
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_stride = getLeadingDim();
}
return reallocated;
}
bool NVMatrix::resize(int numRows, int numCols) {
return resize(numRows, numCols, isTrans());
}
bool NVMatrix::resize(const NVMatrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
bool NVMatrix::resize(const Matrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
void NVMatrix::reshape(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
_stride = getLeadingDim();
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) const {
assert(isContiguous());
assert(_numElements == numRows*numCols);
return construct(new MemorySegment(*_memSegment), numRows, numCols, -1, _isTrans);
}
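/*
 * resize vs. reshape sketch: resize may reallocate (only guaranteed to preserve
 * data when the element count is unchanged), while reshape just reinterprets a
 * contiguous matrix under new dimensions.
 *
 *   NVMatrix m(64, 32, false); // 2048 elements
 *   m.reshape(32, 64); // same buffer, new shape
 *   m.resize(128, 128); // element count changes, so the buffer is reallocated
 */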
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
copy(dest, srcStartRow, srcEndRow, srcStartCol, srcEndCol, destStartRow, destStartCol, getDefaultStream());
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol, hipStream_t stream) const {
srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol;
NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol);
NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol);
if (srcSlice->isContiguous() && destSlice->isContiguous() && srcSlice->isSameDims(*destSlice) && srcSlice->isTrans() == destSlice->isTrans()) {
// The commonest case.
checkCudaErrors(hipMemcpyAsync(destSlice->getDevData(), srcSlice->getDevData(), srcSlice->getNumDataBytes(), hipMemcpyDefault, stream));
} else {
srcSlice->apply(NVMatrixOps::Identity(), *destSlice, stream);
}
delete srcSlice;
delete destSlice;
}
NVMatrix& NVMatrix::getTranspose() {
return construct(new MemorySegment(*_memSegment), _numCols, _numRows, _stride, !_isTrans);
}
NVMatrix& NVMatrix::getClone() {
return construct(new MemorySegment(*_memSegment), _numRows, _numCols, _stride, _isTrans);
}
void NVMatrix::transpose(NVMatrix& target) {
flipTrans(target);
target.setTrans(!target.isTrans());
target.reshape(target.getNumCols(), target.getNumRows());
}
void NVMatrix::transpose() {
int tmp = _numCols;
_numCols = _numRows;
_numRows = tmp;
_isTrans = !_isTrans;
}
bool NVMatrix::transpose(bool trans) {
bool oldTrans = _isTrans;
if (oldTrans != trans) {
transpose();
}
return oldTrans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
* the same dimensions, its layout in memory just changes.
*/
NVMatrix& NVMatrix::flipTrans() {
NVMatrix& meTrans = construct(*this);
flipTrans(meTrans);
return meTrans;
}
void NVMatrix::flipTrans(NVMatrix& target) {
flipTrans(target, getDefaultStream());
}
void NVMatrix::flipTrans(NVMatrix& target, hipStream_t stream) {
assert(&target != this);
target.resize(_numRows, _numCols);
target.setTrans(!isTrans());
// target.printShape("target");
// this->printShape("this");
apply(NVMatrixOps::Identity(), target, stream);
}
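/*
 * flipTrans sketch: the result keeps the logical dimensions but stores them in
 * the opposite memory order (a real copy), whereas getTranspose() merely
 * reinterprets the existing buffer.
 *
 *   NVMatrix rowMajor(256, 512, false);
 *   NVMatrix& colMajor = rowMajor.flipTrans();
 *   assert(colMajor.isTrans() && colMajor.getNumRows() == 256);
 *   delete &colMajor;
 */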
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
add(b, scaleA, scaleB, target, NVMatrix::getDefaultStream());
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target, hipStream_t stream) {
if (scaleA == 0) {
b.scale(scaleB, target, stream);
} else if (scaleB == 0) {
scale(scaleA, target, stream);
} else if (scaleA == 1 && scaleB == 1) { // slight optimization
applyBinary(NVMatrixBinaryOps::Add(), b, target, stream);
} else if (scaleA == 1) {
applyBinary(NVMatrixBinaryOps::WeightedAdd1(scaleB), b, target, stream);
} else {
applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target, stream);
}
}
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
add(b, 1, scaleB, target);
}
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
add(b, 1, target);
}
void NVMatrix::add(NVMatrix& b, float scaleB) {
add(b, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
add(b, scaleA, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b) {
add(b, 1, *this);
}
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
add(b, -1, target);
}
void NVMatrix::subtract(NVMatrix& b) {
add(b, -1);
}
void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Multiply(), b, target);
}
void NVMatrix::eltwiseMult(NVMatrix& b) {
eltwiseMult(b, *this);
}
void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Divide(), b, target);
}
void NVMatrix::eltwiseDivide(NVMatrix& b) {
eltwiseDivide(b, *this);
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
tile(timesY, timesX, target, getDefaultStream());
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target, hipStream_t stream) {
assert(isContiguous() && target.isContiguous());
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target.setTrans(_isTrans);
if(!isTrans()) {
hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, stream, getDevData(), target.getDevData(), _numCols, _numRows, target._numCols, target._numRows);
} else {
hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, stream, getDevData(), target.getDevData(), _numRows, _numCols, target._numRows, target._numCols);
}
getLastCudaError("Kernel execution failed");
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
addVector(vec, scaleVec, target, getDefaultStream());
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target, hipStream_t stream) {
applyBinaryV(NVMatrixBinaryOps::ScaledAdd(scaleVec), vec, target, stream);
}
void NVMatrix::addVector(NVMatrix& vec) {
addVector(vec, 1);
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
addVector(vec, scaleVec, *this);
}
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
addVector(vec, 1, target);
}
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target);
}
void NVMatrix::equalsVector(NVMatrix& vec) {
equalsVector(vec, *this);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) {
eltwiseMultByVector(vec, target, getDefaultStream());
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target, hipStream_t stream) {
applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target, stream);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, hipStream_t stream) {
eltwiseMultByVector(vec, *this, stream);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec) {
eltwiseMultByVector(vec, *this);
}
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) {
eltwiseDivideByVector(vec, *this);
}
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target);
}
/*
 * TODO: this is a mess; fix it. It works pretty fast, but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
template<class Agg, class UnaryOp, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop, hipStream_t stream) {
assert(axis == 0 || axis == 1);
assert(isContiguous() && target.isContiguous());
assert(&target != this);
int width = _isTrans ? _numRows : _numCols;
int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if((axis == 0 && !_isTrans) || (axis == 1 && _isTrans)) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
// int height = getFollowingDim();
if ((height <= 2048 || width >= 4096)) {
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
hipLaunchKernelGGL(( kDumbAggCols<Agg, UnaryOp, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, stream, getTextureObject(), target.getDevData(), width, height, agg, uop, bop);
getLastCudaError("kDumbAggCols: Kernel execution failed");
} else { // Specialize the case when we have very long columns and few of them
const int sumLength = 128;
NVMatrix tmp(DIVUP(height, sumLength), width);
int numBlocksX = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
int numBlocksY = DIVUP(height, sumLength);
dim3 blocks(numBlocksX, numBlocksY);
dim3 threads(NUM_SUM_COLS_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kAggCols<Agg, UnaryOp>), dim3(blocks),dim3(threads), 0, stream, getTextureObject(), tmp.getDevData(), width, height, sumLength, agg, uop);
getLastCudaError("kAggCols: Kernel execution failed");
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kDumbAggCols<Agg, NVMatrixOps::Identity, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, stream, tmp.getTextureObject(), target.getDevData(), width, height, agg, NVMatrixOps::Identity(), bop);
getLastCudaError("kDumbAggCols: Kernel execution failed");
}
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
if (height >= 16384) { // linear aggregation
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
if(width <= 16) {
if(width <= 4) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 4>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 8) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 8>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 12) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 12>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 16>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
}
} else if(width <= 32) {
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 48){
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 64){
hipLaunchKernelGGL(( kAggShortRows<Agg, UnaryOp, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggShortRows2<Agg, UnaryOp, BinaryOp>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),width, height, agg, uop, bop);
}
} else {
if (width >= 512) {
// NOTE: this is the only case which I bothered to try to optimize for Kepler
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, height);
hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, stream, getDevData(), target.getDevData(), width, height, agg, uop, bop);
} else {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = ::min(height, NUM_BLOCKS_MAX);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
if(width <= 64) {
hipLaunchKernelGGL(( kAggRows<Agg, UnaryOp, BinaryOp, 32>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 128) {
hipLaunchKernelGGL(( kAggRows<Agg, UnaryOp, BinaryOp, 64>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 256) {
hipLaunchKernelGGL(( kAggRows<Agg, UnaryOp, BinaryOp, 128>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 512) {
hipLaunchKernelGGL(( kAggRows<Agg, UnaryOp, BinaryOp, 256>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else {
hipLaunchKernelGGL(( kAggRows<Agg, UnaryOp, BinaryOp, 512>), dim3(grid), dim3(threads), 0, stream, getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
}
getLastCudaError("agg rows: Kernel execution failed");
}
}
} else {
target.applyBinary(NVMatrixBinaryOps::CompositeSecond<UnaryOp, BinaryOp>(uop, bop), *this, target, stream);
// copy(target, stream);
}
}
}
template<class Agg, class UnaryOp, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop) {
_aggregate(axis, target, agg, uop, bop, getDefaultStream());
}
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop) {
_aggregate(axis, target, agg, NVMatrixOps::Identity(), bop, getDefaultStream());
}
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop, hipStream_t stream) {
_aggregate(axis, target, agg, NVMatrixOps::Identity(), bop, stream);
}
template<class Agg, class UnaryOp, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, UnaryOp uop, BinaryOp bop) {
NVMatrix &sumVec = construct();
_aggregate(axis, sumVec, agg, uop, bop);
return sumVec;
}
template<class Agg, class UnaryOp, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, UnaryOp uop, BinaryOp bop, hipStream_t stream) {
NVMatrix &sumVec = construct();
_aggregate(axis, sumVec, agg, uop, bop, stream);
return sumVec;
}
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp bop) {
return _aggregate(axis, agg, NVMatrixOps::Identity(), bop);
}
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp bop, hipStream_t stream) {
return _aggregate(axis, agg, NVMatrixOps::Identity(), bop, stream);
}
void NVMatrix::inRangeInc(float lower, float upper) {
inRangeInc(lower, upper, *this);
}
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<false>(lower, upper), target);
}
void NVMatrix::inRangeExc(float lower, float upper) {
inRangeExc(lower, upper, *this);
}
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<true>(lower, upper), target);
}
void NVMatrix::biggerThanScalar(float scalar) {
biggerThanScalar(scalar, *this);
}
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::BiggerThanScalar(scalar), target);
}
void NVMatrix::smallerThanScalar(float scalar) {
smallerThanScalar(scalar, *this);
}
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::SmallerThanScalar(scalar), target);
}
void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) {
apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target);
}
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::AddScalar(scalar), target);
}
void NVMatrix::addScalar(float scalar) {
addScalar(scalar, *this);
}
void NVMatrix::minWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MinWithScalar(scalar), target);
}
void NVMatrix::minWithScalar(float scalar) {
minWithScalar(scalar, *this);
}
void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MaxWithScalar(scalar), target);
}
void NVMatrix::maxWithScalar(float scalar) {
maxWithScalar(scalar, *this);
}
void NVMatrix::pow(float p, NVMatrix& target) {
apply(NVMatrixOps::Pow(p), target);
}
void NVMatrix::pow(float p) {
pow(p, *this);
}
void NVMatrix::scale(float _scale) {
scale(_scale, *this);
}
void NVMatrix::scale(float _scale, hipStream_t stream) {
scale(_scale, *this, stream);
}
void NVMatrix::scale(float _scale, NVMatrix& target) {
scale(_scale, target, NVMatrix::getDefaultStream());
}
void NVMatrix::scale(float _scale, NVMatrix& target, hipStream_t stream) {
if (_scale != 1 || &target != this) { // optimize away scale by 1
if (_scale == 1) {
copy(target, stream);
} else {
apply(NVMatrixOps::MultByScalar(_scale), target, stream);
}
}
}
void NVMatrix::zero() {
apply(NVMatrixOps::Zero());
}
void NVMatrix::zero(NVMatrix& like) {
resize(like);
zero();
}
void NVMatrix::max(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) {
addSum(a, axis, scaleThis, scaleSum, getDefaultStream());
}
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum, hipStream_t stream) {
if (scaleThis != 0) {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum), stream);
} else {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum), stream);
}
}
void NVMatrix::sum(int axis, NVMatrix& target) {
sum(axis, target, getDefaultStream());
}
void NVMatrix::sum(int axis, NVMatrix& target, hipStream_t stream) {
_aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second(), stream);
}
void NVMatrix::sumOfSquares(int axis, NVMatrix& target) {
sumOfSquares(axis, target, getDefaultStream());
}
void NVMatrix::sumOfSquares(int axis, NVMatrix& target, hipStream_t stream) {
_aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixOps::Square(), NVMatrixBinaryOps::Second(), stream);
}
void NVMatrix::min(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::max(int axis) {
return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sum(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::min(int axis) {
return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sumOfSquares(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixOps::Square(), NVMatrixBinaryOps::Second());
}
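/*
 * Axis-aggregation sketch: axis 0 reduces over rows (one value per column) and
 * axis 1 reduces over columns (one value per row); the scalar overloads reduce
 * everything and synchronize through a host copy.
 *
 *   NVMatrix m(1000, 10, false);
 *   NVMatrix colSums, rowMaxes;
 *   m.sum(0, colSums); // 1 x 10
 *   m.max(1, rowMaxes); // 1000 x 1
 *   float total = m.sum(); // scalar reduction over all elements
 */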
void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads) {
*threads = dim3(DP_BLOCKSIZE);
*blocks = dim3(::min(CPUSUM_MAX, DIVUP(n, DP_BLOCKSIZE)));
}
float NVMatrix::mean() {
return sum() / getNumElements();
}
float NVMatrix::sum() {
return _totalAgg(NVMatrixAggs::Sum());
}
float NVMatrix::sum(NVMatrix& tmpbuf) {
return _totalAgg(NVMatrixAggs::Sum(), tmpbuf, getDefaultStream());
}
float NVMatrix::max() {
return _totalAgg(NVMatrixAggs::Max());
}
float NVMatrix::min() {
return _totalAgg(NVMatrixAggs::Min());
}
float NVMatrix::countNan() {
return _totalAgg(NVMatrixAggs::CountNan());
}
float NVMatrix::countInf() {
return _totalAgg(NVMatrixAggs::CountInf());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg) {
return _totalAgg(agg, getDefaultStream());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg, hipStream_t stream) {
NVMatrix tmp;
return _totalAgg(agg, tmp, stream);
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg, NVMatrix& tmpbuf, hipStream_t stream) {
assert(isContiguous());
dim3 blocks, threads;
// Sum most of it on GPU
_sum_setParams(getNumElements(), &blocks, &threads);
tmpbuf.resize(1, blocks.x);
hipLaunchKernelGGL(( kTotalAgg), dim3(blocks), dim3(threads), 0, stream, getDevData(), tmpbuf.getDevData(), getNumElements(), agg);
getLastCudaError("kTotalAgg: Kernel execution failed");
// Don't need to sync because we copyToHost in the same stream, so it's serialized
// NVMatrix::syncStream(stream);
return tmpbuf.cpuAgg(agg, stream);
}
template<class Agg>
float NVMatrix::cpuAgg(Agg agg, hipStream_t stream) {
Matrix bufCPU(getNumRows(), getNumCols());
copyToHost(bufCPU, false, stream);
if (getNumElements() > 1) { // Sum remainder on CPU
if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) {
return bufCPU.sum();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) {
return bufCPU.max();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) {
return bufCPU.min();
} else if (typeid(Agg) == typeid(NVMatrixAggs::CountNan)) {
return bufCPU.hasNan(); // not a true count, but nonzero exactly when NaNs are present
} else if (typeid(Agg) == typeid(NVMatrixAggs::CountInf)) {
return bufCPU.hasInf();
} else {
assert(false);
}
}
return bufCPU(0,0);
}
float NVMatrix::dotProduct(NVMatrix& b) {
return dotProduct(b, getDefaultStream());
}
float NVMatrix::dotProduct(NVMatrix& b, hipStream_t stream) {
NVMatrix tmp;
return dotProduct(b, tmp, stream);
}
/*
* Fast dot product only for matrices with same transposedness.
*/
float NVMatrix::dotProduct(NVMatrix& b, NVMatrix& tmp, hipStream_t stream) {
assert(isContiguous() && b.isContiguous());
assert(isSameDims(b));
assert(isTrans() == b.isTrans()); // see?
dim3 blocks, threads;
_sum_setParams(getNumElements(), &blocks, &threads);
// NVMatrix target(1, blocks.x);
tmp.resize(1, blocks.x);
hipLaunchKernelGGL(( kDotProduct_r), dim3(blocks), dim3(threads), 0, stream, getDevData(), b.getDevData(), tmp.getDevData(), getNumElements());
getLastCudaError("kDotProduct_r: Kernel execution failed");
// hipDeviceSynchronize();
// syncStream(stream);
// return tmp._totalAgg(NVMatrixAggs::Sum(), stream);
return tmp.cpuAgg(NVMatrixAggs::Sum(), stream);
}
float NVMatrix::norm2() {
return dotProduct(*this);
}
float NVMatrix::norm() {
return sqrt(norm2());
}
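/*
 * Norm sketch: norm2() is the dot product of the matrix with itself, so the
 * Frobenius norm is sqrt(m.dotProduct(m)); both operands must share dimensions
 * and transposedness.
 *
 *   NVMatrix w(4096, 1, false);
 *   float l2 = w.norm(); // Euclidean norm of the vector
 *   float sq = w.dotProduct(w); // equals l2 * l2 up to rounding
 */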
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
// hipDeviceSynchronize();
syncDevice();
Matrix hm = Matrix(_numRows, _numCols);
copyToHost(hm);
hm.print(startRow, rows, startCol, cols);
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
void NVMatrix::printShape(const char* name) const {
printf("%s: %dx%d\n", name, _numRows, _numCols);
}
void NVMatrix::alloc(int numElements) {
_memSegment = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(numElements * sizeof(float));
}
void NVMatrix::dealloc() {
DEVICE_MEMORY_MANAGER::getInstance(_memSegment->getDeviceID()).free(_memSegment);
_memSegment = NULL;
deallocTexture();
}
void NVMatrix::deallocTexture() {
if (_texObj != 0) {
checkCudaErrors(hipDestroyTextureObject(_texObj));
_texObj = 0;
}
}
hipTextureObject_t NVMatrix::getTextureObject() {
if (_texObj == 0) {
assert(isContiguous());
//size_t memFree, memTotal;
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = getDevData();
resDesc.res.linear.sizeInBytes = getNumDataBytes();
resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
checkCudaErrors(hipCreateTextureObject(&_texObj, &resDesc, &texDesc, NULL));
}
assert(_texObj != 0);
return _texObj;
}
NVMatrix& NVMatrix::construct() const {
return *new NVMatrix();
}
NVMatrix& NVMatrix::construct(bool isTrans) const {
return *new NVMatrix(isTrans);
}
NVMatrix& NVMatrix::construct(int numRows, int numCols, bool isTrans) const {
return *new NVMatrix(numRows, numCols, isTrans);
}
NVMatrix& NVMatrix::construct(const Matrix& like, bool copy) const {
return *new NVMatrix(like, copy);
}
NVMatrix& NVMatrix::construct(const NVMatrix& like, bool copy) const {
return *new NVMatrix(like, copy);
}
NVMatrix& NVMatrix::construct(const NVMatrix& like) const {
return *new NVMatrix(like);
}
NVMatrix& NVMatrix::construct(const Matrix& like) const {
return *new NVMatrix(like);
}
NVMatrix& NVMatrix::construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const {
return *new NVMatrix(mem, numRows, numCols, stride, isTrans);
}
/* ================
* HostNVMatrix
* ================
*/
HostNVMatrix::~HostNVMatrix() {
if (_ownsData && _numElements > 0) {
dealloc();
} else {
// dealloc frees the mem segment. But if this is a view,
// then we need to delete the mem segment object.
// assert(_memSegment == NULL || _memSegment->getSize() == 0);
delete _memSegment;
}
_deleted = true;
}
HostNVMatrix::HostNVMatrix() : NVMatrix() {
_init(false);
}
HostNVMatrix::HostNVMatrix(bool isTrans) {
_init(isTrans);
}
HostNVMatrix::HostNVMatrix(int numRows, int numCols, bool isTrans) {
_init(isTrans);
resize(numRows, numCols);
}
HostNVMatrix::HostNVMatrix(const Matrix& like, bool copy) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
copyFromHost(like);
}
}
HostNVMatrix::HostNVMatrix(const NVMatrix& like, bool copy) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
like.copy(*this);
}
}
HostNVMatrix::HostNVMatrix(const NVMatrix& like) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
}
HostNVMatrix::HostNVMatrix(const Matrix& like) {
_init(false);
resize(like.getNumRows(), like.getNumCols());
}
HostNVMatrix::HostNVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans)
: NVMatrix(mem, numRows, numCols, stride, isTrans) {
}
NVMatrix& HostNVMatrix::construct() const {
return *new HostNVMatrix();
}
NVMatrix& HostNVMatrix::construct(bool isTrans) const {
return *new HostNVMatrix(isTrans);
}
NVMatrix& HostNVMatrix::construct(int numRows, int numCols, bool isTrans) const {
return *new HostNVMatrix(numRows, numCols, isTrans);
}
NVMatrix& HostNVMatrix::construct(const Matrix& like, bool copy) const {
return *new HostNVMatrix(like, copy);
}
NVMatrix& HostNVMatrix::construct(const NVMatrix& like, bool copy) const {
return *new HostNVMatrix(like, copy);
}
NVMatrix& HostNVMatrix::construct(const NVMatrix& like) const {
return *new HostNVMatrix(like);
}
NVMatrix& HostNVMatrix::construct(const Matrix& like) const {
return *new HostNVMatrix(like);
}
NVMatrix& HostNVMatrix::construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const {
return *new HostNVMatrix(mem, numRows, numCols, stride, isTrans);
}
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget, hipStream_t stream) {
if (resizeTarget) {
resize(hostMatrix);
} else {
assert(isSameDims(hostMatrix));
}
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
checkCudaErrors(hipMemcpy2D(getDevData(), _stride * sizeof(float), hostMatrix.getData(),
hostMatrix.getLeadingDim() * sizeof(float), getLeadingDim() * sizeof(float),
getFollowingDim(), hipMemcpyHostToHost));
// syncStream(stream);
}
}
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget) {
copyFromHost(hostMatrix, resizeTarget, 0);
}
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix) {
copyFromHost(hostMatrix, false, 0);
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget, hipStream_t stream) const {
if (resizeTarget) {
hostMatrix.resize(getNumRows(), getNumCols());
} else {
assert(isSameDims(hostMatrix));
}
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
checkCudaErrors(hipMemcpy2D(hostMatrix.getData(), hostMatrix.getLeadingDim() * sizeof(float),
getDevData(), _stride * sizeof(float), getLeadingDim() * sizeof(float),
getFollowingDim(), hipMemcpyHostToHost));
// syncStream(stream);
}
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
copyToHost(hostMatrix, resizeTarget, 0);
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix) const {
copyToHost(hostMatrix, false, 0);
}
void HostNVMatrix::alloc(int numElements) {
// checkCudaErrors(hipHostMalloc(&_devData, numElements * sizeof(float), hipHostMallocPortable));
_memSegment = HOST_MEMORY_MANAGER::getInstance().malloc(numElements * sizeof(float));
// _memSegment = FastHostMemoryManager::getInstance().malloc(numElements * sizeof(float));
}
void HostNVMatrix::dealloc() {
// FastHostMemoryManager::getInstance().free(_memSegment);
HOST_MEMORY_MANAGER::getInstance().free(_memSegment);
_memSegment = NULL;
// checkCudaErrors(hipHostFree(_devData));
}
hipTextureObject_t HostNVMatrix::getTextureObject() {
assert(false);
return 0;
}
| 692e35eefea8628b2df4f841d8e7d282882c4f7f.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <set>
#include <vector>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <typeinfo>
#include <map>
#include <cuda.h>
#include <signal.h>
#include "../include/nvmatrix.cuh"
#include "../include/nvmatrix_operators.cuh"
using namespace std;
/*
* Device random number generator pointers.
*/
//map<int,curandGenerator_t> NVMatrix::rndGen;
map<int,MemorySegment*> NVMatrix::_rndDevStates;
map<int,int> NVMatrix::_rndDevThreads;
pthread_mutex_t* NVMatrix::_rndMutex = makeMutex();
pthread_mutex_t* NVMatrix::_cublasMutex = makeMutex();
pthread_mutex_t* NVMatrix::_streamMutex = makeMutex();
std::map<int,cublasHandle_t> NVMatrix::_cublasHandles;
std::map<int,cudaStream_t> NVMatrix::_defaultStreams;
pthread_mutex_t* NVMatrix::makeMutex() {
pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t));
pthread_mutex_init(m, NULL);
return m;
}
/*
Do not call resize in _init because resize is a virtual function
that is overridden in derived classes. Since C++ does not dispatch
virtual calls to derived-class overrides from within constructors,
we call resize separately from every constructor after calling _init.
*/
void NVMatrix::_init(bool isTrans) {
_numRows = 0;
_numCols = 0;
_numElements = 0;
_ownsData = true;
_isTrans = isTrans;
_memSegment = NULL;
_stride = 0;
_texObj = 0;
}
NVMatrix::NVMatrix() : _deleted(false) {
_init(false);
}
NVMatrix::NVMatrix(bool isTrans) : _deleted(false) {
_init(isTrans);
}
NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) : _deleted(false) {
_init(isTrans);
resize(numRows, numCols);
}
NVMatrix::NVMatrix(const Matrix& like, bool copy) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
copyFromHost(like);
}
}
NVMatrix::NVMatrix(const NVMatrix& like, bool copy) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
like.copy(*this);
}
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const NVMatrix& like) : _deleted(false) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
}
/*
* Initializes NVMatrix with same dimensions as given matrix but
* does not copy any data.
*/
NVMatrix::NVMatrix(const Matrix& like) : _deleted(false) {
_init(false);
resize(like.getNumRows(), like.getNumCols());
}
NVMatrix::NVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) :
_numRows(numRows),
_numCols(numCols),
_numElements(numRows*numCols),
_ownsData(false),
_memSegment(mem),
_isTrans(isTrans),
_deleted(false),
_texObj(0) {
_stride = stride < 0 ? getLeadingDim() : stride;
}
NVMatrix::~NVMatrix() {
if (!_deleted) {
deallocTexture();
if(_ownsData && _numElements > 0) {
dealloc();
} else {
// dealloc deletes the mem segment. But if this is a view,
// then we still need to delete the mem segment object.
// assert(_memSegment == NULL || _memSegment->getSize() == 0);
delete _memSegment;
}
}
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix) {
copyFromHost(hostMatrix, false, getDefaultStream());
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget) {
copyFromHost(hostMatrix, resizeTarget, getDefaultStream());
}
void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) {
if (resizeTarget) {
resize(hostMatrix);
} else {
assert(isSameDims(hostMatrix));
}
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
CUBLAS_CALL(cublasSetMatrixAsync(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float),
hostMatrix.getData(), hostMatrix.getLeadingDim(), getDevData(), _stride, stream));
syncStream(stream);
}
}
void NVMatrix::copyToHost(Matrix& hostMatrix) const {
copyToHost(hostMatrix, false, getDefaultStream());
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
copyToHost(hostMatrix, resizeTarget, getDefaultStream());
}
void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) const {
if (resizeTarget) {
hostMatrix.resize(_numRows, _numCols);
} else {
assert(isSameDims(hostMatrix));
}
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
// printf("gpu.stride: %d, host.stride: %d\n", getStride(), hostMatrix.getLeadingDim());
CUBLAS_CALL(cublasGetMatrixAsync(getLeadingDim(),getFollowingDim(), sizeof(float),
getDevData(), getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim(), stream));
syncStream(stream);
}
}
void NVMatrix::copy(NVMatrix& dest) const {
copy(dest, getDefaultStream());
}
void NVMatrix::copy(NVMatrix& dest, cudaStream_t stream) const {
if (&dest != this) {
if (!isSameDims(dest)) {
dest.resize(*this);
}
copy(dest, 0, -1, 0, -1, 0, 0, stream);
}
}
NVMatrix& NVMatrix::copy() const {
NVMatrix& c = construct();
copy(c);
return c;
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB, NVMatrix &target) {
rightMult(b, scaleAB, target, getDefaultStream());
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB, NVMatrix &target, cudaStream_t stream) {
// if(&target != this && &target != &b) {
// target.resize(_numRows, b.getNumCols());
// target.setTrans(true);
// }
target.addProduct(*this, b, 0, scaleAB, stream);
}
void NVMatrix::rightMult(NVMatrix &b, float scaleAB) {
rightMult(b, scaleAB, *this);
}
void NVMatrix::rightMult(NVMatrix &b, NVMatrix& target) {
rightMult(b, 1, target);
}
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB) {
addProduct(a, b, scaleThis, scaleAB, getDefaultStream());
}
/*
* This will only work if this matrix is in column-major order! In other words,
* if isTrans() returns true.
*/
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b, float scaleThis, float scaleAB, cudaStream_t stream) {
assert(a.getNumCols() == b.getNumRows());
if (scaleThis == 0) {
resize(a.getNumRows(), b.getNumCols());
setTrans(true);
}
assert(this->getNumRows() == a.getNumRows());
assert(this->getNumCols() == b.getNumCols());
assert(_isTrans);
CUBLAS_CALL(cublasSetStream_v2(getCublasHandle(), stream));
CUBLAS_CALL(cublasSgemm_v2(getCublasHandle(), a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(),
&scaleAB, a.getDevData(), a.getStride(), b.getDevData(), b.getStride(),
&scaleThis, getDevData(), getStride()));
}
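// Usage sketch (hypothetical example, not part of the original file): addProduct is the
// workhorse behind rightMult. With scaleThis == 0 the target is resized and forced into
// column-major ("trans") order, as required by the cuBLAS call above.
//   NVMatrix A(128, 64, false), B(64, 32, false), C;
//   C.addProduct(A, B, 0, 2);   // C := 2 * A * B; C becomes a 128x32 column-major matrix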
void NVMatrix::addProduct(NVMatrix& a, NVMatrix &b) {
addProduct(a, b, 1, 1);
}
void NVMatrix::assertSame(NVMatrixV& a) {
for (int i = 1; i < a.size(); ++i) {
assert(a[i]->isSameDims(*a[0]));
assert(a[i]->isTrans() == a[0]->isTrans());
assert(a[i]->getStride() == a[0]->getStride());
assert(a[i]->getDataDeviceID() == a[0]->getDataDeviceID());
}
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB,
const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev) {
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, getDefaultStream(), aPtrsDev, bPtrsDev, tgtPtrsDev);
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB) {
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, getDefaultStream());
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, cudaStream_t stream,
const float** aPtrsDev, const float** bPtrsDev, float** tgtPtrsDev) {
assert(a.size() == b.size());
assert(a.size() == target.size());
assertSame(a);
assertSame(b);
assertSame(target);
const int batch = a.size();
if (batch > 0) {
const int rows = a[0]->getNumRows(), inner = a[0]->getNumCols(), cols = b[0]->getNumCols();
assert(inner == b[0]->getNumRows());
assert(target[0]->getNumRows() == rows);
assert(target[0]->getNumCols() == cols);
const int lda = a[0]->getStride(), ldb = b[0]->getStride(), ldc = target[0]->getStride();
cublasOperation_t atrans = a[0]->getTransChar(), btrans = b[0]->getTransChar();
CUBLAS_CALL(cublasSetStream_v2(getCublasHandle(), stream));
CUBLAS_CALL(cublasSgemmBatched(getCublasHandle(), atrans, btrans, rows, cols, inner, &scaleAB, aPtrsDev, lda, bPtrsDev, ldb, &scaleTarget, tgtPtrsDev, ldc, batch));
}
}
void NVMatrix::batchedMatrixMultiply(NVMatrixV& a, NVMatrixV& b, NVMatrixV& target, float scaleTarget, float scaleAB, cudaStream_t stream) {
assert(a.size() == b.size());
assert(a.size() == target.size() || target.size() == 0);
const int batch = a.size();
if (batch > 0) {
const int rows = a[0]->getNumRows(), cols = b[0]->getNumCols();
const float* aPtrs[batch], *bPtrs[batch], *tgtPtrs[batch];
for (int i = 0; i < batch; ++i) {
if (target.size() <= i) {
target.push_back(new NVMatrix(rows, cols, true));
}
aPtrs[i] = a[i]->getDevData();
bPtrs[i] = b[i]->getDevData();
tgtPtrs[i] = target[i]->getDevData();
}
// const float** aPtrsDev, **bPtrsDev;
// float **tgtPtrsDev;
// checkCudaErrors(cudaMalloc(&aPtrsDev, batch * sizeof(float*)));
// checkCudaErrors(cudaMalloc(&bPtrsDev, batch * sizeof(float*)));
// checkCudaErrors(cudaMalloc(&tgtPtrsDev, batch * sizeof(float*)));
MemorySegment* aPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
MemorySegment* bPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
MemorySegment* tgtPtrsDev = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(batch * sizeof(float*));
checkCudaErrors(cudaMemcpyAsync(aPtrsDev, aPtrs, batch * sizeof(float*), cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(bPtrsDev, bPtrs, batch * sizeof(float*), cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(tgtPtrsDev, tgtPtrs, batch * sizeof(float*), cudaMemcpyHostToDevice, stream));
batchedMatrixMultiply(a, b, target, scaleTarget, scaleAB, stream, const_cast<const float**>(aPtrsDev->getData<float*>()),
const_cast<const float**>(bPtrsDev->getData<float*>()),
tgtPtrsDev->getData<float*>());
// checkCudaErrors(cudaFree(aPtrsDev));
// checkCudaErrors(cudaFree(bPtrsDev));
// checkCudaErrors(cudaFree(tgtPtrsDev));
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(aPtrsDev);
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(bPtrsDev);
DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).free(tgtPtrsDev);
}
}
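// Usage sketch (hypothetical example; assumes batchedMatrixMultiply is the static helper it
// appears to be, since it only touches static state): multiply many small pairs in one call.
//   NVMatrixV as, bs, cs;                          // cs may start empty; missing targets are
//   for (int i = 0; i < 8; ++i) {                  // allocated as column-major result matrices
//       as.push_back(new NVMatrix(64, 32, true));
//       bs.push_back(new NVMatrix(32, 16, true));
//   }
//   NVMatrix::batchedMatrixMultiply(as, bs, cs, 0, 1);   // cs[i] := as[i] * bs[i]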
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) {
_unaryRandomize(target, rnd, getDefaultStream());
}
template <class Randomizer>
void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd, cudaStream_t stream) {
assert(isRndInitialized());
assert(isContiguous() && target.isContiguous());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
kUnaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK, 0, stream>>>(getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
getLastCudaError("kUnaryRandomize: Kernel execution failed");
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) {
_binaryRandomize(data2, target, rnd, getDefaultStream());
}
template <class Randomizer>
void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd, cudaStream_t stream) {
assert(isRndInitialized());
assert(isContiguous() && data2.isContiguous() && target.isContiguous());
assert(isSameDims(data2));
assert(isTrans() == data2.isTrans());
if (!isSameDims(target)) {
target.resize(*this);
}
assert(isTrans() == target.isTrans());
kBinaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK, 0, stream>>>(getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd);
getLastCudaError("kBinaryRandomize: Kernel execution failed");
}
void NVMatrix::initRandom(unsigned long long seed, int numStreams) {
NVMatrix::initRandom(seed, numStreams, NVMatrix::getDefaultStream());
}
void NVMatrix::initRandom(unsigned long long seed, int numStreams, cudaStream_t stream) {
// printf("init random on device %d\n", getDeviceID());
pthread_mutex_lock(_rndMutex);
assert(!isRndInitialized(true));
int d = getDeviceID();
// _rndDevStates[d] = NULL;
_rndDevThreads[d] = numStreams;
_rndDevStates[d] = DEVICE_MEMORY_MANAGER::getInstance(d).malloc(numStreams * sizeof(curandState));
// checkCudaErrors(cudaMalloc((void **)&_rndDevStates[d], numStreams * sizeof(curandState)));
pthread_mutex_unlock(_rndMutex);
kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK, 0, stream>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one
getLastCudaError("kSetupCurand: Kernel execution failed");
}
void NVMatrix::initRandom(unsigned long long seed) {
initRandom(seed, NUM_RND_STREAMS);
}
void NVMatrix::initRandom() {
NVMatrix::initRandom(time(0));
}
void NVMatrix::initCublas() {
int d = getDeviceID();
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(d) == 0);
CUBLAS_CALL(cublasCreate(&_cublasHandles[d]));
// It appears that cublasCreate causes a host -> device copy on stream 0,
// so we synchronize with it because we run everything else on other
// streams.
syncDevice();
pthread_mutex_unlock(_cublasMutex);
}
void NVMatrix::destroyCublas() {
int d = getDeviceID();
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(d) > 0);
CUBLAS_CALL(cublasDestroy(_cublasHandles[d]));
_cublasHandles.erase(d);
pthread_mutex_unlock(_cublasMutex);
}
cublasHandle_t NVMatrix::getCublasHandle() {
return getCublasHandle(getDeviceID());
}
cublasHandle_t NVMatrix::getCublasHandle(int deviceID) {
pthread_mutex_lock(_cublasMutex);
assert(_cublasHandles.count(deviceID) > 0);
cublasHandle_t h = _cublasHandles[deviceID];
pthread_mutex_unlock(_cublasMutex);
return h;
}
cudaStream_t NVMatrix::getDefaultStream() {
return getDefaultStream(NVMatrix::getDeviceID());
}
cudaStream_t NVMatrix::getDefaultStream(int deviceID) {
if (deviceID >= 0) {
pthread_mutex_lock(_streamMutex);
if (_defaultStreams.count(deviceID) == 0) {
int oldDeviceID = getDeviceID();
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(cudaStreamCreateWithFlags(&_defaultStreams[deviceID], cudaStreamNonBlocking));
NVMatrix::setDeviceID(oldDeviceID);
}
cudaStream_t s = _defaultStreams[deviceID];
pthread_mutex_unlock(_streamMutex);
return s;
}
return 0;
}
void NVMatrix::syncDevice() {
checkCudaErrors(cudaDeviceSynchronize());
}
void NVMatrix::syncStream(cudaStream_t stream) {
checkCudaErrors(cudaStreamSynchronize(stream));
}
void NVMatrix::syncStream() {
syncStream(getDefaultStream());
}
curandState* NVMatrix::getCurandState() {
/*
* Even though we're only reading from the map here, it's important to grab
* the mutex because another thread may be writing to it.
*/
pthread_mutex_lock(_rndMutex);
int d = getDeviceID();
assert(isRndInitialized(true));
curandState* r = _rndDevStates[d]->getData<curandState>();
pthread_mutex_unlock(_rndMutex);
return r;
}
curandState* NVMatrix::getCurandState(int numStreams) {
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
assert(isRndInitialized(true));
bool realloc = numStreams > _rndDevThreads[d];
pthread_mutex_unlock(_rndMutex);
if (realloc) {
destroyRandom();
initRandom(time(0), numStreams);
}
return getCurandState();
}
int NVMatrix::getDataDeviceID() const {
if (getDevData() == NULL) {
return DEVICE_NULL;
}
struct cudaPointerAttributes atts;
checkCudaErrors(cudaPointerGetAttributes(&atts, getDevData()));
return atts.memoryType == cudaMemoryTypeDevice ? atts.device : DEVICE_HOST;
}
int NVMatrix::getDeviceID() {
int d;
checkCudaErrors(cudaGetDevice(&d));
// if (d == 0) {
// raise(SIGABRT);
// }
return d;
}
void NVMatrix::setDeviceID(int d) {
assert(d >= 0);
// printf("Setting device to %d\n", d);
// if (d == 0) {
// raise(SIGABRT);
// }
checkCudaErrors(cudaSetDevice(d));
}
bool NVMatrix::canAccessPeer(int srcDevice, int tgtDevice) {
if (srcDevice == tgtDevice) {
return true;
}
int canAccess;
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccess, srcDevice, tgtDevice));
return canAccess;
}
bool NVMatrix::isRndInitialized(bool haveLock) {
if (!haveLock) {
pthread_mutex_lock(_rndMutex);
}
bool b = _rndDevStates.count(getDeviceID()) != 0;
if (!haveLock) {
pthread_mutex_unlock(_rndMutex);
}
return b;
}
bool NVMatrix::isRndInitialized() {
return isRndInitialized(false);
}
void NVMatrix::destroyRandom() {
int d = getDeviceID();
pthread_mutex_lock(_rndMutex);
assert(isRndInitialized(true));
// checkCudaErrors(cudaFree(_rndDevStates[d]));
DEVICE_MEMORY_MANAGER::getInstance(d).free(_rndDevStates[d]);
_rndDevStates.erase(d);
_rndDevThreads.erase(d);
pthread_mutex_unlock(_rndMutex);
}
void NVMatrix::binarizeProbs() {
binarizeProbs(*this);
}
void NVMatrix::binarizeProbs(NVMatrix& target) {
_unaryRandomize(target, BinarizeUnaryRandomizer());
}
void NVMatrix::randomizeUniform() {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements()));
_unaryRandomize(*this, UniformUnaryRandomizer());
}
void NVMatrix::randomizeGaussian() {
randomizeGaussian(1);
}
void NVMatrix::randomizeGaussian(float stdev) {
randomizeGaussian(0, stdev);
}
void NVMatrix::randomizeGaussian(float mean, float stdev) {
assert(isContiguous());
assert(isRndInitialized());
// CURAND_CALL(curandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev));
_unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev));
}
/*
* Kind of a hack since we don't actually need the contents of this matrix for it,
* so we don't really need a binary randomizer.
*/
void NVMatrix::randomizeGaussian(NVMatrix& stdevs) {
randomizeGaussian(0, stdevs);
}
void NVMatrix::randomizeGaussian(float mean, NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, GaussianBinaryRandomizer(mean));
}
void NVMatrix::randomizeGaussian(float mean, float stdevMult, NVMatrix& stdevs) {
_binaryRandomize(stdevs, *this, ScaledGaussianBinaryRandomizer(mean, stdevMult));
}
void NVMatrix::addGaussianNoise() {
addGaussianNoise(1);
}
void NVMatrix::addGaussianNoise(float stdev) {
addGaussianNoise(stdev, *this);
}
void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) {
_unaryRandomize(target, AddGaussianUnaryRandomizer(stdev));
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) {
addGaussianNoise(stdevs, var, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs) {
addGaussianNoise(stdevs, false, *this);
}
void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) {
if (var) {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>());
} else {
_binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>());
}
}
void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target);
}
void NVMatrix::biggerThan(NVMatrix& b) {
biggerThan(b, *this);
}
void NVMatrix::equals(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Equals(), b, target);
}
void NVMatrix::equals(NVMatrix& m) {
equals(m, *this);
}
void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target);
}
void NVMatrix::biggerThanVector(NVMatrix& vec) {
biggerThanVector(vec, *this);
}
void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const {
assert(startRow >= 0 && startRow <= _numRows);
assert(endRow >= startRow && endRow <= _numRows);
assert(startCol >= 0 && startCol <= _numCols);
assert(endCol >= startCol && endCol <= _numCols);
}
/*
* The only place where stride is supported for now!
* Will ALWAYS return a view of the original data, sometimes non-contiguous.
*/
NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
if (!isTrans()) {
return construct(new MemorySegment(this->getDevData() + startRow * _stride + startCol), endRow - startRow, endCol - startCol, _stride, false);
}
return construct(new MemorySegment(this->getDevData() + startCol * _stride + startRow), endRow - startRow, endCol - startCol, _stride, true);
}
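// Usage sketch (hypothetical example): slice/sliceRows/sliceCols hand back heap-allocated,
// non-owning views that share this matrix's device memory, so writes through the view are
// visible in the original. Only the view object itself must be deleted -- the same pattern
// copy() uses below with "delete srcSlice".
//   NVMatrix& view = m.sliceCols(0, 10);   // columns [0, 10) of m, possibly non-contiguous
//   view.scale(0.5f);                      // scales those columns of m in place
//   delete &view;                          // frees the view object, never the shared data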
/* this will NEVER return a view */
void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const {
endRow = endRow < 0 ? this->_numRows : endRow;
endCol = endCol < 0 ? this->_numCols : endCol;
_checkBounds(startRow, endRow, startCol, endCol);
int sliceRows = endRow - startRow, sliceCols = endCol - startCol;
if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) {
target.resize(sliceRows, sliceCols);
}
this->copy(target, startRow, endRow, startCol, endCol, 0, 0);
}
NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const {
return slice(startRow, endRow, 0, -1);
}
void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const {
slice(startRow, endRow, 0, -1, target);
}
NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const {
return slice(0, -1, startCol, endCol);
}
void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const {
slice(0, -1, startCol, endCol, target);
}
NVMatrixV& NVMatrix::splitRows(int numParts) {
assert(getNumRows() % numParts == 0);
NVMatrixV& v = *new NVMatrixV();
int partSize = getNumRows() / numParts;
for (int p = 0; p < numParts; ++p) {
v.push_back(&sliceRows(p * partSize, (p+1) * partSize));
}
return v;
}
NVMatrixV& NVMatrix::splitCols(int numParts) {
assert(getNumCols() % numParts == 0);
NVMatrixV& v = *new NVMatrixV();
int partSize = getNumCols() / numParts;
for (int p = 0; p < numParts; ++p) {
v.push_back(&sliceCols(p * partSize, (p+1) * partSize));
}
return v;
}
/*
* Guaranteed to not change the data if the number of elements doesn't change.
* So you can use this to "reshape" a matrix.
*/
bool NVMatrix::resize(int numRows, int numCols, bool trans) {
setTrans(trans);
bool reallocated = false;
if (numRows != _numRows || numCols != _numCols) {
assert(_ownsData || (_numElements == numRows * numCols && isContiguous()));
if (_numElements != numRows * numCols) {
if (_numElements > 0) { // free old memory
dealloc();
}
if (numRows * numCols > 0) { // allocate new memory
alloc(numCols * numRows);
} else {
_memSegment = NULL;
}
reallocated = true;
}
_numRows = numRows;
_numCols = numCols;
_numElements = numRows * numCols;
_stride = getLeadingDim();
}
return reallocated;
}
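// Usage sketch (hypothetical example): per the comment above, resize doubles as a cheap
// reshape whenever the element count is unchanged -- no reallocation, no data movement.
//   NVMatrix m(4, 6, false);   // 24 elements
//   m.resize(8, 3);            // still 24 elements: same buffer, new logical shape
//   m.resize(10, 10);          // element count changes: old buffer freed, new one allocated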
bool NVMatrix::resize(int numRows, int numCols) {
return resize(numRows, numCols, isTrans());
}
bool NVMatrix::resize(const NVMatrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
bool NVMatrix::resize(const Matrix& like) {
setTrans(like.isTrans());
return resize(like.getNumRows(), like.getNumCols());
}
void NVMatrix::reshape(int numRows, int numCols) {
assert(isContiguous());
assert(_numElements == numRows*numCols);
_numRows = numRows;
_numCols = numCols;
_stride = getLeadingDim();
}
NVMatrix& NVMatrix::reshaped(int numRows, int numCols) const {
assert(isContiguous());
assert(_numElements == numRows*numCols);
return construct(new MemorySegment(*_memSegment), numRows, numCols, -1, _isTrans);
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol) const {
copy(dest, srcStartRow, srcEndRow, srcStartCol, srcEndCol, destStartRow, destStartCol, getDefaultStream());
}
void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow,
int srcStartCol, int srcEndCol,
int destStartRow, int destStartCol, cudaStream_t stream) const {
srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow;
srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol;
NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol);
NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol);
if (srcSlice->isContiguous() && destSlice->isContiguous() && srcSlice->isSameDims(*destSlice) && srcSlice->isTrans() == destSlice->isTrans()) {
// The commonest case.
checkCudaErrors(cudaMemcpyAsync(destSlice->getDevData(), srcSlice->getDevData(), srcSlice->getNumDataBytes(), cudaMemcpyDefault, stream));
} else {
srcSlice->apply(NVMatrixOps::Identity(), *destSlice, stream);
}
delete srcSlice;
delete destSlice;
}
NVMatrix& NVMatrix::getTranspose() {
return construct(new MemorySegment(*_memSegment), _numCols, _numRows, _stride, !_isTrans);
}
NVMatrix& NVMatrix::getClone() {
return construct(new MemorySegment(*_memSegment), _numRows, _numCols, _stride, _isTrans);
}
void NVMatrix::transpose(NVMatrix& target) {
flipTrans(target);
target.setTrans(!target.isTrans());
target.reshape(target.getNumCols(), target.getNumRows());
}
void NVMatrix::transpose() {
int tmp = _numCols;
_numCols = _numRows;
_numRows = tmp;
_isTrans = !_isTrans;
}
bool NVMatrix::transpose(bool trans) {
bool oldTrans = _isTrans;
if (oldTrans != trans) {
transpose();
}
return oldTrans;
}
/*
* Flips the ordering of the matrix from row-major to column-major and vice versa.
* This creates temporary storage -- not a cheap operation.
*
* This is not equivalent to a "hard transpose". The resultant matrix still has
* the same dimensions, its layout in memory just changes.
*/
NVMatrix& NVMatrix::flipTrans() {
NVMatrix& meTrans = construct(*this);
flipTrans(meTrans);
return meTrans;
}
void NVMatrix::flipTrans(NVMatrix& target) {
flipTrans(target, getDefaultStream());
}
void NVMatrix::flipTrans(NVMatrix& target, cudaStream_t stream) {
assert(&target != this);
target.resize(_numRows, _numCols);
target.setTrans(!isTrans());
// target.printShape("target");
// this->printShape("this");
apply(NVMatrixOps::Identity(), target, stream);
}
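// Usage sketch (hypothetical example): flipTrans keeps the logical dimensions but physically
// reorders the elements into the opposite major order, unlike transpose(), which only
// relabels the existing storage.
//   NVMatrix rowMajor(2, 3, false);
//   NVMatrix& colMajor = rowMajor.flipTrans();   // new 2x3 matrix, column-major layout
//   // element (i, j) has the same value in both; only the storage order differs
//   delete &colMajor;                            // flipTrans() allocates the result on the heap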
void NVMatrix::squaredDiff(NVMatrix& b) {
squaredDiff(b, *this);
}
void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) {
add(b, scaleA, scaleB, target, NVMatrix::getDefaultStream());
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target, cudaStream_t stream) {
if (scaleA == 0) {
b.scale(scaleB, target, stream);
} else if (scaleB == 0) {
scale(scaleA, target, stream);
} else if (scaleA == 1 && scaleB == 1) { // slight optimization
applyBinary(NVMatrixBinaryOps::Add(), b, target, stream);
} else if (scaleA == 1) {
applyBinary(NVMatrixBinaryOps::WeightedAdd1(scaleB), b, target, stream);
} else {
applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target, stream);
}
}
void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) {
add(b, 1, scaleB, target);
}
void NVMatrix::add(NVMatrix& b, NVMatrix& target) {
add(b, 1, target);
}
void NVMatrix::add(NVMatrix& b, float scaleB) {
add(b, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) {
add(b, scaleA, scaleB, *this);
}
void NVMatrix::add(NVMatrix& b) {
add(b, 1, *this);
}
void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) {
add(b, -1, target);
}
void NVMatrix::subtract(NVMatrix& b) {
add(b, -1);
}
void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Multiply(), b, target);
}
void NVMatrix::eltwiseMult(NVMatrix& b) {
eltwiseMult(b, *this);
}
void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) {
applyBinary(NVMatrixBinaryOps::Divide(), b, target);
}
void NVMatrix::eltwiseDivide(NVMatrix& b) {
eltwiseDivide(b, *this);
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) {
tile(timesY, timesX, target, getDefaultStream());
}
void NVMatrix::tile(int timesY, int timesX, NVMatrix& target, cudaStream_t stream) {
assert(isContiguous() && target.isContiguous());
assert(timesX > 0 && timesY > 0);
target.resize(_numRows*timesY, _numCols*timesX);
target.setTrans(_isTrans);
if(!isTrans()) {
kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK, 0, stream>>>(getDevData(), target.getDevData(), _numCols, _numRows, target._numCols, target._numRows);
} else {
kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK, 0, stream>>>(getDevData(), target.getDevData(), _numRows, _numCols, target._numRows, target._numCols);
}
getLastCudaError("Kernel execution failed");
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
addVector(vec, scaleVec, target, getDefaultStream());
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target, cudaStream_t stream) {
applyBinaryV(NVMatrixBinaryOps::ScaledAdd(scaleVec), vec, target, stream);
}
void NVMatrix::addVector(NVMatrix& vec) {
addVector(vec, 1);
}
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
addVector(vec, scaleVec, *this);
}
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
addVector(vec, 1, target);
}
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target);
}
void NVMatrix::equalsVector(NVMatrix& vec) {
equalsVector(vec, *this);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) {
eltwiseMultByVector(vec, target, getDefaultStream());
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target, cudaStream_t stream) {
applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target, stream);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, cudaStream_t stream) {
eltwiseMultByVector(vec, *this, stream);
}
void NVMatrix::eltwiseMultByVector(NVMatrix& vec) {
eltwiseMultByVector(vec, *this);
}
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) {
eltwiseDivideByVector(vec, *this);
}
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target);
}
/*
 * TODO: this is a mess; clean it up. It works pretty fast, but it's too ugly.
* TODO: this function is _really_ bad for very long aggregations of few columns.
*/
template<class Agg, class UnaryOp, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop, cudaStream_t stream) {
assert(axis == 0 || axis == 1);
assert(isContiguous() && target.isContiguous());
assert(&target != this);
int width = _isTrans ? _numRows : _numCols;
int height = _isTrans ? _numCols : _numRows;
target.setTrans(_isTrans);
assert(width > 0);
assert(height > 0);
if((axis == 0 && !_isTrans) || (axis == 1 && _isTrans)) { //col sum
target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
// int height = getFollowingDim();
if ((height <= 2048 || width >= 4096)) {
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width);
assert(numBlocks < NUM_BLOCKS_MAX);
kDumbAggCols<Agg, UnaryOp, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK, 0, stream>>>(getTextureObject(), target.getDevData(), width, height, agg, uop, bop);
getLastCudaError("kDumbAggCols: Kernel execution failed");
} else { // Specialize the case when we have very long columns and few of them
const int sumLength = 128;
NVMatrix tmp(DIVUP(height, sumLength), width);
int numBlocksX = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
int numBlocksY = DIVUP(height, sumLength);
dim3 blocks(numBlocksX, numBlocksY);
dim3 threads(NUM_SUM_COLS_THREADS_PER_BLOCK);
kAggCols<Agg, UnaryOp><<<blocks,threads, 0, stream>>>(getTextureObject(), tmp.getDevData(), width, height, sumLength, agg, uop);
getLastCudaError("kAggCols: Kernel execution failed");
int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
kDumbAggCols<Agg, NVMatrixOps::Identity, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK, 0, stream>>>(tmp.getTextureObject(), target.getDevData(), width, height, agg, NVMatrixOps::Identity(), bop);
getLastCudaError("kDumbAggCols: Kernel execution failed");
}
} else { // row sum
target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
if (width > 1) {
if (height >= 16384) { // linear aggregation
int numBlocksX = 1;
int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
while (numBlocksY > NUM_BLOCKS_MAX) {
numBlocksY = DIVUP(numBlocksY,2);
numBlocksX *= 2;
}
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
if(width <= 16) {
if(width <= 4) {
kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 4><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 8) {
kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 8><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 12) {
kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 12><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else {
kAggShortRows<Agg, UnaryOp, BinaryOp, 1, 16><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
}
} else if(width <= 32) {
kAggShortRows<Agg, UnaryOp, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 48){
kAggShortRows<Agg, UnaryOp, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else if(width <= 64){
kAggShortRows<Agg, UnaryOp, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
} else {
kAggShortRows2<Agg, UnaryOp, BinaryOp><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),width, height, agg, uop, bop);
}
} else {
if (width >= 512) {
// NOTE: this is the only case which I bothered to try to optimize for Kepler
dim3 threads(AWR_NUM_THREADS);
dim3 blocks(1, height);
kAggRows_wholerow_nosync<<<blocks, threads, 0, stream>>>(getDevData(), target.getDevData(), width, height, agg, uop, bop);
} else {
int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
int numThreadsY = 1;
int numBlocksX = DIVUP(width, 2*numThreadsX);
int numBlocksY = std::min(height, NUM_BLOCKS_MAX);
dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
assert(numBlocksX <= NUM_BLOCKS_MAX);
assert(numBlocksY <= NUM_BLOCKS_MAX);
if(width <= 64) {
kAggRows<Agg, UnaryOp, BinaryOp, 32><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 128) {
kAggRows<Agg, UnaryOp, BinaryOp, 64><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 256) {
kAggRows<Agg, UnaryOp, BinaryOp, 128><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else if(width <= 512) {
kAggRows<Agg, UnaryOp, BinaryOp, 256><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
} else {
kAggRows<Agg, UnaryOp, BinaryOp, 512><<<grid, threads, 0, stream>>>(getDevData(), target.getDevData(),
width, height, target.getLeadingDim(), agg, uop, bop);
}
getLastCudaError("agg rows: Kernel execution failed");
}
}
} else {
target.applyBinary(NVMatrixBinaryOps::CompositeSecond<UnaryOp, BinaryOp>(uop, bop), *this, target, stream);
// copy(target, stream);
}
}
}
template<class Agg, class UnaryOp, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, UnaryOp uop, BinaryOp bop) {
_aggregate(axis, target, agg, uop, bop, getDefaultStream());
}
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop) {
_aggregate(axis, target, agg, NVMatrixOps::Identity(), bop, getDefaultStream());
}
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp bop, cudaStream_t stream) {
_aggregate(axis, target, agg, NVMatrixOps::Identity(), bop, stream);
}
template<class Agg, class UnaryOp, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, UnaryOp uop, BinaryOp bop) {
NVMatrix &sumVec = construct();
_aggregate(axis, sumVec, agg, uop, bop);
return sumVec;
}
template<class Agg, class UnaryOp, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, UnaryOp uop, BinaryOp bop, cudaStream_t stream) {
NVMatrix &sumVec = construct();
_aggregate(axis, sumVec, agg, uop, bop, stream);
return sumVec;
}
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp bop) {
return _aggregate(axis, agg, NVMatrixOps::Identity(), bop);
}
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp bop, cudaStream_t stream) {
return _aggregate(axis, agg, NVMatrixOps::Identity(), bop, stream);
}
void NVMatrix::inRangeInc(float lower, float upper) {
inRangeInc(lower, upper, *this);
}
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<false>(lower, upper), target);
}
void NVMatrix::inRangeExc(float lower, float upper) {
inRangeExc(lower, upper, *this);
}
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
apply(NVMatrixOps::InRange<true>(lower, upper), target);
}
void NVMatrix::biggerThanScalar(float scalar) {
biggerThanScalar(scalar, *this);
}
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::BiggerThanScalar(scalar), target);
}
void NVMatrix::smallerThanScalar(float scalar) {
smallerThanScalar(scalar, *this);
}
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::SmallerThanScalar(scalar), target);
}
void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) {
apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target);
}
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::AddScalar(scalar), target);
}
void NVMatrix::addScalar(float scalar) {
addScalar(scalar, *this);
}
void NVMatrix::minWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MinWithScalar(scalar), target);
}
void NVMatrix::minWithScalar(float scalar) {
minWithScalar(scalar, *this);
}
void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) {
apply(NVMatrixOps::MaxWithScalar(scalar), target);
}
void NVMatrix::maxWithScalar(float scalar) {
maxWithScalar(scalar, *this);
}
void NVMatrix::pow(float p, NVMatrix& target) {
apply(NVMatrixOps::Pow(p), target);
}
void NVMatrix::pow(float p) {
pow(p, *this);
}
void NVMatrix::scale(float _scale) {
scale(_scale, *this);
}
void NVMatrix::scale(float _scale, cudaStream_t stream) {
scale(_scale, *this, stream);
}
void NVMatrix::scale(float _scale, NVMatrix& target) {
scale(_scale, target, NVMatrix::getDefaultStream());
}
void NVMatrix::scale(float _scale, NVMatrix& target, cudaStream_t stream) {
if (_scale != 1 || &target != this) { // optimize away scale by 1
if (_scale == 1) {
copy(target, stream);
} else {
apply(NVMatrixOps::MultByScalar(_scale), target, stream);
}
}
}
void NVMatrix::zero() {
apply(NVMatrixOps::Zero());
}
void NVMatrix::zero(NVMatrix& like) {
resize(like);
zero();
}
void NVMatrix::max(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) {
addSum(a, axis, scaleThis, scaleSum, getDefaultStream());
}
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum, cudaStream_t stream) {
if (scaleThis != 0) {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum), stream);
} else {
a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum), stream);
}
}
void NVMatrix::sum(int axis, NVMatrix& target) {
sum(axis, target, getDefaultStream());
}
void NVMatrix::sum(int axis, NVMatrix& target, cudaStream_t stream) {
_aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second(), stream);
}
void NVMatrix::sumOfSquares(int axis, NVMatrix& target) {
sumOfSquares(axis, target, getDefaultStream());
}
void NVMatrix::sumOfSquares(int axis, NVMatrix& target, cudaStream_t stream) {
_aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixOps::Square(), NVMatrixBinaryOps::Second(), stream);
}
void NVMatrix::min(int axis, NVMatrix& target) {
_aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::max(int axis) {
return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sum(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::min(int axis) {
return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}
NVMatrix& NVMatrix::sumOfSquares(int axis) {
return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixOps::Square(), NVMatrixBinaryOps::Second());
}
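// Axis convention sketch (hypothetical example), following the resize logic in _aggregate:
//   NVMatrix m(100, 10, false);
//   NVMatrix colSums, rowSums;
//   m.sum(0, colSums);   // collapse rows: colSums becomes 1 x 10 (one value per column)
//   m.sum(1, rowSums);   // collapse columns: rowSums becomes 100 x 1 (one value per row)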
void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads) {
*threads = dim3(DP_BLOCKSIZE);
*blocks = dim3(std::min(CPUSUM_MAX, DIVUP(n, DP_BLOCKSIZE)));
}
float NVMatrix::mean() {
return sum() / getNumElements();
}
float NVMatrix::sum() {
return _totalAgg(NVMatrixAggs::Sum());
}
float NVMatrix::sum(NVMatrix& tmpbuf) {
return _totalAgg(NVMatrixAggs::Sum(), tmpbuf, getDefaultStream());
}
float NVMatrix::max() {
return _totalAgg(NVMatrixAggs::Max());
}
float NVMatrix::min() {
return _totalAgg(NVMatrixAggs::Min());
}
float NVMatrix::countNan() {
return _totalAgg(NVMatrixAggs::CountNan());
}
float NVMatrix::countInf() {
return _totalAgg(NVMatrixAggs::CountInf());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg) {
return _totalAgg(agg, getDefaultStream());
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg, cudaStream_t stream) {
NVMatrix tmp;
return _totalAgg(agg, tmp, stream);
}
template<class Agg>
float NVMatrix::_totalAgg(Agg agg, NVMatrix& tmpbuf, cudaStream_t stream) {
assert(isContiguous());
dim3 blocks, threads;
// Sum most of it on GPU
_sum_setParams(getNumElements(), &blocks, &threads);
tmpbuf.resize(1, blocks.x);
kTotalAgg<<<blocks, threads, 0, stream>>>(getDevData(), tmpbuf.getDevData(), getNumElements(), agg);
getLastCudaError("kTotalAgg: Kernel execution failed");
// Don't need to sync because we copyToHost in the same stream, so it's serialized
// NVMatrix::syncStream(stream);
return tmpbuf.cpuAgg(agg, stream);
}
template<class Agg>
float NVMatrix::cpuAgg(Agg agg, cudaStream_t stream) {
Matrix bufCPU(getNumRows(), getNumCols());
copyToHost(bufCPU, false, stream);
if (getNumElements() > 1) { // Sum remainder on CPU
if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) {
return bufCPU.sum();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) {
return bufCPU.max();
} else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) {
return bufCPU.min();
} else if (typeid(Agg) == typeid(NVMatrixAggs::CountNan)) {
return bufCPU.hasNan(); //yea, it's not the same, who cares
} else if (typeid(Agg) == typeid(NVMatrixAggs::CountInf)) {
return bufCPU.hasInf();
} else {
assert(false);
}
}
return bufCPU(0,0);
}
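// Reduction strategy sketch: _totalAgg first collapses the whole matrix into at most
// CPUSUM_MAX per-block partial results on the GPU (see _sum_setParams), then cpuAgg copies
// that short vector to the host and finishes the aggregation on the CPU, which is cheaper
// than launching another kernel for so few values.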
float NVMatrix::dotProduct(NVMatrix& b) {
return dotProduct(b, getDefaultStream());
}
float NVMatrix::dotProduct(NVMatrix& b, cudaStream_t stream) {
NVMatrix tmp;
return dotProduct(b, tmp, stream);
}
/*
* Fast dot product only for matrices with same transposedness.
*/
float NVMatrix::dotProduct(NVMatrix& b, NVMatrix& tmp, cudaStream_t stream) {
assert(isContiguous() && b.isContiguous());
assert(isSameDims(b));
assert(isTrans() == b.isTrans()); // see?
dim3 blocks, threads;
_sum_setParams(getNumElements(), &blocks, &threads);
// NVMatrix target(1, blocks.x);
tmp.resize(1, blocks.x);
kDotProduct_r<<<blocks, threads, 0, stream>>>(getDevData(), b.getDevData(), tmp.getDevData(), getNumElements());
getLastCudaError("kDotProduct_r: Kernel execution failed");
// cudaThreadSynchronize();
// syncStream(stream);
// return tmp._totalAgg(NVMatrixAggs::Sum(), stream);
return tmp.cpuAgg(NVMatrixAggs::Sum(), stream);
}
float NVMatrix::norm2() {
return dotProduct(*this);
}
float NVMatrix::norm() {
return sqrt(norm2());
}
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
// cudaThreadSynchronize();
syncDevice();
Matrix hm = Matrix(_numRows, _numCols);
copyToHost(hm);
hm.print(startRow, rows, startCol, cols);
}
void NVMatrix::print(int rows, int cols) const {
print(0, rows, 0, cols);
}
void NVMatrix::printShape(const char* name) const {
printf("%s: %dx%d\n", name, _numRows, _numCols);
}
void NVMatrix::alloc(int numElements) {
_memSegment = DEVICE_MEMORY_MANAGER::getInstance(getDeviceID()).malloc(numElements * sizeof(float));
}
void NVMatrix::dealloc() {
DEVICE_MEMORY_MANAGER::getInstance(_memSegment->getDeviceID()).free(_memSegment);
_memSegment = NULL;
deallocTexture();
}
void NVMatrix::deallocTexture() {
if (_texObj != 0) {
checkCudaErrors(cudaDestroyTextureObject(_texObj));
_texObj = 0;
}
}
cudaTextureObject_t NVMatrix::getTextureObject() {
if (_texObj == 0) {
assert(isContiguous());
//size_t memFree, memTotal;
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = getDevData();
resDesc.res.linear.sizeInBytes = getNumDataBytes();
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
checkCudaErrors(cudaCreateTextureObject(&_texObj, &resDesc, &texDesc, NULL));
}
assert(_texObj != 0);
return _texObj;
}
NVMatrix& NVMatrix::construct() const {
return *new NVMatrix();
}
NVMatrix& NVMatrix::construct(bool isTrans) const {
return *new NVMatrix(isTrans);
}
NVMatrix& NVMatrix::construct(int numRows, int numCols, bool isTrans) const {
return *new NVMatrix(numRows, numCols, isTrans);
}
NVMatrix& NVMatrix::construct(const Matrix& like, bool copy) const {
return *new NVMatrix(like, copy);
}
NVMatrix& NVMatrix::construct(const NVMatrix& like, bool copy) const {
return *new NVMatrix(like, copy);
}
NVMatrix& NVMatrix::construct(const NVMatrix& like) const {
return *new NVMatrix(like);
}
NVMatrix& NVMatrix::construct(const Matrix& like) const {
return *new NVMatrix(like);
}
NVMatrix& NVMatrix::construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const {
return *new NVMatrix(mem, numRows, numCols, stride, isTrans);
}
/* ================
* HostNVMatrix
* ================
*/
HostNVMatrix::~HostNVMatrix() {
if (_ownsData && _numElements > 0) {
dealloc();
} else {
// dealloc frees the mem segment. But if this is a view,
// then we need to delete the mem segment object.
// assert(_memSegment == NULL || _memSegment->getSize() == 0);
delete _memSegment;
}
_deleted = true;
}
HostNVMatrix::HostNVMatrix() : NVMatrix() {
_init(false);
}
HostNVMatrix::HostNVMatrix(bool isTrans) {
_init(isTrans);
}
HostNVMatrix::HostNVMatrix(int numRows, int numCols, bool isTrans) {
_init(isTrans);
resize(numRows, numCols);
}
HostNVMatrix::HostNVMatrix(const Matrix& like, bool copy) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
copyFromHost(like);
}
}
HostNVMatrix::HostNVMatrix(const NVMatrix& like, bool copy) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
if (copy) {
like.copy(*this);
}
}
HostNVMatrix::HostNVMatrix(const NVMatrix& like) {
_init(like.isTrans());
resize(like.getNumRows(), like.getNumCols());
}
HostNVMatrix::HostNVMatrix(const Matrix& like) {
_init(false);
resize(like.getNumRows(), like.getNumCols());
}
HostNVMatrix::HostNVMatrix(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans)
: NVMatrix(mem, numRows, numCols, stride, isTrans) {
}
NVMatrix& HostNVMatrix::construct() const {
return *new HostNVMatrix();
}
NVMatrix& HostNVMatrix::construct(bool isTrans) const {
return *new HostNVMatrix(isTrans);
}
NVMatrix& HostNVMatrix::construct(int numRows, int numCols, bool isTrans) const {
return *new HostNVMatrix(numRows, numCols, isTrans);
}
NVMatrix& HostNVMatrix::construct(const Matrix& like, bool copy) const {
return *new HostNVMatrix(like, copy);
}
NVMatrix& HostNVMatrix::construct(const NVMatrix& like, bool copy) const {
return *new HostNVMatrix(like, copy);
}
NVMatrix& HostNVMatrix::construct(const NVMatrix& like) const {
return *new HostNVMatrix(like);
}
NVMatrix& HostNVMatrix::construct(const Matrix& like) const {
return *new HostNVMatrix(like);
}
NVMatrix& HostNVMatrix::construct(MemorySegment* mem, int numRows, int numCols, int stride, bool isTrans) const {
return *new HostNVMatrix(mem, numRows, numCols, stride, isTrans);
}
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) {
if (resizeTarget) {
resize(hostMatrix);
} else {
assert(isSameDims(hostMatrix));
}
setTrans(hostMatrix.isTrans());
if (getNumElements() > 0) {
checkCudaErrors(cudaMemcpy2D(getDevData(), _stride * sizeof(float), hostMatrix.getData(),
hostMatrix.getLeadingDim() * sizeof(float), getLeadingDim() * sizeof(float),
getFollowingDim(), cudaMemcpyHostToHost));
// syncStream(stream);
}
}
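// Pitched-copy sketch for the cudaMemcpy2D calls in copyFromHost above and copyToHost below
// (standard cudaMemcpy2D semantics: dst, dpitch, src, spitch, width-in-bytes, height, kind):
//   dpitch/spitch = allocation stride of each side, in bytes
//   width  = getLeadingDim() * sizeof(float)  -- meaningful bytes per leading-dimension line
//   height = getFollowingDim()                -- the number of such lines
// Both sides live in host memory here (HostNVMatrix), hence cudaMemcpyHostToHost.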
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeTarget) {
copyFromHost(hostMatrix, resizeTarget, 0);
}
void HostNVMatrix::copyFromHost(const Matrix& hostMatrix) {
copyFromHost(hostMatrix, false, 0);
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget, cudaStream_t stream) const {
if (resizeTarget) {
hostMatrix.resize(getNumRows(), getNumCols());
} else {
assert(isSameDims(hostMatrix));
}
hostMatrix.setTrans(_isTrans);
if (getNumElements() > 0) {
checkCudaErrors(cudaMemcpy2D(hostMatrix.getData(), hostMatrix.getLeadingDim() * sizeof(float),
getDevData(), _stride * sizeof(float), getLeadingDim() * sizeof(float),
getFollowingDim(), cudaMemcpyHostToHost));
// syncStream(stream);
}
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const {
copyToHost(hostMatrix, resizeTarget, 0);
}
void HostNVMatrix::copyToHost(Matrix& hostMatrix) const {
copyToHost(hostMatrix, false, 0);
}
void HostNVMatrix::alloc(int numElements) {
// checkCudaErrors(cudaHostAlloc(&_devData, numElements * sizeof(float), cudaHostAllocPortable));
_memSegment = HOST_MEMORY_MANAGER::getInstance().malloc(numElements * sizeof(float));
// _memSegment = FastHostMemoryManager::getInstance().malloc(numElements * sizeof(float));
}
void HostNVMatrix::dealloc() {
// FastHostMemoryManager::getInstance().free(_memSegment);
HOST_MEMORY_MANAGER::getInstance().free(_memSegment);
_memSegment = NULL;
// checkCudaErrors(cudaFreeHost(_devData));
}
cudaTextureObject_t HostNVMatrix::getTextureObject() {
assert(false);
return 0;
}
|
41cac4b88015bcb8bff8a7e3e08bd3a07ecb892e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void windowHann(float* idata, int length)
{
int tidx = threadIdx.x + blockIdx.x*blockDim.x;
if (tidx < length)
{
idata[tidx] = 0.5*(1 + cos(2*tidx*PI_F / (length - 1)));
}
} | 41cac4b88015bcb8bff8a7e3e08bd3a07ecb892e.cu | #include "includes.h"
__global__ void windowHann(float* idata, int length)
{
int tidx = threadIdx.x + blockIdx.x*blockDim.x;
if (tidx < length)
{
idata[tidx] = 0.5*(1 + cos(2*tidx*PI_F / (length - 1)));
}
} |
14b8028b79942f8819518534b49a27eea0b77098.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void initialize(int * arr) {
arr[blockIdx.x] = 0;
}
int main(int argc, char ** argv) {
int * arr;
int * d_arr;
int n = 32;
int size = n * sizeof(int);
arr = (int *)malloc(size);
hipMalloc((void **) &d_arr, size);
hipMemcpy(d_arr, arr, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize), dim3(n), dim3(1), 0, 0, d_arr);
hipMemcpy(arr, d_arr, size, hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
printf("%d ", arr[i]);
}
printf("\n");
}
| 14b8028b79942f8819518534b49a27eea0b77098.cu | #include <stdio.h>
#include <cuda.h>
__global__ void initialize(int * arr) {
arr[blockIdx.x] = 0;
}
int main(int argc, char ** argv) {
int * arr;
int * d_arr;
int n = 32;
int size = n * sizeof(int);
arr = (int *)malloc(size);
cudaMalloc((void **) &d_arr, size);
cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice);
initialize<<<n, 1>>>(d_arr);
cudaMemcpy(arr, d_arr, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
printf("%d ", arr[i]);
}
printf("\n");
}
|
c37a07e383ea09ee73f8b4067522124ce4b28350.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, TIndex block_nitems, const TInd* indices, const TData* vals, TData* dst) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
atomicAdd(&dst[dst_idx], vals[i]);
}
}
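// Worked example (hypothetical values): indices = [1, 3, 1], block_nitems = 2, and vals
// flattened as [a0 a1 | b0 b1 | c0 c1]. Each i picks indices[i / block_nitems] as the output
// row and i % block_nitems as the column, so value rows 0 and 2 both land in output row 1
// and atomicAdd accumulates them: row 1 = [a0 + c0, a1 + c1], row 3 = [b0, b1], others stay 0.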
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.ndim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.ndim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.size(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.dims();
shape[0] = output_first_dim;
auto* output = Output(0);
output->Resize(shape);
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->size(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->size(), output_first_dim * block_nitems);
hipLaunchKernelGGL(( SparseToDenseKernel<TInd, TData>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
| c37a07e383ea09ee73f8b4067522124ce4b28350.cu | #include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, TIndex block_nitems, const TInd* indices, const TData* vals, TData* dst) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
atomicAdd(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.ndim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.ndim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.size(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.dims();
shape[0] = output_first_dim;
auto* output = Output(0);
output->Resize(shape);
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->size(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->size(), output_first_dim * block_nitems);
SparseToDenseKernel<TInd, TData><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
|
4c82c289ecedfe7630a318ddc4b17ac52d99d25d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab2.cu normal z -> d, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// These routines merge multiple kernels from dmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// Note that only the CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_dreduce_kernel_spmv1(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_dbicgmerge_spmv1_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * p,
double * r,
double * v,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_dbicgstab_alphakernel(
double * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
double tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_d_matrix
system matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dp magmaDouble_ptr
input vector p
@param[in]
dr magmaDouble_ptr
input vector r
@param[in]
dv magmaDouble_ptr
output vector v
@param[in,out]
skp magmaDouble_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_spmv1(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dp,
magmaDouble_ptr dr,
magmaDouble_ptr dv,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_dbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
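/*
 * The skp array acts as a small device-side parameter block shared by the
 * merged kernels. The slot names below are an assumption inferred from the
 * kernels in this file (alphakernel, omegakernel, betakernel and the copies
 * issued by the three host wrappers); they are not taken from the MAGMA headers.
 */
enum dbicgmerge_skp_slot {
    SKP_ALPHA   = 0,  /* holds the dot product written by spmv1, then alpha = skp[4] / skp[0] */
    SKP_BETA    = 1,  /* beta = (skp[4] / skp[3]) * (skp[0] / skp[2])                         */
    SKP_OMEGA   = 2,  /* omega = skp[6] / skp[7]                                              */
    SKP_RHO_OLD = 3,  /* previous rho, copied from skp[4] by the omega kernel                 */
    SKP_RHO_NEW = 4,  /* <rr, r>, filled by magma_dbicgmerge_xrbeta                           */
    SKP_RNORM2  = 5,  /* <r, r>, filled by magma_dbicgmerge_xrbeta                            */
    SKP_ST      = 6,  /* <s, t>, filled by magma_dbicgmerge_spmv2                             */
    SKP_TT      = 7   /* <t, t>, filled by magma_dbicgmerge_spmv2                             */
};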
// accelerated block reduction for multiple vectors
__global__ void
magma_dreduce_kernel_spmv2(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_dbicgmerge_spmv2_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * s,
double * t,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
double tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_dbicgstab_omegakernel(
double * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_d_matrix
input matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
ds magmaDouble_ptr
input vector s
@param[in]
dt magmaDouble_ptr
output vector t
@param[in,out]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_spmv2(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr ds,
magmaDouble_ptr dt,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_dbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
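/*
 * A worked sizing example for the reduction driver above (illustrative only):
 * with n = 100000 and local_block_size = 256, the merged SpMV kernel leaves
 * Gs.x = 391 partial sums per dot product in d1. The while loop then runs a
 * single time: Gs_next.x = magma_ceildiv(391, 256) = 2, so one block of
 * Bs.x/2 = 128 threads strides over the 391 partials of each of the two dot
 * products, Gs.x drops to 1, and after the buffer swap the final <s,t> and
 * <t,t> values are copied from aux1 (= d2) into skp[6] and skp[7].
 */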
__global__ void
magma_dbicgmerge_xrbeta_kernel(
int n,
double * rr,
double * r,
double * p,
double * s,
double * t,
double * x,
double * skp,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
double alpha=skp[0];
double omega=skp[2];
if( i<n ){
double sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
double tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_dbicgstab_betakernel(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
double tmp1 = skp[4]/skp[3];
double tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the update of x and r with the dot products
needed for the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
rr magmaDouble_ptr
input vector rr
@param[in]
r magmaDouble_ptr
input/output vector r
@param[in]
p magmaDouble_ptr
input vector p
@param[in]
s magmaDouble_ptr
input vector s
@param[in]
t magmaDouble_ptr
input vector t
@param[out]
x magmaDouble_ptr
output vector x
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr rr,
magmaDouble_ptr r,
magmaDouble_ptr p,
magmaDouble_ptr s,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_dbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
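/*
 * A rough usage sketch (hypothetical, not part of the MAGMA sources) of how one
 * merged BiCGSTAB iteration could chain the three wrappers in this file. It
 * assumes A is stored in CSR, that rr is the shadow residual, that d1/d2 are
 * scratch buffers large enough for the block reductions, that skp is the
 * 8-entry device parameter block, and that the p and s vector updates required
 * between these calls are performed elsewhere (e.g. by the dmergebicgstab
 * kernels).
 */
magma_int_t
example_dbicgmerge_iteration(
    magma_d_matrix A,
    magmaDouble_ptr d1, magmaDouble_ptr d2,
    magmaDouble_ptr rr, magmaDouble_ptr r, magmaDouble_ptr p,
    magmaDouble_ptr s,  magmaDouble_ptr t, magmaDouble_ptr v,
    magmaDouble_ptr x,  magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // v = A p and skp[0] = alpha
    magma_dbicgmerge_spmv1( A, d1, d2, p, rr, v, skp, queue );
    // ... s = r - alpha * v would be formed here by the caller ...
    // t = A s, skp[6] = <s,t>, skp[7] = <t,t>, skp[2] = omega
    magma_dbicgmerge_spmv2( A, d1, d2, s, t, skp, queue );
    // x += alpha*p + omega*s, r = s - omega*t, skp[4] = <rr,r>, skp[1] = beta
    magma_dbicgmerge_xrbeta( A.num_rows, d1, d2, rr, r, p, s, t, x, skp, queue );
    return MAGMA_SUCCESS;
}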
| 4c82c289ecedfe7630a318ddc4b17ac52d99d25d.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab2.cu normal z -> d, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// These routines merge multiple kernels from dmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_dreduce_kernel_spmv1(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_dbicgmerge_spmv1_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * p,
double * r,
double * v,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_dbicgstab_alphakernel(
double * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
double tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_d_matrix
system matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dp magmaDouble_ptr
input vector p
@param[in]
dr magmaDouble_ptr
input vector r
@param[in]
dv magmaDouble_ptr
output vector v
@param[in,out]
skp magmaDouble_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_spmv1(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dp,
magmaDouble_ptr dr,
magmaDouble_ptr dv,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_dbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_dreduce_kernel_spmv2(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_dbicgmerge_spmv2_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * s,
double * t,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
double tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_dbicgstab_omegakernel(
double * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_d_matrix
input matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
ds magmaDouble_ptr
input vector s
@param[in]
dt magmaDouble_ptr
output vector t
@param[in,out]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_spmv2(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr ds,
magmaDouble_ptr dt,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_dbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge_xrbeta_kernel(
int n,
double * rr,
double * r,
double * p,
double * s,
double * t,
double * x,
double * skp,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
double alpha=skp[0];
double omega=skp[2];
if( i<n ){
double sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
double tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_dbicgstab_betakernel(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
double tmp1 = skp[4]/skp[3];
double tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the update of x and r with the dot products
needed for the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
rr magmaDouble_ptr
input vector rr
@param[in]
r magmaDouble_ptr
input/output vector r
@param[in]
p magmaDouble_ptr
input vector p
@param[in]
s magmaDouble_ptr
input vector s
@param[in]
t magmaDouble_ptr
input vector t
@param[out]
x magmaDouble_ptr
output vector x
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr rr,
magmaDouble_ptr r,
magmaDouble_ptr p,
magmaDouble_ptr s,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_dbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
d7751f45d58bdaceedaba6017d47b5099cf82249.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2016 Alexander Terenin
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// utility script to print sizeof(hiprandState_t), which is nowhere to be found in JCuda
int main() {
hiprandState_t *states;
hiprandStatePhilox4_32_10_t *philox;
curandStateMRG32k3a *mrg;
hipMalloc((void **)&states, 64 * 64 * sizeof(hiprandState_t));
hipMalloc((void **)&philox, 64 * 64 * sizeof(hiprandStatePhilox4_32_10_t));
hipMalloc((void **)&mrg, 64 * 64 * sizeof(curandStateMRG32k3a));
printf("sizeof(hiprandState_t) %lu\n",sizeof(hiprandState_t));
printf("sizeof(hiprandStatePhilox4_32_10_t) %lu\n",sizeof(hiprandStatePhilox4_32_10_t));
printf("sizeof(curandStateMRG32k3a) %lu\n",sizeof(curandStateMRG32k3a));
}
| d7751f45d58bdaceedaba6017d47b5099cf82249.cu | /*
* Copyright 2016 Alexander Terenin
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
// utility script to print sizeof(curandState), which is nowhere to be found in JCuda
int main() {
curandState *states;
curandStatePhilox4_32_10_t *philox;
curandStateMRG32k3a *mrg;
cudaMalloc((void **)&states, 64 * 64 * sizeof(curandState));
cudaMalloc((void **)&philox, 64 * 64 * sizeof(curandStatePhilox4_32_10_t));
cudaMalloc((void **)&mrg, 64 * 64 * sizeof(curandStateMRG32k3a));
printf("sizeof(curandState) %lu\n",sizeof(curandState));
printf("sizeof(curandStatePhilox4_32_10_t) %lu\n",sizeof(curandStatePhilox4_32_10_t));
printf("sizeof(curandStateMRG32k3a) %lu\n",sizeof(curandStateMRG32k3a));
}
|
23cb1494483a6b5e7f57c1c03747448cffcf324c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @File main.cu
*
* The main file of the project
*
 * Parallel Programming on GPUs (PCG 2020)
 * Project no. 1 (cuda)
* Login: xmarci10
*/
#include <sys/time.h>
#include <cstdio>
#include <cmath>
#include "nbody.h"
#include "h5Helper.h"
/**
* Main rotine
* @param argc
* @param argv
* @return
*/
int main(int argc, char **argv)
{
// Time measurement
struct timeval t1, t2;
if (argc != 10)
{
printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intensity> <reduction threads> <reduction threads/block> <input> <output>\n");
exit(1);
}
// Number of particles
const int N = std::stoi(argv[1]);
// Length of time step
const float dt = std::stof(argv[2]);
// Number of steps
const int steps = std::stoi(argv[3]);
// Number of thread blocks
const int thr_blc = std::stoi(argv[4]);
// Write frequency
int writeFreq = std::stoi(argv[5]);
// number of reduction threads
const int red_thr = std::stoi(argv[6]);
// Number of reduction threads/blocks
const int red_thr_blc = std::stoi(argv[7]);
// Size of the simulation CUDA grid - number of blocks
const size_t simulationGrid = (N + thr_blc - 1) / thr_blc;
// Size of the reduction CUDA grid - number of blocks
const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc;
// Size of the shared memory used in calculation_velocity kernel
const size_t shared_mem_size = thr_blc * 7 * sizeof(float);
// Size of the shared memory used in centerOfMass kernel
const size_t reduction_shared_mem_size = red_thr_blc * 4 * sizeof(float);
// Log benchmark setup
printf("N: %d\n", N);
printf("dt: %f\n", dt);
printf("steps: %d\n", steps);
printf("threads/block: %d\n", thr_blc);
printf("blocks/grid: %lu\n", simulationGrid);
printf("reduction threads/block: %d\n", red_thr_blc);
printf("reduction blocks/grid: %lu\n", reductionGrid);
const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0;
writeFreq = (writeFreq > 0) ? writeFreq : 0;
// CPU side memory allocation
t_particles particles_cpu;
particles_cpu.pos_x = (float*)malloc(N*sizeof(float));
particles_cpu.pos_y = (float*)malloc(N*sizeof(float));
particles_cpu.pos_z = (float*)malloc(N*sizeof(float));
particles_cpu.vel_x = (float*)malloc(N*sizeof(float));
particles_cpu.vel_y = (float*)malloc(N*sizeof(float));
particles_cpu.vel_z = (float*)malloc(N*sizeof(float));
particles_cpu.weight = (float*)malloc(N*sizeof(float));
/*
* Caution! Create only after CPU side allocation
* parameters:
* Stride of two Offset of the first
* Data pointer consecutive elements element in floats,
* in floats, not bytes not bytes
*/
MemDesc md(
particles_cpu.pos_x, 1, 0, // Position in X
particles_cpu.pos_y, 1, 0, // Position in Y
particles_cpu.pos_z, 1, 0, // Position in Z
particles_cpu.vel_x, 1, 0, // Velocity in X
particles_cpu.vel_y, 1, 0, // Velocity in Y
particles_cpu.vel_z, 1, 0, // Velocity in Z
particles_cpu.weight, 1, 0, // Weight
N, // Number of particles
recordsNum); // Number of records in output file
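/*
 * A hypothetical illustration of the stride/offset parameters above (not used
 * in this project): if the particles were instead stored interleaved in a
 * single float array `data` with 7 floats per particle
 * (x, y, z, vx, vy, vz, weight), the same descriptor would be built with a
 * stride of 7 and offsets 0..6, e.g.
 *
 *   MemDesc md_aos(
 *       data, 7, 0,   // Position in X
 *       data, 7, 1,   // Position in Y
 *       data, 7, 2,   // Position in Z
 *       data, 7, 3,   // Velocity in X
 *       data, 7, 4,   // Velocity in Y
 *       data, 7, 5,   // Velocity in Z
 *       data, 7, 6,   // Weight
 *       N, recordsNum);
 */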
// Initialisation of helper class and loading of input data
H5Helper h5Helper(argv[8], argv[9], md);
try
{
h5Helper.init();
h5Helper.readParticleData();
}
catch (const std::exception& e)
{
std::cerr<<e.what()<<std::endl;
return -1;
}
// GPU side memory allocation
// Step 3.*
float4 *centerOfMassGPU;
int *lock;
hipMalloc(&centerOfMassGPU, 4*sizeof(float));
hipMalloc(&lock, sizeof(int));
// Step 0-2
t_particles particles_gpuIn;
t_particles particles_gpuOut;
t_particles particles_tmp;
hipMalloc(&particles_gpuIn.pos_x, N*sizeof(float));
hipMalloc(&particles_gpuIn.pos_y, N*sizeof(float));
hipMalloc(&particles_gpuIn.pos_z, N*sizeof(float));
hipMalloc(&particles_gpuIn.vel_x, N*sizeof(float));
hipMalloc(&particles_gpuIn.vel_y, N*sizeof(float));
hipMalloc(&particles_gpuIn.vel_z, N*sizeof(float));
hipMalloc(&particles_gpuIn.weight, N*sizeof(float));
hipMalloc(&particles_gpuOut.pos_x, N*sizeof(float));
hipMalloc(&particles_gpuOut.pos_y, N*sizeof(float));
hipMalloc(&particles_gpuOut.pos_z, N*sizeof(float));
hipMalloc(&particles_gpuOut.vel_x, N*sizeof(float));
hipMalloc(&particles_gpuOut.vel_y, N*sizeof(float));
hipMalloc(&particles_gpuOut.vel_z, N*sizeof(float));
hipMalloc(&particles_gpuOut.weight, N*sizeof(float));
// Transfer data to GPU
// Step 3.*
hipMemset(centerOfMassGPU, 0.0f, 4*sizeof(float));
hipMemset(lock, 0, sizeof(int));
// Step 0-2
hipMemcpy(particles_gpuIn.pos_x, particles_cpu.pos_x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.pos_y, particles_cpu.pos_y, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.pos_z, particles_cpu.pos_z, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.vel_x, particles_cpu.vel_x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.vel_y, particles_cpu.vel_y, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.vel_z, particles_cpu.vel_z, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuIn.weight, particles_cpu.weight, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(particles_gpuOut.weight, particles_cpu.weight, N*sizeof(float), hipMemcpyHostToDevice);
gettimeofday(&t1, 0);
for(int s = 0; s < steps; s++)
{
// Kernel invocation
hipLaunchKernelGGL(( calculate_velocity), dim3(simulationGrid), dim3(thr_blc), shared_mem_size, 0, particles_gpuIn, particles_gpuOut, N, dt);
// Swap pointers
particles_tmp = particles_gpuOut;
particles_gpuOut = particles_gpuIn;
particles_gpuIn = particles_tmp;
}
hipDeviceSynchronize();
// Kernel invocation
hipLaunchKernelGGL(( centerOfMass), dim3(reductionGrid), dim3(red_thr_blc), reduction_shared_mem_size, 0, particles_gpuIn,
&centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N);
gettimeofday(&t2, 0);
// Approximate simulation wall time
double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("Time: %f s\n", t);
float4 comOnGPU;
// Transfer results back to the CPU
// Step 3.*
hipMemcpy(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), hipMemcpyDeviceToHost);
// Step 0-2
hipMemcpy(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.vel_z, particles_gpuIn.vel_z, N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(particles_cpu.weight, particles_gpuIn.weight, N*sizeof(float), hipMemcpyDeviceToHost);
// CPU completes the calculation of CenterOfMass
comOnGPU.x = comOnGPU.x / comOnGPU.w;
comOnGPU.y = comOnGPU.y / comOnGPU.w;
comOnGPU.z = comOnGPU.z / comOnGPU.w;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
float4 comOnCPU = centerOfMassCPU(md);
std::cout << "Center of mass on CPU:" << std::endl
<< comOnCPU.x <<", "
<< comOnCPU.y <<", "
<< comOnCPU.z <<", "
<< comOnCPU.w
<< std::endl;
std::cout << "Center of mass on GPU:" << std::endl
<< comOnGPU.x<<", "
<< comOnGPU.y<<", "
<< comOnGPU.z<<", "
<< comOnGPU.w
<< std::endl;
// Writing final values to the file
h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w);
h5Helper.writeParticleDataFinal();
// Free CPU memory
free(particles_cpu.pos_x );
free(particles_cpu.pos_y );
free(particles_cpu.pos_z );
free(particles_cpu.vel_x );
free(particles_cpu.vel_y );
free(particles_cpu.vel_z );
free(particles_cpu.weight);
// Free GPU memory
hipFree(particles_gpuIn.pos_x);
hipFree(particles_gpuIn.pos_y);
hipFree(particles_gpuIn.pos_z);
hipFree(particles_gpuIn.vel_x);
hipFree(particles_gpuIn.vel_y);
hipFree(particles_gpuIn.vel_z);
hipFree(particles_gpuIn.weight);
hipFree(particles_gpuOut.pos_x);
hipFree(particles_gpuOut.pos_y);
hipFree(particles_gpuOut.pos_z);
hipFree(particles_gpuOut.vel_x);
hipFree(particles_gpuOut.vel_y);
hipFree(particles_gpuOut.vel_z);
hipFree(particles_gpuOut.weight);
hipFree(centerOfMassGPU);
hipFree(lock);
return 0;
}// end of main
//----------------------------------------------------------------------------------------------------------------------
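/*
 * A rough serial sketch (hypothetical, separate from the project's
 * centerOfMassCPU helper) of the quantity the centerOfMass kernel reduces:
 * the weighted sums sum(m_i * x_i), sum(m_i * y_i), sum(m_i * z_i) and
 * sum(m_i), which main() finishes by dividing through by the total weight.
 */
static float4 centerOfMassSerial(const t_particles& p, int n)
{
  float4 com = {0.0f, 0.0f, 0.0f, 0.0f};
  for (int i = 0; i < n; i++)
  {
    com.x += p.weight[i] * p.pos_x[i];
    com.y += p.weight[i] * p.pos_y[i];
    com.z += p.weight[i] * p.pos_z[i];
    com.w += p.weight[i];
  }
  com.x /= com.w;
  com.y /= com.w;
  com.z /= com.w;
  return com;
}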
| 23cb1494483a6b5e7f57c1c03747448cffcf324c.cu | /**
* @File main.cu
*
* The main file of the project
*
 * Parallel Programming on GPUs (PCG 2020)
 * Project no. 1 (cuda)
* Login: xmarci10
*/
#include <sys/time.h>
#include <cstdio>
#include <cmath>
#include "nbody.h"
#include "h5Helper.h"
/**
* Main rotine
* @param argc
* @param argv
* @return
*/
int main(int argc, char **argv)
{
// Time measurement
struct timeval t1, t2;
if (argc != 10)
{
printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intensity> <reduction threads> <reduction threads/block> <input> <output>\n");
exit(1);
}
// Number of particles
const int N = std::stoi(argv[1]);
// Length of time step
const float dt = std::stof(argv[2]);
// Number of steps
const int steps = std::stoi(argv[3]);
// Number of thread blocks
const int thr_blc = std::stoi(argv[4]);
// Write frequency
int writeFreq = std::stoi(argv[5]);
// number of reduction threads
const int red_thr = std::stoi(argv[6]);
// Number of reduction threads/blocks
const int red_thr_blc = std::stoi(argv[7]);
// Size of the simulation CUDA grid - number of blocks
const size_t simulationGrid = (N + thr_blc - 1) / thr_blc;
// Size of the reduction CUDA grid - number of blocks
const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc;
// Size of the shared memory used in calculation_velocity kernel
const size_t shared_mem_size = thr_blc * 7 * sizeof(float);
// Size of the shared memory used in centerOfMass kernel
const size_t reduction_shared_mem_size = red_thr_blc * 4 * sizeof(float);
// Log benchmark setup
printf("N: %d\n", N);
printf("dt: %f\n", dt);
printf("steps: %d\n", steps);
printf("threads/block: %d\n", thr_blc);
printf("blocks/grid: %lu\n", simulationGrid);
printf("reduction threads/block: %d\n", red_thr_blc);
printf("reduction blocks/grid: %lu\n", reductionGrid);
const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0;
writeFreq = (writeFreq > 0) ? writeFreq : 0;
// CPU side memory allocation
t_particles particles_cpu;
particles_cpu.pos_x = (float*)malloc(N*sizeof(float));
particles_cpu.pos_y = (float*)malloc(N*sizeof(float));
particles_cpu.pos_z = (float*)malloc(N*sizeof(float));
particles_cpu.vel_x = (float*)malloc(N*sizeof(float));
particles_cpu.vel_y = (float*)malloc(N*sizeof(float));
particles_cpu.vel_z = (float*)malloc(N*sizeof(float));
particles_cpu.weight = (float*)malloc(N*sizeof(float));
/*
* Caution! Create only after CPU side allocation
* parameters:
* Stride of two Offset of the first
* Data pointer consecutive elements element in floats,
* in floats, not bytes not bytes
*/
MemDesc md(
particles_cpu.pos_x, 1, 0, // Position in X
particles_cpu.pos_y, 1, 0, // Position in Y
particles_cpu.pos_z, 1, 0, // Position in Z
particles_cpu.vel_x, 1, 0, // Velocity in X
particles_cpu.vel_y, 1, 0, // Velocity in Y
particles_cpu.vel_z, 1, 0, // Velocity in Z
particles_cpu.weight, 1, 0, // Weight
N, // Number of particles
recordsNum); // Number of records in output file
// Initialisation of helper class and loading of input data
H5Helper h5Helper(argv[8], argv[9], md);
try
{
h5Helper.init();
h5Helper.readParticleData();
}
catch (const std::exception& e)
{
std::cerr<<e.what()<<std::endl;
return -1;
}
// GPU side memory allocation
// Step 3.*
float4 *centerOfMassGPU;
int *lock;
cudaMalloc(&centerOfMassGPU, 4*sizeof(float));
cudaMalloc(&lock, sizeof(int));
// Step 0-2
t_particles particles_gpuIn;
t_particles particles_gpuOut;
t_particles particles_tmp;
cudaMalloc(&particles_gpuIn.pos_x, N*sizeof(float));
cudaMalloc(&particles_gpuIn.pos_y, N*sizeof(float));
cudaMalloc(&particles_gpuIn.pos_z, N*sizeof(float));
cudaMalloc(&particles_gpuIn.vel_x, N*sizeof(float));
cudaMalloc(&particles_gpuIn.vel_y, N*sizeof(float));
cudaMalloc(&particles_gpuIn.vel_z, N*sizeof(float));
cudaMalloc(&particles_gpuIn.weight, N*sizeof(float));
cudaMalloc(&particles_gpuOut.pos_x, N*sizeof(float));
cudaMalloc(&particles_gpuOut.pos_y, N*sizeof(float));
cudaMalloc(&particles_gpuOut.pos_z, N*sizeof(float));
cudaMalloc(&particles_gpuOut.vel_x, N*sizeof(float));
cudaMalloc(&particles_gpuOut.vel_y, N*sizeof(float));
cudaMalloc(&particles_gpuOut.vel_z, N*sizeof(float));
cudaMalloc(&particles_gpuOut.weight, N*sizeof(float));
// Transfer data to GPU
// Step 3.*
cudaMemset(centerOfMassGPU, 0.0f, 4*sizeof(float));
cudaMemset(lock, 0, sizeof(int));
// Step 0-2
cudaMemcpy(particles_gpuIn.pos_x, particles_cpu.pos_x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.pos_y, particles_cpu.pos_y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.pos_z, particles_cpu.pos_z, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.vel_x, particles_cpu.vel_x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.vel_y, particles_cpu.vel_y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.vel_z, particles_cpu.vel_z, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuIn.weight, particles_cpu.weight, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(particles_gpuOut.weight, particles_cpu.weight, N*sizeof(float), cudaMemcpyHostToDevice);
gettimeofday(&t1, 0);
for(int s = 0; s < steps; s++)
{
// Kernel invocation
calculate_velocity<<<simulationGrid, thr_blc, shared_mem_size>>>(particles_gpuIn, particles_gpuOut, N, dt);
// Swap pointers
particles_tmp = particles_gpuOut;
particles_gpuOut = particles_gpuIn;
particles_gpuIn = particles_tmp;
}
cudaDeviceSynchronize();
// Kernel invocation
centerOfMass<<<reductionGrid, red_thr_blc, reduction_shared_mem_size>>>(particles_gpuIn,
&centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N);
gettimeofday(&t2, 0);
// Approximate simulation wall time
double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("Time: %f s\n", t);
float4 comOnGPU;
// Transfer results back to the CPU
// Step 3.*
cudaMemcpy(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), cudaMemcpyDeviceToHost);
// Step 0-2
cudaMemcpy(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.vel_z, particles_gpuIn.vel_z, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(particles_cpu.weight, particles_gpuIn.weight, N*sizeof(float), cudaMemcpyDeviceToHost);
// CPU completes the calculation of CenterOfMass
comOnGPU.x = comOnGPU.x / comOnGPU.w;
comOnGPU.y = comOnGPU.y / comOnGPU.w;
comOnGPU.z = comOnGPU.z / comOnGPU.w;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
float4 comOnCPU = centerOfMassCPU(md);
std::cout << "Center of mass on CPU:" << std::endl
<< comOnCPU.x <<", "
<< comOnCPU.y <<", "
<< comOnCPU.z <<", "
<< comOnCPU.w
<< std::endl;
std::cout << "Center of mass on GPU:" << std::endl
<< comOnGPU.x<<", "
<< comOnGPU.y<<", "
<< comOnGPU.z<<", "
<< comOnGPU.w
<< std::endl;
// Writing final values to the file
h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w);
h5Helper.writeParticleDataFinal();
// Free CPU memory
free(particles_cpu.pos_x );
free(particles_cpu.pos_y );
free(particles_cpu.pos_z );
free(particles_cpu.vel_x );
free(particles_cpu.vel_y );
free(particles_cpu.vel_z );
free(particles_cpu.weight);
// Free GPU memory
cudaFree(particles_gpuIn.pos_x);
cudaFree(particles_gpuIn.pos_y);
cudaFree(particles_gpuIn.pos_z);
cudaFree(particles_gpuIn.vel_x);
cudaFree(particles_gpuIn.vel_y);
cudaFree(particles_gpuIn.vel_z);
cudaFree(particles_gpuIn.weight);
cudaFree(particles_gpuOut.pos_x);
cudaFree(particles_gpuOut.pos_y);
cudaFree(particles_gpuOut.pos_z);
cudaFree(particles_gpuOut.vel_x);
cudaFree(particles_gpuOut.vel_y);
cudaFree(particles_gpuOut.vel_z);
cudaFree(particles_gpuOut.weight);
cudaFree(centerOfMassGPU);
cudaFree(lock);
return 0;
}// end of main
//----------------------------------------------------------------------------------------------------------------------
|
c957fd4e802b729b000d82e9694295527a11a421.hip | // Atomic functions help solve the problem of many threads accessing the same memory region
// Atomic operations guarantee that only one thread is accessing a given memory region at any moment
// Atomic operations must be enabled with sm_20_atomic_functions.h or the header matching the target architecture
// Opera??es at?micas devem ser configuradas com sm_20_atomic_functions.h ou o padr?o correspondente da arquitetura
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "sm_20_atomic_functions.h"
#include <iostream>
using namespace std;
__device__ int dSum = 0;
__global__ void sum(int* d)
{
int tid = threadIdx.x;
// This statement would cause a problem, because it treats the threads as if they ran sequentially,
// a problem known as a race condition
//dSum += d[tid];
// The atomicAdd function avoids the race-condition problem
atomicAdd(&dSum, d[tid]);
}
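// A hypothetical alternative to the kernel above (not used by main below): a
// block-level shared-memory tree reduction that combines the values first and
// issues a single atomicAdd per block instead of one per thread. It assumes a
// one-block launch with a power-of-two blockDim.x of at most 256, matching the
// launch of `sum` in main.
__global__ void sumShared(int* d)
{
    __shared__ int partial[256];
    int tid = threadIdx.x;
    partial[tid] = d[tid];
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
            partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        atomicAdd(&dSum, partial[0]);
}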
int main()
{
const int count = 256;
const int size = count * sizeof(int);
int h[count];
for (int i = 0; i < count; ++i)
h[i] = i + 1;
int* d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(sum, dim3(1), dim3(count), 0, 0, d);
int hSum;
hipMemcpyFromSymbol(&hSum, dSum, sizeof(int));
cout << "The sum of the values from 1 to " << count << " is " << hSum << endl;
hipFree(d);
} | c957fd4e802b729b000d82e9694295527a11a421.cu | // Atomic functions help solve the problem of many threads accessing the same memory region
// Atomic operations guarantee that only one thread is accessing a given memory region at any moment
// Atomic operations must be enabled with sm_20_atomic_functions.h or the header matching the target architecture
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "sm_20_atomic_functions.h"
#include <iostream>
using namespace std;
__device__ int dSum = 0;
__global__ void sum(int* d)
{
int tid = threadIdx.x;
// This statement would cause a problem, because it treats the threads as if they ran sequentially,
// a problem known as a race condition
//dSum += d[tid];
// The atomicAdd function avoids the race-condition problem
atomicAdd(&dSum, d[tid]);
}
int main()
{
const int count = 256;
const int size = count * sizeof(int);
int h[count];
for (int i = 0; i < count; ++i)
h[i] = i + 1;
int* d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
sum<<<1, count>>>(d);
int hSum;
cudaMemcpyFromSymbol(&hSum, dSum, sizeof(int));
cout << "The sum of the values from 1 to " << count << " is " << hSum << endl;
cudaFree(d);
} |
7cfb9d62ee5e4d7ebcbf52da0d90b036c4e3c47e.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <torch/types.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
template <typename scalar_t>
static __inline__ __device__ scalar_t mod(scalar_t x, scalar_t y) {
if (x > 0) {
return fmod(x,y);
}
else {
return y + fmod(x,y);
}
}
namespace {
const int REPEAT = 0;
const int MIRRORED_REPEAT = 1;
const int CLAMP_TO_EDGE = 2;
const int CLAMP_TO_BORDER = 3;
template <typename scalar_t>
__global__ void load_textures_cuda_kernel(
const scalar_t* image,
const int32_t* is_update,
scalar_t* faces,
scalar_t* __restrict__ textures,
int textures_size,
int texture_size,
int image_height,
int image_width,
int texture_wrapping,
bool use_bilinear) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= textures_size / 3) {
return;
}
const int ts = texture_size;
const int fn = i / (ts * ts * ts);
scalar_t dim0 = ((i / (ts * ts)) % ts) / (ts - 1.) ;
scalar_t dim1 = ((i / ts) % ts) / (ts - 1.);
scalar_t dim2 = (i % ts) / (ts - 1.);
if (0 < dim0 + dim1 + dim2) {
float sum = dim0 + dim1 + dim2;
dim0 /= sum;
dim1 /= sum;
dim2 /= sum;
}
scalar_t* face = &faces[fn * 3 * 2];
scalar_t* texture_ = &textures[i * 3];
if (is_update[fn] != 0) {
if (texture_wrapping == REPEAT) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
face[i] = mod(face[i], (scalar_t)1.);
}
}
else if (texture_wrapping == MIRRORED_REPEAT) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
if (mod(face[i], (scalar_t)2) < 1) {
face[i] = mod(face[i], (scalar_t)1.);
}
else {
face[i] = 1 - mod(face[i], (scalar_t)1.);
}
}
}
else if (texture_wrapping == CLAMP_TO_EDGE) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
face[i] = max(min(face[i], (scalar_t) 1), (scalar_t) 0);
}
}
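        // CLAMP_TO_BORDER needs no UV adjustment here; it is handled at sampling time below, where the texel is simply written as 0.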
const scalar_t pos_x = (
(face[2 * 0 + 0] * dim0 + face[2 * 1 + 0] * dim1 + face[2 * 2 + 0] * dim2) * (image_width - 1));
const scalar_t pos_y = (
(face[2 * 0 + 1] * dim0 + face[2 * 1 + 1] * dim1 + face[2 * 2 + 1] * dim2) * (image_height - 1));
if (use_bilinear) {
/* bilinear sampling */
const scalar_t weight_x1 = pos_x - (int)pos_x;
const scalar_t weight_x0 = 1 - weight_x1;
const scalar_t weight_y1 = pos_y - (int)pos_y;
const scalar_t weight_y0 = 1 - weight_y1;
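            // Bilinear sampling: blend the four neighbouring texels using the fractional x/y weights computed above (neighbours clamped to the image bounds).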
for (int k = 0; k < 3; k++) {
if (texture_wrapping != CLAMP_TO_BORDER) {
scalar_t c = 0;
c += image[(int)pos_y * image_width * 3 + (int)pos_x * 3 + k] * (weight_x0 * weight_y0);
c += image[min((int)(pos_y + 1), image_height-1) * image_width * 3 + (int)pos_x * 3 + k] * (weight_x0 * weight_y1);
c += image[(int)pos_y * image_width * 3 + min((int)pos_x + 1, image_width-1) * 3 + k] * (weight_x1 * weight_y0);
c += image[min((int)(pos_y + 1), image_height-1) * image_width * 3 + min((int)pos_x + 1, image_width-1) * 3 + k] * (weight_x1 * weight_y1);
texture_[k] = c;
}
else {
texture_[k] = 0;
}
}
} else {
/* nearest neighbor */
const int pos_xi = round(pos_x);
const int pos_yi = round(pos_y);
for (int k = 0; k < 3; k++) {
if (texture_wrapping != CLAMP_TO_BORDER) {
texture_[k] = image[pos_yi * image_width * 3 + pos_xi * 3 + k];
}
else {
texture_[k] = 0;
}
}
}
}
}
}
at::Tensor load_textures_cuda(
at::Tensor image,
at::Tensor faces,
at::Tensor textures,
at::Tensor is_update,
int texture_wrapping,
int use_bilinear) {
// textures_size = size of the textures tensor
const auto textures_size = textures.numel();
    // notice that textures_size != texture_size
const auto texture_size = textures.size(1);
const auto image_height = image.size(0);
const auto image_width = image.size(1);
const int threads = 1024;
const dim3 blocks ((textures_size / 3 - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "load_textures_cuda", ([&] {
hipLaunchKernelGGL(( load_textures_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
image.data<scalar_t>(),
is_update.data<int32_t>(),
faces.data<scalar_t>(),
textures.data<scalar_t>(),
textures_size,
texture_size,
image_height,
image_width,
texture_wrapping,
use_bilinear);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in load_textures: %s\n", hipGetErrorString(err));
return textures;
}
| 7cfb9d62ee5e4d7ebcbf52da0d90b036c4e3c47e.cu | #include <ATen/ATen.h>
#include <torch/types.h>
#include <cuda.h>
#include <cuda_runtime.h>
template <typename scalar_t>
static __inline__ __device__ scalar_t mod(scalar_t x, scalar_t y) {
if (x > 0) {
return fmod(x,y);
}
else {
return y + fmod(x,y);
}
}
namespace {
const int REPEAT = 0;
const int MIRRORED_REPEAT = 1;
const int CLAMP_TO_EDGE = 2;
const int CLAMP_TO_BORDER = 3;
template <typename scalar_t>
__global__ void load_textures_cuda_kernel(
const scalar_t* image,
const int32_t* is_update,
scalar_t* faces,
scalar_t* __restrict__ textures,
int textures_size,
int texture_size,
int image_height,
int image_width,
int texture_wrapping,
bool use_bilinear) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= textures_size / 3) {
return;
}
const int ts = texture_size;
const int fn = i / (ts * ts * ts);
scalar_t dim0 = ((i / (ts * ts)) % ts) / (ts - 1.) ;
scalar_t dim1 = ((i / ts) % ts) / (ts - 1.);
scalar_t dim2 = (i % ts) / (ts - 1.);
if (0 < dim0 + dim1 + dim2) {
float sum = dim0 + dim1 + dim2;
dim0 /= sum;
dim1 /= sum;
dim2 /= sum;
}
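    // When nonzero, dim0..dim2 now sum to 1 and act as barycentric weights over the face's three UV vertices when pos_x / pos_y are computed below.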
scalar_t* face = &faces[fn * 3 * 2];
scalar_t* texture_ = &textures[i * 3];
if (is_update[fn] != 0) {
if (texture_wrapping == REPEAT) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
face[i] = mod(face[i], (scalar_t)1.);
}
}
else if (texture_wrapping == MIRRORED_REPEAT) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
if (mod(face[i], (scalar_t)2) < 1) {
face[i] = mod(face[i], (scalar_t)1.);
}
else {
face[i] = 1 - mod(face[i], (scalar_t)1.);
}
}
}
else if (texture_wrapping == CLAMP_TO_EDGE) {
#pragma unroll
for (int i = 0; i < 6; ++i) {
face[i] = max(min(face[i], (scalar_t) 1), (scalar_t) 0);
}
}
const scalar_t pos_x = (
(face[2 * 0 + 0] * dim0 + face[2 * 1 + 0] * dim1 + face[2 * 2 + 0] * dim2) * (image_width - 1));
const scalar_t pos_y = (
(face[2 * 0 + 1] * dim0 + face[2 * 1 + 1] * dim1 + face[2 * 2 + 1] * dim2) * (image_height - 1));
if (use_bilinear) {
/* bilinear sampling */
const scalar_t weight_x1 = pos_x - (int)pos_x;
const scalar_t weight_x0 = 1 - weight_x1;
const scalar_t weight_y1 = pos_y - (int)pos_y;
const scalar_t weight_y0 = 1 - weight_y1;
for (int k = 0; k < 3; k++) {
if (texture_wrapping != CLAMP_TO_BORDER) {
scalar_t c = 0;
c += image[(int)pos_y * image_width * 3 + (int)pos_x * 3 + k] * (weight_x0 * weight_y0);
c += image[min((int)(pos_y + 1), image_height-1) * image_width * 3 + (int)pos_x * 3 + k] * (weight_x0 * weight_y1);
c += image[(int)pos_y * image_width * 3 + min((int)pos_x + 1, image_width-1) * 3 + k] * (weight_x1 * weight_y0);
c += image[min((int)(pos_y + 1), image_height-1) * image_width * 3 + min((int)pos_x + 1, image_width-1) * 3 + k] * (weight_x1 * weight_y1);
texture_[k] = c;
}
else {
texture_[k] = 0;
}
}
} else {
/* nearest neighbor */
const int pos_xi = round(pos_x);
const int pos_yi = round(pos_y);
for (int k = 0; k < 3; k++) {
if (texture_wrapping != CLAMP_TO_BORDER) {
texture_[k] = image[pos_yi * image_width * 3 + pos_xi * 3 + k];
}
else {
texture_[k] = 0;
}
}
}
}
}
}
at::Tensor load_textures_cuda(
at::Tensor image,
at::Tensor faces,
at::Tensor textures,
at::Tensor is_update,
int texture_wrapping,
int use_bilinear) {
// textures_size = size of the textures tensor
const auto textures_size = textures.numel();
// notice that texture_size != texture_size
const auto texture_size = textures.size(1);
const auto image_height = image.size(0);
const auto image_width = image.size(1);
const int threads = 1024;
const dim3 blocks ((textures_size / 3 - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "load_textures_cuda", ([&] {
load_textures_cuda_kernel<scalar_t><<<blocks, threads>>>(
image.data<scalar_t>(),
is_update.data<int32_t>(),
faces.data<scalar_t>(),
textures.data<scalar_t>(),
textures_size,
texture_size,
image_height,
image_width,
texture_wrapping,
use_bilinear);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in load_textures: %s\n", cudaGetErrorString(err));
return textures;
}
|
b7a411a4e01d73cfa0a2d92cc628c501310afef4.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_runtime.h>
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace platform {
__global__ void DummyKernel(int *a) { a[0] = 0; }
static void ForEachDevice(std::function<void(int)> func) {
auto original_device = platform::GetCurrentDeviceId();
int count = platform::GetCUDADeviceCount();
for (int i = 0; i < count; i++) {
platform::SetDeviceId(i);
func(i);
}
platform::SetDeviceId(original_device);
}
void DummyKernelAndEvent() {
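  // Warm up every device with a trivial kernel launch (recorded under the "_cuda_startup_" marker) so one-time startup cost stays out of later profiled work.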
for (int i = 0; i < 5; i++) {
ForEachDevice([](int d) {
platform::SetDeviceId(d);
hipStream_t stream;
PADDLE_ENFORCE(hipStreamCreate(&stream));
Mark("_cuda_startup_");
int *ptr;
PADDLE_ENFORCE(hipMalloc(&ptr, sizeof(int)));
hipLaunchKernelGGL(( DummyKernel), dim3(1), dim3(1), 0, stream, ptr);
PADDLE_ENFORCE(hipStreamSynchronize(stream));
PADDLE_ENFORCE(hipFree(ptr));
});
}
}
} // namespace platform
} // namespace paddle
| b7a411a4e01d73cfa0a2d92cc628c501310afef4.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda.h>
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace platform {
__global__ void DummyKernel(int *a) { a[0] = 0; }
static void ForEachDevice(std::function<void(int)> func) {
auto original_device = platform::GetCurrentDeviceId();
int count = platform::GetCUDADeviceCount();
for (int i = 0; i < count; i++) {
platform::SetDeviceId(i);
func(i);
}
platform::SetDeviceId(original_device);
}
void DummyKernelAndEvent() {
for (int i = 0; i < 5; i++) {
ForEachDevice([](int d) {
platform::SetDeviceId(d);
cudaStream_t stream;
PADDLE_ENFORCE(cudaStreamCreate(&stream));
Mark("_cuda_startup_");
int *ptr;
PADDLE_ENFORCE(cudaMalloc(&ptr, sizeof(int)));
DummyKernel<<<1, 1, 0, stream>>>(ptr);
PADDLE_ENFORCE(cudaStreamSynchronize(stream));
PADDLE_ENFORCE(cudaFree(ptr));
});
}
}
} // namespace platform
} // namespace paddle
|
c8b6a714dc6d279004291599ccdea5029e3317f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "hip/hip_runtime.h"
#include "utils.h"
}
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
hipLaunchKernelGGL(( scale_bias_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
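    // One block per filter: each thread accumulates partial sums of delta * x_norm over batch and spatial positions, then thread 0 reduces the BLOCK partials into scale_updates.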
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
hipLaunchKernelGGL(( backward_scale_kernel), dim3(n), dim3(BLOCK), 0, 0, x_norm, delta, batch, n, size, scale_updates);
check_error(hipPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
hipLaunchKernelGGL(( add_bias_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, output, biases, batch, n, size);
check_error(hipPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
hipLaunchKernelGGL(( backward_bias_conn_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n);
}else{
hipLaunchKernelGGL(( backward_bias_kernel), dim3(n), dim3(BLOCK), 0, 0, bias_updates, delta, batch, n, size);
}
check_error(hipPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(hipPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
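    // Adam parameter step with the bias correction folded into the step size: x += rate * sqrt(1 - B2^t) / (1 - B1^t) * m / (sqrt(v) + eps).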
x[index] = x[index] + (rate * sqrtf(1.f-powf(B2, t)) / (1.f-powf(B1, t)) * m[index] / (sqrtf(v[index]) + eps));
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
hipLaunchKernelGGL(( adam_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, x, m, v, B1, B2, rate, eps, t);
check_error(hipPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
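    // Decay the moment buffers, add L2 weight decay to the gradient d, accumulate the first (m) and second (v) moments, apply the Adam step, then zero d.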
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_delta_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(hipPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( mean_delta_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
hipLaunchKernelGGL(( fast_mean_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
hipLaunchKernelGGL(( fast_variance_delta_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(hipPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = mask_num;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
hipLaunchKernelGGL(( normalize_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, x, mean, variance, batch, filters, spatial);
check_error(hipPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
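    // One block per filter: threads stride over batch*spatial accumulating partial sums in shared memory; thread 0 reduces them and divides by spatial*batch to obtain the mean.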
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( fast_mean_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( fast_variance_kernel), dim3(filters), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
hipLaunchKernelGGL(( mean_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, batch, filters, spatial, mean);
check_error(hipPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
hipLaunchKernelGGL(( variance_kernel), dim3(cuda_gridsize(filters)), dim3(BLOCK), 0, 0, x, mean, batch, filters, spatial, variance);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( pow_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
hipLaunchKernelGGL(( mul_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, INCX, Y, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
hipLaunchKernelGGL(( flatten_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, spatial, layers, batch, forward, out);
check_error(hipPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
hipLaunchKernelGGL(( scale_mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask, scale);
check_error(hipPeekAtLastError());
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask)
{
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask_num, mask);
check_error(hipPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( const_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( constrain_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( add_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( supp_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( fill_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
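    // Element-wise addition of tensor add into out; stride and sample map indices between the two spatial resolutions when the layer sizes differ.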
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
hipLaunchKernelGGL(( shortcut_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
check_error(hipPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
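    // Smooth L1 (Huber-style) loss: quadratic when |truth - pred| < 1, linear (2*|diff| - 1) otherwise.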
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( smooth_l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( l2_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = abs(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
hipLaunchKernelGGL(( l1_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, pred, truth, delta, error);
check_error(hipPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
hipLaunchKernelGGL(( deinter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
hipLaunchKernelGGL(( inter_kernel), dim3(cuda_gridsize((NX+NY)*B)), dim3(BLOCK), 0, 0, NX, X, NY, Y, B, OUT);
check_error(hipPeekAtLastError());
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
hipLaunchKernelGGL(( weighted_sum_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, c);
check_error(hipPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
hipLaunchKernelGGL(( weighted_delta_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, s, da, db, ds, dc);
check_error(hipPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
hipLaunchKernelGGL(( mult_add_into_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, num, a, b, c);
check_error(hipPeekAtLastError());
}
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
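    // Track the maximum input first so the exponentials below are taken on shifted values (numerically stable softmax).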
for(i = 0; i < n; ++i){
int val = input[i*stride];
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
hipLaunchKernelGGL(( softmax_tree_kernel), dim3(cuda_gridsize(num)), dim3(BLOCK), 0, 0, input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(hipPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
hipLaunchKernelGGL(( softmax_kernel), dim3(cuda_gridsize(batch*groups)), dim3(BLOCK), 0, 0, input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(hipPeekAtLastError());
}
| c8b6a714dc6d279004291599ccdea5029e3317f2.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <assert.h>
extern "C" {
#include "blas.h"
#include "cuda.h"
#include "utils.h"
}
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
}
}
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
check_error(cudaPeekAtLastError());
}
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n*size*batch) return;
int i = index % size;
index /= size;
int j = index % n;
index /= n;
int k = index;
output[(k*n+j)*size + i] += biases[j];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
int num = n*size*batch;
add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= n) return;
int b;
float sum = 0;
for(b = 0; b < batch; ++b){
int i = b*n + index;
sum += delta[i];
}
bias_updates[index] += sum;
}
__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
__shared__ float part[BLOCK];
int i,b;
int filter = blockIdx.x;
int p = threadIdx.x;
float sum = 0;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; i += BLOCK){
int index = p + i + size*(filter + n*b);
sum += (p+i < size) ? delta[index] : 0;
}
}
part[p] = sum;
__syncthreads();
if (p == 0) {
for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
}
}
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
if(size == 1){
backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
}else{
backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
}
check_error(cudaPeekAtLastError());
}
/*
__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int f1 = index / n;
int f2 = index % n;
if (f2 <= f1) return;
float sum = 0;
float norm1 = 0;
float norm2 = 0;
int b, i;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
sum += output[i1] * output[i2];
norm1 += output[i1] * output[i1];
norm2 += output[i2] * output[i2];
}
}
norm1 = sqrt(norm1);
norm2 = sqrt(norm2);
float norm = norm1 * norm2;
sum = sum / norm;
for(b = 0; b < batch; ++b){
for(i = 0; i < size; ++i){
int i1 = b * size * n + f1 * size + i;
int i2 = b * size * n + f2 * size + i;
delta[i1] += - scale * sum * output[i2] / norm;
delta[i2] += - scale * sum * output[i1] / norm;
}
}
}
void dot_error_gpu(layer l)
{
dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
check_error(cudaPeekAtLastError());
}
*/
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
x[index] = x[index] + (rate * sqrtf(1.f-powf(B2, t)) / (1.f-powf(B1, t)) * m[index] / (sqrtf(v[index]) + eps));
}
extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
check_error(cudaPeekAtLastError());
}
extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
{
scal_gpu(n, B1, m, 1);
scal_gpu(n, B2, v, 1);
axpy_gpu(n, -decay*batch, w, 1, d, 1);
axpy_gpu(n, (1-B1), d, 1, m, 1);
mul_gpu(n, d, 1, d, 1);
axpy_gpu(n, (1-B2), d, 1, v, 1);
adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
fill_gpu(n, 0, d, 1);
}
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
size_t N = batch*filters*spatial;
normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
check_error(cudaPeekAtLastError());
}
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
variance_delta[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= groups) return;
sum[i] = 0;
for(k = 0; k < n; ++k){
sum[i] += x[k*groups + i];
}
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean_delta[filter] = 0;
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
}
}
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
}
}
__syncthreads();
if(id == 0){
variance_delta[filter] = 0;
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
}
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean_delta[i] = 0;
for (j = 0; j < batch; ++j) {
for (k = 0; k < spatial; ++k) {
int index = j*filters*spatial + i*spatial + k;
mean_delta[i] += delta[index];
}
}
mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
check_error(cudaPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1.f/(batch * spatial);
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
int j,k;
mean[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
mean[i] += x[index];
}
}
mean[i] *= scale;
}
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1.f/(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
variance[i] = 0;
for(j = 0; j < batch; ++j){
for(k = 0; k < spatial; ++k){
int index = j*filters*spatial + i*spatial + k;
variance[i] += powf((x[index] - mean[i]), 2);
}
}
variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_index = i;
int in_w = i%w;
i = i/w;
int in_h = i%h;
i = i/h;
int in_c = i%c;
i = i/c;
int b = i%batch;
int out_c = c/(stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
//printf("%d\n", offset);
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
// printf("%d %d %d\n", w2, h2, c2);
//printf("%d %d\n", in_index, out_index);
//if(out_index >= N || out_index < 0) printf("bad bad bad \n");
if(forward) out[out_index] = x[in_index];
else out[in_index] = x[out_index];
//if(forward) out[1] = x[1];
//else out[0] = x[0];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}
__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) {
if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
}
}
__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] += ALPHA;
}
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] = ALPHA;
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] = mask_num;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY] *= X[i*INCX];
}
extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? x[index] : 0;
}
}
__syncthreads();
if(id == 0){
mean[filter] = 0;
for(i = 0; i < threads; ++i){
mean[filter] += local[i];
}
mean[filter] /= spatial * batch;
}
}
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
}
}
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
variance[filter] /= (spatial * batch - 1);
}
}
extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
{
mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i >= N) return;
int in_s = i%spatial;
i = i/spatial;
int in_c = i%layers;
i = i/layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
check_error(cudaPeekAtLastError());
}
__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == mask_num) x[i] *= scale;
}
extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
check_error(cudaPeekAtLastError());
}
extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask)
{
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask);
check_error(cudaPeekAtLastError());
}
extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
{
const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
{
add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX)
{
supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
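// shortcut_gpu (below) adds the activations of one layer onto another even when
// their resolutions differ: stride = w1/w2 subsamples `add` when it is the larger
// map, sample = w2/w1 maps each source pixel onto every sample-th output pixel
// when `add` is the smaller one, and at most one of the two exceeds 1 (the other
// is clamped to 1). Only the overlapping minw*minh*minc region is accumulated.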
extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1/w2;
int sample = w2/w1;
assert(stride == h1/h2);
assert(sample == h2/h1);
if(stride < 1) stride = 1;
if(sample < 1) sample = 1;
int size = batch * minw * minh * minc;
shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
check_error(cudaPeekAtLastError());
}
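// smooth_l1_kernel (below) implements an elementwise smooth-L1 (Huber-style)
// loss on the residual d = truth - pred: error = d*d with delta = d when
// |d| < 1, and error = 2*|d| - 1 with delta = sign(d) otherwise, so both the
// error and the stored gradient are continuous at |d| = 1.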
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
float abs_val = fabsf(diff);
if(abs_val < 1) {
error[i] = diff * diff;
delta[i] = diff;
}
else {
error[i] = 2*abs_val - 1;
delta[i] = (diff > 0) ? 1 : -1;
}
}
}
extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = diff * diff; //I know this is technically wrong, deal with it.
delta[i] = diff;
}
}
extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
error[i] = fabsf(diff);
delta[i] = (diff > 0) ? 1 : -1;
}
}
extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
}
}
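// inter_kernel packs, for each batch element, the NX values of X followed by
// the NY values of Y into OUT; deinter_kernel is the backward counterpart and
// accumulates gradients from OUT back into X and Y with +=, skipping either
// array when the corresponding pointer is null.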
__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
if(X) X[b*NX + j] += OUT[i];
} else {
if(Y) Y[b*NY + j - NX] += OUT[i];
}
}
}
extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < (NX+NY)*B){
int b = i / (NX+NY);
int j = i % (NX+NY);
if (j < NX){
OUT[i] = X[b*NX + j];
} else {
OUT[i] = Y[b*NY + j - NX];
}
}
}
extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
check_error(cudaPeekAtLastError());
}
extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
check_error(cudaPeekAtLastError());
}
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
if(da) da[i] += dc[i] * s[i];
if(db) db[i] += dc[i] * (1-s[i]);
ds[i] += dc[i] * (a[i] - b[i]);
}
}
extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
check_error(cudaPeekAtLastError());
}
__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
c[i] += a[i]*b[i];
}
}
extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
check_error(cudaPeekAtLastError());
}
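// softmax_device (below) uses the standard max-subtraction trick for numerical
// stability: every element is exponentiated as exp((x_i - max)/temp), which
// cannot overflow for the largest element and leaves the normalized
// probabilities unchanged; `stride` lets the same routine walk channel-strided data.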
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -INFINITY;
for(i = 0; i < n; ++i){
float val = input[i*stride];
largest = (val>largest) ? val : largest;
}
for(i = 0; i < n; ++i){
float e = expf(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= spatial*batch*groups) return;
int s = id % spatial;
id = id / spatial;
int g = id % groups;
int b = id / groups;
int goff = group_offset[g]*spatial;
int boff = b*stride;
softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}
extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
/*
static int *tree_groups_size = 0;
static int *tree_groups_offset = 0;
if(!tree_groups_size){
tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
}
*/
int num = spatial*batch*hier.groups;
softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
check_error(cudaPeekAtLastError());
cuda_free((float *)tree_groups_size);
cuda_free((float *)tree_groups_offset);
}
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= batch*groups) return;
int b = id / groups;
int g = id % groups;
softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}
extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
check_error(cudaPeekAtLastError());
}
|
64c62f515e57975f891cee64095e356040ea7c9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_idata(idata, idata + n);
//thrust::device_vector<int> dev_idata(n);
thrust::device_vector<int> dev_idata(host_idata);
thrust::device_vector<int> dev_odata(n);
//thrust::copy(host_idata.begin(), host_idata.end(), dev_idata.begin());//copy to device
hipDeviceSynchronize();
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
hipMemcpy(odata, thrust::raw_pointer_cast(&dev_odata[0]), sizeof(int) * n, hipMemcpyDeviceToHost);
}
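// Illustrative example: calling the wrapper on a small host array,
// int in[8] = {3, 1, 7, 0, 4, 1, 6, 3}; int out[8];
// StreamCompaction::Thrust::scan(8, out, in);
// leaves the exclusive prefix sums {0, 3, 4, 11, 11, 15, 16, 22} in out.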
}
}
| 64c62f515e57975f891cee64095e356040ea7c9c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_idata(idata, idata + n);
//thrust::device_vector<int> dev_idata(n);
thrust::device_vector<int> dev_idata(host_idata);
thrust::device_vector<int> dev_odata(n);
//thrust::copy(host_idata.begin(), host_idata.end(), dev_idata.begin());//copy to device
cudaDeviceSynchronize();
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
cudaMemcpy(odata, thrust::raw_pointer_cast(&dev_odata[0]), sizeof(int) * n, cudaMemcpyDeviceToHost);
}
}
}
|
5557b1110eeda24e86f4df036025773c9019c636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <community/louvain.cuh>
#include <converters/permute_graph.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/utilities/error.hpp>
#include <utilities/graph_utils.cuh>
#include <thrust/random.h>
#include <rmm/exec_policy.hpp>
#include <ctime>
namespace {
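// binsearch_maxle returns the index of the largest entry of `vec` that is
// <= `val`, searching within [low, high]; match_check_kernel applies it to the
// CSR offsets array to recover the source vertex of edge `tid` without
// materializing an explicit edge-to-source map.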
template <typename IndexType>
__device__ IndexType
binsearch_maxle(const IndexType* vec, const IndexType val, IndexType low, IndexType high)
{
while (true) {
if (low == high) return low; // we know it exists
if ((low + 1) == high) return (vec[high] <= val) ? high : low;
IndexType mid = low + (high - low) / 2;
if (vec[mid] > val)
high = mid - 1;
else
low = mid;
}
}
// FIXME: This shouldn't need to be a custom kernel, this
// seems like it should just be a thrust::transform
template <typename IdxT, typename ValT>
__global__ void match_check_kernel(
IdxT size, IdxT num_verts, IdxT* offsets, IdxT* indices, IdxT* parts, ValT* weights)
{
IdxT tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
IdxT source = binsearch_maxle(offsets, tid, (IdxT)0, num_verts);
IdxT dest = indices[tid];
if (parts[source] == parts[dest]) weights[tid] += 1;
tid += gridDim.x * blockDim.x;
}
}
struct prg {
__device__ float operator()(int n)
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> dist(0.0, 1.0);
rng.discard(n);
return dist(rng);
}
};
template <typename ValT>
struct update_functor {
ValT min_value;
ValT ensemble_size;
update_functor(ValT minv, ValT es) : min_value(minv), ensemble_size(es) {}
__host__ __device__ ValT operator()(ValT input)
{
return min_value + (1 - min_value) * (input / ensemble_size);
}
};
/**
* Computes a random permutation vector of length size. A permutation vector of length n
* contains all values [0..n-1] exactly once.
* @param size The length of the permutation vector to generate
* @param seed A seed value for the random number generator, the generator will discard this many
* values before using values. Calling this method with the same seed will result in the same
* permutation vector.
* @return A pointer to memory containing the requested permutation vector. The caller is
* responsible for freeing the allocated memory using ALLOC_FREE_TRY().
*/
template <typename T>
void get_permutation_vector(T size, T seed, T* permutation, rmm::cuda_stream_view stream_view)
{
rmm::device_uvector<float> randoms_v(size, stream_view);
thrust::counting_iterator<uint32_t> index(seed);
thrust::transform(rmm::exec_policy(stream_view), index, index + size, randoms_v.begin(), prg());
thrust::sequence(rmm::exec_policy(stream_view), permutation, permutation + size, 0);
thrust::sort_by_key(
rmm::exec_policy(stream_view), randoms_v.begin(), randoms_v.end(), permutation);
}
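// EcgLouvain only overrides initialize_dendrogram_level: level 0 is seeded
// with a random permutation of the vertices derived from `seed_`, so every
// member of the ensemble starts from a different initial assignment and the
// individual Louvain runs decorrelate.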
template <typename graph_type>
class EcgLouvain : public cugraph::Louvain<graph_type> {
public:
using graph_t = graph_type;
using vertex_t = typename graph_type::vertex_type;
using edge_t = typename graph_type::edge_type;
using weight_t = typename graph_type::weight_type;
EcgLouvain(raft::handle_t const& handle, graph_type const& graph, vertex_t seed)
: cugraph::Louvain<graph_type>(handle, graph), seed_(seed)
{
}
void initialize_dendrogram_level(vertex_t num_vertices) override
{
this->dendrogram_->add_level(0, num_vertices, this->handle_.get_stream_view());
get_permutation_vector(num_vertices,
seed_,
this->dendrogram_->current_level_begin(),
this->handle_.get_stream_view());
}
private:
vertex_t seed_;
};
} // anonymous namespace
namespace cugraph {
template <typename vertex_t, typename edge_t, typename weight_t>
void ecg(raft::handle_t const& handle,
legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
weight_t min_weight,
vertex_t ensemble_size,
vertex_t* clustering)
{
using graph_type = legacy::GraphCSRView<vertex_t, edge_t, weight_t>;
CUGRAPH_EXPECTS(graph.edge_data != nullptr,
"Invalid input argument: ecg expects a weighted graph");
CUGRAPH_EXPECTS(clustering != nullptr,
"Invalid input argument: clustering is NULL, should be a device pointer to "
"memory for storing the result");
rmm::device_uvector<weight_t> ecg_weights_v(graph.number_of_edges, handle.get_stream_view());
thrust::copy(rmm::exec_policy(handle.get_stream_view()),
graph.edge_data,
graph.edge_data + graph.number_of_edges,
ecg_weights_v.data());
vertex_t size{graph.number_of_vertices};
// FIXME: This seed should be a parameter
vertex_t seed{1};
// Iterate over each member of the ensemble
for (vertex_t i = 0; i < ensemble_size; i++) {
EcgLouvain<graph_type> runner(handle, graph, seed);
seed += size;
weight_t wt = runner(size_t{1}, weight_t{1});
// For each edge in the graph determine whether the endpoints are in the same partition
// Keep a sum for each edge of the total number of times its endpoints are in the same partition
dim3 grid, block;
block.x = 512;
grid.x = min(vertex_t{CUDA_MAX_BLOCKS}, (graph.number_of_edges / 512 + 1));
hipLaunchKernelGGL(( match_check_kernel), dim3(grid), dim3(block), 0, handle.get_stream(),
graph.number_of_edges,
graph.number_of_vertices,
graph.offsets,
graph.indices,
runner.get_dendrogram().get_level_ptr_nocheck(0),
ecg_weights_v.data());
}
// Set weights = min_weight + (1 - min_weight)*sum/ensemble_size
update_functor<weight_t> uf(min_weight, ensemble_size);
thrust::transform(rmm::exec_policy(handle.get_stream_view()),
ecg_weights_v.begin(),
ecg_weights_v.end(),
ecg_weights_v.begin(),
uf);
// Run Louvain on the original graph using the computed weights
// (pass max_level = 100 for a "full run")
legacy::GraphCSRView<vertex_t, edge_t, weight_t> louvain_graph;
louvain_graph.indices = graph.indices;
louvain_graph.offsets = graph.offsets;
louvain_graph.edge_data = ecg_weights_v.data();
louvain_graph.number_of_vertices = graph.number_of_vertices;
louvain_graph.number_of_edges = graph.number_of_edges;
cugraph::louvain(handle, louvain_graph, clustering, size_t{100});
}
// Explicit template instantiations.
template void ecg<int32_t, int32_t, float>(
raft::handle_t const&,
legacy::GraphCSRView<int32_t, int32_t, float> const& graph,
float min_weight,
int32_t ensemble_size,
int32_t* clustering);
template void ecg<int32_t, int32_t, double>(
raft::handle_t const&,
legacy::GraphCSRView<int32_t, int32_t, double> const& graph,
double min_weight,
int32_t ensemble_size,
int32_t* clustering);
} // namespace cugraph
| 5557b1110eeda24e86f4df036025773c9019c636.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <community/louvain.cuh>
#include <converters/permute_graph.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/utilities/error.hpp>
#include <utilities/graph_utils.cuh>
#include <thrust/random.h>
#include <rmm/exec_policy.hpp>
#include <ctime>
namespace {
template <typename IndexType>
__device__ IndexType
binsearch_maxle(const IndexType* vec, const IndexType val, IndexType low, IndexType high)
{
while (true) {
if (low == high) return low; // we know it exists
if ((low + 1) == high) return (vec[high] <= val) ? high : low;
IndexType mid = low + (high - low) / 2;
if (vec[mid] > val)
high = mid - 1;
else
low = mid;
}
}
// FIXME: This shouldn't need to be a custom kernel, this
// seems like it should just be a thrust::transform
template <typename IdxT, typename ValT>
__global__ void match_check_kernel(
IdxT size, IdxT num_verts, IdxT* offsets, IdxT* indices, IdxT* parts, ValT* weights)
{
IdxT tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
IdxT source = binsearch_maxle(offsets, tid, (IdxT)0, num_verts);
IdxT dest = indices[tid];
if (parts[source] == parts[dest]) weights[tid] += 1;
tid += gridDim.x * blockDim.x;
}
}
struct prg {
__device__ float operator()(int n)
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> dist(0.0, 1.0);
rng.discard(n);
return dist(rng);
}
};
template <typename ValT>
struct update_functor {
ValT min_value;
ValT ensemble_size;
update_functor(ValT minv, ValT es) : min_value(minv), ensemble_size(es) {}
__host__ __device__ ValT operator()(ValT input)
{
return min_value + (1 - min_value) * (input / ensemble_size);
}
};
/**
* Computes a random permutation vector of length size. A permutation vector of length n
* contains all values [0..n-1] exactly once.
* @param size The length of the permutation vector to generate
* @param seed A seed value for the random number generator, the generator will discard this many
* values before using values. Calling this method with the same seed will result in the same
* permutation vector.
* @return A pointer to memory containing the requested permutation vector. The caller is
* responsible for freeing the allocated memory using ALLOC_FREE_TRY().
*/
template <typename T>
void get_permutation_vector(T size, T seed, T* permutation, rmm::cuda_stream_view stream_view)
{
rmm::device_uvector<float> randoms_v(size, stream_view);
thrust::counting_iterator<uint32_t> index(seed);
thrust::transform(rmm::exec_policy(stream_view), index, index + size, randoms_v.begin(), prg());
thrust::sequence(rmm::exec_policy(stream_view), permutation, permutation + size, 0);
thrust::sort_by_key(
rmm::exec_policy(stream_view), randoms_v.begin(), randoms_v.end(), permutation);
}
template <typename graph_type>
class EcgLouvain : public cugraph::Louvain<graph_type> {
public:
using graph_t = graph_type;
using vertex_t = typename graph_type::vertex_type;
using edge_t = typename graph_type::edge_type;
using weight_t = typename graph_type::weight_type;
EcgLouvain(raft::handle_t const& handle, graph_type const& graph, vertex_t seed)
: cugraph::Louvain<graph_type>(handle, graph), seed_(seed)
{
}
void initialize_dendrogram_level(vertex_t num_vertices) override
{
this->dendrogram_->add_level(0, num_vertices, this->handle_.get_stream_view());
get_permutation_vector(num_vertices,
seed_,
this->dendrogram_->current_level_begin(),
this->handle_.get_stream_view());
}
private:
vertex_t seed_;
};
} // anonymous namespace
namespace cugraph {
template <typename vertex_t, typename edge_t, typename weight_t>
void ecg(raft::handle_t const& handle,
legacy::GraphCSRView<vertex_t, edge_t, weight_t> const& graph,
weight_t min_weight,
vertex_t ensemble_size,
vertex_t* clustering)
{
using graph_type = legacy::GraphCSRView<vertex_t, edge_t, weight_t>;
CUGRAPH_EXPECTS(graph.edge_data != nullptr,
"Invalid input argument: ecg expects a weighted graph");
CUGRAPH_EXPECTS(clustering != nullptr,
"Invalid input argument: clustering is NULL, should be a device pointer to "
"memory for storing the result");
rmm::device_uvector<weight_t> ecg_weights_v(graph.number_of_edges, handle.get_stream_view());
thrust::copy(rmm::exec_policy(handle.get_stream_view()),
graph.edge_data,
graph.edge_data + graph.number_of_edges,
ecg_weights_v.data());
vertex_t size{graph.number_of_vertices};
// FIXME: This seed should be a parameter
vertex_t seed{1};
// Iterate over each member of the ensemble
for (vertex_t i = 0; i < ensemble_size; i++) {
EcgLouvain<graph_type> runner(handle, graph, seed);
seed += size;
weight_t wt = runner(size_t{1}, weight_t{1});
// For each edge in the graph determine whether the endpoints are in the same partition
// Keep a sum for each edge of the total number of times its endpoints are in the same partition
dim3 grid, block;
block.x = 512;
grid.x = min(vertex_t{CUDA_MAX_BLOCKS}, (graph.number_of_edges / 512 + 1));
match_check_kernel<<<grid, block, 0, handle.get_stream()>>>(
graph.number_of_edges,
graph.number_of_vertices,
graph.offsets,
graph.indices,
runner.get_dendrogram().get_level_ptr_nocheck(0),
ecg_weights_v.data());
}
// Set weights = min_weight + (1 - min_weight)*sum/ensemble_size
update_functor<weight_t> uf(min_weight, ensemble_size);
thrust::transform(rmm::exec_policy(handle.get_stream_view()),
ecg_weights_v.begin(),
ecg_weights_v.end(),
ecg_weights_v.begin(),
uf);
// Run Louvain on the original graph using the computed weights
// (pass max_level = 100 for a "full run")
legacy::GraphCSRView<vertex_t, edge_t, weight_t> louvain_graph;
louvain_graph.indices = graph.indices;
louvain_graph.offsets = graph.offsets;
louvain_graph.edge_data = ecg_weights_v.data();
louvain_graph.number_of_vertices = graph.number_of_vertices;
louvain_graph.number_of_edges = graph.number_of_edges;
cugraph::louvain(handle, louvain_graph, clustering, size_t{100});
}
// Explicit template instantiations.
template void ecg<int32_t, int32_t, float>(
raft::handle_t const&,
legacy::GraphCSRView<int32_t, int32_t, float> const& graph,
float min_weight,
int32_t ensemble_size,
int32_t* clustering);
template void ecg<int32_t, int32_t, double>(
raft::handle_t const&,
legacy::GraphCSRView<int32_t, int32_t, double> const& graph,
double min_weight,
int32_t ensemble_size,
int32_t* clustering);
} // namespace cugraph
|
9ff8b8d9b7fbc96a7c8259954cf171b18093c085.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is >= x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation: the correction term c recovers low-order bits lost in each addition, giving a more accurate CPU reference sum */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
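/* unroll_warp relies on the classic warp-synchronous idiom: once the active
reduction width fits in a single warp (<= 32 threads) the remaining steps run
without __syncthreads(), and the volatile qualifier forces every partial sum
to be re-read from shared memory; on Volta and newer GPUs this pattern would
additionally need __syncwarp() between steps to stay well-defined. */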
__device__ void
unroll_warp(volatile dtype* input, int tid, int n){
if(n > 64){
input[tid] += input[tid + 32];
}
if(n > 32){
input[tid] += input[tid + 16];
}
input[tid] += input[tid + 8];
input[tid] += input[tid + 4];
input[tid] += input[tid + 2];
input[tid] += input[tid + 1];
}
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x*2 + threadIdx.x;
unsigned int size_grid = blockDim.x*2*gridDim.x;
scratch[threadIdx.x] = 0;
while(i < n){
scratch[threadIdx.x] += g_idata[i];
if(i + blockDim.x < n) scratch[threadIdx.x] += g_idata[i + blockDim.x]; // guard the paired element when n is not a multiple of 2*blockDim
i += size_grid;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 32; s>>=1){
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads();
}
if( threadIdx.x < 32){
unroll_warp(scratch, threadIdx.x, n);
}
if(threadIdx.x == 0){
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 9ff8b8d9b7fbc96a7c8259954cf171b18093c085.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is >= x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation: the correction term c recovers low-order bits lost in each addition, giving a more accurate CPU reference sum */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__device__ void
unroll_warp(volatile dtype* input, int tid, int n){
if(n > 64){
input[tid] += input[tid + 32];
}
if(n > 32){
input[tid] += input[tid + 16];
}
input[tid] += input[tid + 8];
input[tid] += input[tid + 4];
input[tid] += input[tid + 2];
input[tid] += input[tid + 1];
}
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x*2 + threadIdx.x;
unsigned int size_grid = blockDim.x*2*gridDim.x;
scratch[threadIdx.x] = 0;
while(i < n){
scratch[threadIdx.x] += g_idata[i];
if(i + blockDim.x < n) scratch[threadIdx.x] += g_idata[i + blockDim.x]; // guard the paired element when n is not a multiple of 2*blockDim
i += size_grid;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 32; s>>=1){
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads();
}
if( threadIdx.x < 32){
unroll_warp(scratch, threadIdx.x, n);
}
if(threadIdx.x == 0){
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
kernel5 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
4866dfafc6b941b3edd38951d502b2faf380475b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <helper_cuda.h>
#include <timer.h>
#include <string>
#include "corner.h"
#include "fast_cuda.h"
//__device__
//int position(int m,int n,int width)
//{
// int idx=m+n*width;
// return idx;
//}
//__global__
//void fast(uchar* image, int width, int height,Corner* d_corner,int gridsize_x, int gridsize_y, const int threshold)
//{
// __shared__ uchar patch[22][22];
// uint sp=0,sn=0;
// int m=blockDim.x*blockIdx.x+threadIdx.x;
// int n=blockDim.y*blockIdx.y+threadIdx.y;
// uint idx = m+n*width;
// uint idx_block=threadIdx.y*blockDim.x+threadIdx.x; //offset for pixel in patch
// d_corner[idx]={0,0}; //initialize the class member
// int patch_top_x=blockDim.x*blockIdx.x-3;
// int patch_top_y=blockDim.y*blockIdx.y-3;
// int idx_block_256=idx_block+256;
//
// //load into patch
// patch[idx_block%22][idx_block/22]=image[position(patch_top_x+idx_block%22,patch_top_y+idx_block/22,width)];
// if(idx_block_256<484)
// patch[(idx_block+256)%22][(idx_block+256)/22]=image[position(patch_top_x+idx_block_256%22,patch_top_y+idx_block_256/22,width)];
// __syncthreads();
//
// //detect
// int x=3+threadIdx.x;
// int y=3+threadIdx.y;
// if(m>2&&m<(width-3)&&n>2&&n<(height-3)) //detect the points away from the edges
// {
// uchar center_value = patch[x][y];
// sp |=(patch[x][y-3]>(center_value + threshold))<< 0;
// sp |=(patch[x+1][y-3]>(center_value + threshold))<< 1;
// sp |=(patch[x+2][y-2]>(center_value + threshold))<< 2;
// sp |=(patch[x+3][y-1]>(center_value + threshold))<< 3;
// sp |=(patch[x+3][y]>(center_value + threshold))<< 4;
// sp |=(patch[x+3][y+1]>(center_value + threshold))<< 5;
// sp |=(patch[x+2][y+2]>(center_value + threshold))<< 6;
// sp |=(patch[x+1][y+3]>(center_value + threshold))<< 7;
// sp |=(patch[x][y+3]>(center_value + threshold))<< 8;
// sp |=(patch[x-1][y+3]>(center_value + threshold))<< 9;
// sp |=(patch[x-2][y+2]>(center_value + threshold))<< 10;
// sp |=(patch[x-3][y+1]>(center_value + threshold))<< 11;
// sp |=(patch[x-3][y]>(center_value + threshold))<< 12;
// sp |=(patch[x-3][y-1]>(center_value + threshold))<< 13;
// sp |=(patch[x-2][y-2]>(center_value + threshold))<< 14;
// sp |=(patch[x-1][y-3]>(center_value + threshold))<< 15;
//
// sp+=sp<<16;
// uint sp1=sp&(sp<<1);
// uint sp2=sp1&(sp1<<2);
// uint sp3=sp2&(sp2<<4);
// uint sp4=sp3&(sp<<8);
// if(sp4!=0)
// {
// int value=abs(center_value-patch[x-1][y-1])+abs(center_value-patch[x][y-1])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x-1][y])+abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y+1]);
// d_corner[idx].value=value;
// d_corner[idx].set=1;
// }
// else
// {
// sn |=(patch[x][y-3]<(center_value - threshold))<< 0;
// sn |=(patch[x+1][y-3]<(center_value - threshold))<< 1;
// sn |=(patch[x+2][y-2]<(center_value - threshold))<< 2;
// sn |=(patch[x+3][y-1]<(center_value - threshold))<< 3;
// sn |=(patch[x+3][y]<(center_value - threshold))<< 4;
// sn |=(patch[x+3][y+1]<(center_value - threshold))<< 5;
// sn |=(patch[x+2][y+2]<(center_value - threshold))<< 6;
// sn |=(patch[x+1][y+3]<(center_value - threshold))<< 7;
// sn |=(patch[x][y+3]>(center_value - threshold))<< 8;
// sn |=(patch[x-1][y+3]<(center_value - threshold))<< 9;
// sn |=(patch[x-2][y+2]<(center_value - threshold))<< 10;
// sn |=(patch[x-3][y+1]<(center_value - threshold))<< 11;
// sn |=(patch[x-3][y]<(center_value - threshold))<< 12;
// sn |=(patch[x-3][y-1]<(center_value - threshold))<< 13;
// sn |=(patch[x-2][y-2]<(center_value - threshold))<< 14;
// sn |=(patch[x-1][y-3]<(center_value - threshold))<< 15;
// sn+=sn<<16;
// uint sn1=sn&(sn<<1);
// uint sn2=sn1&(sn1<<2);
// uint sn3=sn2&(sn2<<4);
// uint sn4=sn3&(sn<<8);
// if(sn4!=0)
// {
// int value=abs(center_value-patch[x-1][y-1])+abs(center_value-patch[x][y-1])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x-1][y])+abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y+1]);
// d_corner[idx].value=value;
// d_corner[idx].set=1;
// }
// }
// }
//
//}
//__global__
//void nms(uchar* image, Corner* d_corner,int width, int height)
//{
// int m=blockDim.x*blockIdx.x+threadIdx.x;
// int n=blockDim.y*blockIdx.y+threadIdx.y;
// int idx=n*width+m;
// if(d_corner[idx].set==1)
// {
// if(d_corner[position(m-1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m-1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n+1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
//
// }
//}
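// Host pipeline below: load the image as grayscale, copy it to the GPU, launch
// `fast` (presumably declared in fast_cuda.h; the commented-out kernel above
// sketches the same detector) to score candidate corners into d_corner, launch
// `nms` to suppress non-maximal responses, then copy the Corner array back and
// draw every surviving point onto a colour copy of the input image.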
int main( void )
{
using namespace std;
using namespace cv;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
const int threshold=100;
string filename="/Users/macbookpro/Downloads/monash.jpg";
Mat image;
image = cv::imread(filename,0); // Read the file
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
uchar* d_data; // create a pointer
size_t imSize=image.cols*image.rows;
Corner* h_corner=new Corner[imSize];
Corner* d_corner;
checkCudaErrors(hipMalloc((void**) &d_corner,sizeof(Corner)*imSize));
checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uchar)*imSize)); // create memory on the gpu and pass a pointer to the host
checkCudaErrors(hipMemcpy(d_data, image.data, sizeof(uchar)*imSize, hipMemcpyHostToDevice));// copy from the image data to the gpu memory you reserved
dim3 blocksize(16,16);
dim3 gridsize((image.cols-1)/blocksize.x+1, (image.rows-1)/blocksize.y+1, 1);
hipEventRecord(start);
hipLaunchKernelGGL(( fast), dim3(gridsize),dim3(blocksize), 0, 0, d_data, image.cols, image.rows,d_corner,gridsize.x,gridsize.y,threshold); // processed data on the gpu
//checkCudaErrors(hipDeviceSynchronize());
hipEventRecord(stop); hipEventSynchronize(stop);
hipLaunchKernelGGL(( nms), dim3(gridsize),dim3(blocksize), 0, 0, d_data,d_corner,image.cols,image.rows);
checkCudaErrors(hipMemcpy(h_corner,d_corner,sizeof(Corner)*imSize,hipMemcpyDeviceToHost));
float elptime;
hipEventElapsedTime(&elptime,start,stop);
//show the corner in the image
Mat image_color = imread(filename,1);
int point=0;
for(int i=0;i<imSize;i++)
{
if(h_corner[i].set!=0)
{
int x=i%image.cols;
int y=i/image.cols;
circle(image_color,Point(x,y),1,Scalar(0,255,0),-1,8,0);
point++;
}
}
cout<<"points:"<<point<<endl;
cout<<"Elapsed time:"<<elptime<<"ms"<<endl;
//printf("%x\n",0x7|((10>1)<<3));
//cout<<"the size of: "<<sizeof(corner)<<endl;
namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display.
imshow( "Display window", image_color ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
delete[] h_corner;
hipFree(d_corner);
hipFree(d_data);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 4866dfafc6b941b3edd38951d502b2faf380475b.cu |
#include <iostream>
#include <opencv2/opencv.hpp>
#include <helper_cuda.h>
#include <timer.h>
#include <string>
#include "corner.h"
#include "fast_cuda.h"
//__device__
//int position(int m,int n,int width)
//{
// int idx=m+n*width;
// return idx;
//}
//__global__
//void fast(uchar* image, int width, int height,Corner* d_corner,int gridsize_x, int gridsize_y, const int threshold)
//{
// __shared__ uchar patch[22][22];
// uint sp=0,sn=0;
// int m=blockDim.x*blockIdx.x+threadIdx.x;
// int n=blockDim.y*blockIdx.y+threadIdx.y;
// uint idx = m+n*width;
// uint idx_block=threadIdx.y*blockDim.x+threadIdx.x; //offset for pixel in patch
// d_corner[idx]={0,0}; //initialize the class member
// int patch_top_x=blockDim.x*blockIdx.x-3;
// int patch_top_y=blockDim.y*blockIdx.y-3;
// int idx_block_256=idx_block+256;
//
// //load into patch
// patch[idx_block%22][idx_block/22]=image[position(patch_top_x+idx_block%22,patch_top_y+idx_block/22,width)];
// if(idx_block_256<484)
// patch[(idx_block+256)%22][(idx_block+256)/22]=image[position(patch_top_x+idx_block_256%22,patch_top_y+idx_block_256/22,width)];
// __syncthreads();
//
// //detect
// int x=3+threadIdx.x;
// int y=3+threadIdx.y;
// if(m>2&&m<(width-3)&&n>2&&n<(height-3)) //detect the points away from the edges
// {
// uchar center_value = patch[x][y];
// sp |=(patch[x][y-3]>(center_value + threshold))<< 0;
// sp |=(patch[x+1][y-3]>(center_value + threshold))<< 1;
// sp |=(patch[x+2][y-2]>(center_value + threshold))<< 2;
// sp |=(patch[x+3][y-1]>(center_value + threshold))<< 3;
// sp |=(patch[x+3][y]>(center_value + threshold))<< 4;
// sp |=(patch[x+3][y+1]>(center_value + threshold))<< 5;
// sp |=(patch[x+2][y+2]>(center_value + threshold))<< 6;
// sp |=(patch[x+1][y+3]>(center_value + threshold))<< 7;
// sp |=(patch[x][y+3]>(center_value + threshold))<< 8;
// sp |=(patch[x-1][y+3]>(center_value + threshold))<< 9;
// sp |=(patch[x-2][y+2]>(center_value + threshold))<< 10;
// sp |=(patch[x-3][y+1]>(center_value + threshold))<< 11;
// sp |=(patch[x-3][y]>(center_value + threshold))<< 12;
// sp |=(patch[x-3][y-1]>(center_value + threshold))<< 13;
// sp |=(patch[x-2][y-2]>(center_value + threshold))<< 14;
// sp |=(patch[x-1][y-3]>(center_value + threshold))<< 15;
//
// sp+=sp<<16;
// uint sp1=sp&(sp<<1);
// uint sp2=sp1&(sp1<<2);
// uint sp3=sp2&(sp2<<4);
// uint sp4=sp3&(sp<<8);
// if(sp4!=0)
// {
// int value=abs(center_value-patch[x-1][y-1])+abs(center_value-patch[x][y-1])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x-1][y])+abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y+1]);
// d_corner[idx].value=value;
// d_corner[idx].set=1;
// }
// else
// {
// sn |=(patch[x][y-3]<(center_value - threshold))<< 0;
// sn |=(patch[x+1][y-3]<(center_value - threshold))<< 1;
// sn |=(patch[x+2][y-2]<(center_value - threshold))<< 2;
// sn |=(patch[x+3][y-1]<(center_value - threshold))<< 3;
// sn |=(patch[x+3][y]<(center_value - threshold))<< 4;
// sn |=(patch[x+3][y+1]<(center_value - threshold))<< 5;
// sn |=(patch[x+2][y+2]<(center_value - threshold))<< 6;
// sn |=(patch[x+1][y+3]<(center_value - threshold))<< 7;
// sn |=(patch[x][y+3]>(center_value - threshold))<< 8;
// sn |=(patch[x-1][y+3]<(center_value - threshold))<< 9;
// sn |=(patch[x-2][y+2]<(center_value - threshold))<< 10;
// sn |=(patch[x-3][y+1]<(center_value - threshold))<< 11;
// sn |=(patch[x-3][y]<(center_value - threshold))<< 12;
// sn |=(patch[x-3][y-1]<(center_value - threshold))<< 13;
// sn |=(patch[x-2][y-2]<(center_value - threshold))<< 14;
// sn |=(patch[x-1][y-3]<(center_value - threshold))<< 15;
// sn+=sn<<16;
// uint sn1=sn&(sn<<1);
// uint sn2=sn1&(sn1<<2);
// uint sn3=sn2&(sn2<<4);
// uint sn4=sn3&(sn<<8);
// if(sn4!=0)
// {
// int value=abs(center_value-patch[x-1][y-1])+abs(center_value-patch[x][y-1])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x-1][y])+abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y-1])+
// abs(center_value-patch[x+1][y])+abs(center_value-patch[x+1][y+1]);
// d_corner[idx].value=value;
// d_corner[idx].set=1;
// }
// }
// }
//
//}
//__global__
//void nms(uchar* image, Corner* d_corner,int width, int height)
//{
// int m=blockDim.x*blockIdx.x+threadIdx.x;
// int n=blockDim.y*blockIdx.y+threadIdx.y;
// int idx=n*width+m;
// if(d_corner[idx].set==1)
// {
// if(d_corner[position(m-1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m-1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n-1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
// if(d_corner[position(m+1,n+1,width)].value>d_corner[idx].value)
// {d_corner[idx].set=0;return;}
//
// }
//}
int main( void )
{
using namespace std;
using namespace cv;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
const int threshold=100;
string filename="/Users/macbookpro/Downloads/monash.jpg";
Mat image;
image = cv::imread(filename,0); // Read the file
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
uchar* d_data; // create a pointer
size_t imSize=image.cols*image.rows;
Corner* h_corner=new Corner[imSize];
Corner* d_corner;
checkCudaErrors(cudaMalloc((void**) &d_corner,sizeof(Corner)*imSize));
checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uchar)*imSize)); // create memory on the gpu and pass a pointer to the host
checkCudaErrors(cudaMemcpy(d_data, image.data, sizeof(uchar)*imSize, cudaMemcpyHostToDevice));// copy from the image data to the gpu memory you reserved
dim3 blocksize(16,16);
dim3 gridsize((image.cols-1)/blocksize.x+1, (image.rows-1)/blocksize.y+1, 1);
cudaEventRecord(start);
fast<<<gridsize,blocksize>>>(d_data, image.cols, image.rows,d_corner,gridsize.x,gridsize.y,threshold); // processed data on the gpu
//checkCudaErrors(cudaDeviceSynchronize());
cudaEventRecord(stop); cudaEventSynchronize(stop);
nms<<<gridsize,blocksize>>>(d_data,d_corner,image.cols,image.rows);
checkCudaErrors(cudaMemcpy(h_corner,d_corner,sizeof(Corner)*imSize,cudaMemcpyDeviceToHost));
float elptime;
cudaEventElapsedTime(&elptime,start,stop);
//show the corner in the image
Mat image_color = imread(filename,1);
int point=0;
for(int i=0;i<imSize;i++)
{
if(h_corner[i].set!=0)
{
int x=i%image.cols;
int y=i/image.cols;
circle(image_color,Point(x,y),1,Scalar(0,255,0),-1,8,0);
point++;
}
}
cout<<"points:"<<point<<endl;
cout<<"Elapsed time:"<<elptime<<"ms"<<endl;
//printf("%x\n",0x7|((10>1)<<3));
//cout<<"the size of: "<<sizeof(corner)<<endl;
namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display.
imshow( "Display window", image_color ); // Show our image inside it.
waitKey(0); // Wait for a keystroke in the window
delete[] h_corner;
cudaFree(d_corner);
cudaFree(d_data);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
61300e6a4fd5bbbe4ea6a19ff0327afd41df989a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <float.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
__device__ vec3 color(const ray& r, hitable **world) {
hit_record rec;
if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
return 0.5f*vec3(rec.normal.x()+1.0f, rec.normal.y()+1.0f, rec.normal.z()+1.0f);
}
else {
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
}
}
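// render (below) maps one thread to one pixel: (u, v) are the pixel's
// fractional coordinates in the image plane, the primary ray goes from origin
// through lower_left_corner + u*horizontal + v*vertical, and color() shades it
// either from the hit normal (remapped into [0,1]) or, on a miss, with a
// white-to-blue vertical gradient.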
__global__ void render(vec3 *fb, int max_x, int max_y, vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, hitable **world) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
float u = float(i) / float(max_x);
float v = float(j) / float(max_y);
ray r(origin, lower_left_corner + u*horizontal + v*vertical);
fb[pixel_index] = color(r, world);
}
__global__ void create_world(hitable **d_list, hitable **d_world) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world) {
delete *(d_list);
delete *(d_list+1);
delete *d_world;
}
int main() {
int nx = 1200;
int ny = 600;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// make our world of hitables
hitable **d_list;
checkCudaErrors(hipMalloc((void **)&d_list, 2*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_list,d_world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny,
vec3(-2.0, -1.0, -1.0),
vec3(4.0, 0.0, 0.0),
vec3(0.0, 2.0, 0.0),
vec3(0.0, 0.0, 0.0),
d_world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list,d_world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(fb));
// useful for cuda-memcheck --leak-check full
hipDeviceReset();
} | 61300e6a4fd5bbbe4ea6a19ff0327afd41df989a.cu | #include <iostream>
#include <float.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
__device__ vec3 color(const ray& r, hitable **world) {
hit_record rec;
if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
return 0.5f*vec3(rec.normal.x()+1.0f, rec.normal.y()+1.0f, rec.normal.z()+1.0f);
}
else {
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
}
}
__global__ void render(vec3 *fb, int max_x, int max_y, vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, hitable **world) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
float u = float(i) / float(max_x);
float v = float(j) / float(max_y);
ray r(origin, lower_left_corner + u*horizontal + v*vertical);
fb[pixel_index] = color(r, world);
}
__global__ void create_world(hitable **d_list, hitable **d_world) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world) {
delete *(d_list);
delete *(d_list+1);
delete *d_world;
}
int main() {
int nx = 1200;
int ny = 600;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// make our world of hitables
hitable **d_list;
checkCudaErrors(cudaMalloc((void **)&d_list, 2*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
create_world<<<1,1>>>(d_list,d_world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
render<<<blocks, threads>>>(fb, nx, ny,
vec3(-2.0, -1.0, -1.0),
vec3(4.0, 0.0, 0.0),
vec3(0.0, 2.0, 0.0),
vec3(0.0, 0.0, 0.0),
d_world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world<<<1,1>>>(d_list,d_world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(fb));
// useful for cuda-memcheck --leak-check full
cudaDeviceReset();
} |
9b63138342b589dce13ff6ba515823860ad2b3be.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
const char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
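// Elementwise binary op: evaluates shifted_chebyshev_polynomial_u_forward(x, n) for each
// (x, n) pair produced by the TensorIterator. The jiterator path compiles the string kernel
// at runtime; the fallback dispatches a precompiled lambda via gpu_kernel_with_scalars.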
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
| 9b63138342b589dce13ff6ba515823860ad2b3be.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
const char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
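// Elementwise binary op: evaluates shifted_chebyshev_polynomial_u_forward(x, n) for each
// (x, n) pair produced by the TensorIterator. The jiterator path compiles the string kernel
// at runtime; the fallback dispatches a precompiled lambda via gpu_kernel_with_scalars.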
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
|
c0c42fa31bb324dfb35943f21cbfc2cf5f66b9ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "blob.h"
#include "convolution.h"
#include "logging.h"
#include "timer.h"
#include <math.h>
void convolve_cpu(BLOB* in,BLOB* out,BLOB* w,int Kx,int Ky, conv_param_t* conv_param)
{
for(int group_id=0;group_id<conv_param->group;group_id++)
{
int delta = (out->d/conv_param->group);//Depth of output divided by number of groups.
int output_starting_depth = group_id*delta;
for(int out_depth=output_starting_depth;out_depth< output_starting_depth + delta;out_depth++)
{
int delta = (in->d/conv_param->group);//Depth of input divided by number of groups.
int in_depth_start = group_id*delta;
for(int in_depth=in_depth_start;in_depth<in_depth_start+delta;in_depth++)
{
for(int out_y=0;out_y<out->h;out_y++)
for(int out_x=0;out_x<out->w;out_x++)
for(int ky=0;ky<Ky;ky++)
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*conv_param->Sy+ky;
int in_x = out_x*conv_param->Sx+kx;
int weigth_y = in_depth-(group_id*(in->d/conv_param->group));
int weigth_x = ky*Kx + kx;
float input = blob_data(in, in_depth, in_y,in_x);
float weight = blob_data(w, out_depth, weigth_y, weigth_x);
blob_data(out,out_depth,out_y,out_x)+= input*weight;
}
}
}
}
}
__device__ int calc_blob_id(int z,int y,int x,int height,int width)
{
return z * height * width + y * width + x;
}
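// Row-major 3D index: e.g. with height=3 and width=4, element (z=1, y=2, x=3) maps to 1*3*4 + 2*4 + 3 = 23.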
// More complex convolution, runs only once so not really worth optimizing
__global__ void gpu_device_convolve_depth_parrallel
(float* data_in,float * data_weight, float* data_out // Data
,int Sx,int Sy // Sizes ...
,int in_w,int in_h,int in_d // input blob dimensions
,int w_w,int w_h // weight width and height
,int out_w,int out_h,int out_d // output width, height and depth
,int Ky,int Kx
,int group
,int in_depth_max)
{
unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
if(out_depth < out_d)
{
int out_id = calc_blob_id(out_depth,out_y,out_x,out_h,out_w);
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
for(int ky=0;ky<Ky;ky++)
{
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*Sy+ky;
int in_x = out_x*Sx+kx;
int weigth_y = in_depth;
int weight_x = ky*Kx + kx;
int weight_id = calc_blob_id(out_depth,weigth_y,weight_x,w_h,w_w);
int in_id = calc_blob_id(in_depth,in_y,in_x,in_h,in_w);
data_out[out_id] += data_weight[weight_id] * data_in[in_id];
}
}
}
}
}
// Old
// __global__ void gpu_device_convolve_depth_parrallel_simple
// (float* data_in,float * data_weight, float* data_out // Data
// ,int in_w,int in_h,int in_d // input blob dimensions
// ,int w_w,int w_h // weigth height and depth
// ,int out_w,int out_h,int out_d // output width and height
// ,int in_depth_max)
// {
// unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
// unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
// unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
// if(out_depth < out_d)
// {
// int out_id = calc_blob_id(out_depth,out_y,out_x,out_h,out_w);
// for(int in_depth=0;in_depth<in_depth_max;in_depth++)
// {
// int weight_id = calc_blob_id(out_depth,in_depth,0,w_h,w_w);
// int in_id = calc_blob_id(in_depth,out_y,out_x,in_h,in_w);
// data_out[out_id] += data_weight[weight_id] * data_in[in_id];
// }
// }
// }
/*
// multiplexing width and height may reduce the cost of address calculation
// This kernel is still the most expensive, and runs often
// input width and height is always equal to output width and height
__global__ void gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed
(float* data_in,float * data_weight, float* data_out // Data
,int w_h // weigth height and depth
,int in_out_wh,int out_d // input/output width * height, output depth
,int in_depth_max)
{
unsigned int in_out_xy = blockIdx.y*blockDim.y+ threadIdx.y;// 2d -> 1d pixel adress
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
if(out_depth < out_d && in_out_xy < in_out_wh)
{
float local_out;
int out_id = out_depth * in_out_wh + in_out_xy;
int in_id = in_out_xy; // per depth the same input values are loaded
int weight_id = out_depth * w_h; // Weigth is different per depth
local_out = data_out[out_id];
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
local_out += data_weight[weight_id] * data_in[in_id];
in_id+=in_out_wh;
weight_id++;
}
data_out[out_id] = local_out;
}
}
*/
// multiplexing width and height may reduce the cost of address calculation
// This kernel is still the most expensive, and runs often
// input width and height is always equal to output width and height
__global__ void gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed_shared_mem
(float* data_in,float * data_weight, float* data_out // Data
,int w_h // weight height
,int in_out_wh,int out_d // input/output width * height, output depth
,int in_depth_max)
{
unsigned int in_out_xy = blockIdx.y*blockDim.y+ threadIdx.y;// 2d -> 1d pixel address
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
int max_shared_mem_size = 0;//(blockIdx.y*blockDim.y+ threadIdx.y);//49152/sizeof(float);
if(out_depth < out_d && in_out_xy < in_out_wh)
{
__shared__ float shared_out[12288];//shared memory size = 12288
float local_out; //local storage of data output
int out_id = out_depth * in_out_wh + in_out_xy;
int in_id = in_out_xy; // per depth the same input values are loaded
int weight_id = out_depth * w_h; // Weight is different per output depth
if (out_id < in_out_wh && out_id < 12288)
shared_out[out_id] = data_out[out_id];
__syncthreads();
if (out_id >= 12288){
local_out = data_out[out_id];
}
else{
local_out = shared_out[out_id];
}
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
local_out += data_weight[weight_id] * data_in[in_id];
in_id+=in_out_wh;
weight_id++;
}
data_out[out_id] = local_out;
}
}
// Runs a lot but is already relatively quick
__global__ void gpu_device_convolve_naive_group_parrallel
(float* data_in,float * data_weight, float* data_out // Data
,int Sx,int Sy // Sizes ...
,int in_w,int in_h,int in_d // input blob dimensions
,int w_w,int w_h // weight width and height
,int out_w,int out_h,int out_d // output width, height and depth
,int Ky,int Kx
,int group)
{
unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int group_id = blockIdx.x*blockDim.x + threadIdx.x;
if(group_id < group)
{
int out_id = calc_blob_id(group_id,out_y,out_x,out_h,out_w);
for(int ky=0;ky<Ky;ky++)
{
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*Sy+ky;
int in_x = out_x*Sx+kx;
int weigth_y = group_id-(group_id*(in_d/group));
int weight_x = ky*Kx + kx;
int weight_id = calc_blob_id(group_id,weigth_y,weight_x,w_h,w_w);
int in_id = calc_blob_id(group_id,in_y,in_x,in_h,in_w);
data_out[out_id] += data_weight[weight_id] * data_in[in_id];
}
}
}
}
int get_next_pow2(int v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
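// Rounds up to the next power of two by smearing the highest set bit, e.g. 5 -> 8, 8 -> 8, 33 -> 64.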
int ceil_div(int val,int div)
{
int result = val/div;
if(result * div < val) ++result;
return result;
}
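// Integer ceiling division, e.g. ceil_div(10,3) == 4 and ceil_div(9,3) == 3.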
int _max(int val1,int val2)
{
if(val1 > val2)
{
return val1;
}
else
{
return val2;
}
}
int _min(int val1,int val2)
{
if(val1 < val2)
{
return val1;
}
else
{
return val2;
}
}
// HERE IT STARTS
void convolve_gpu(BLOB* in,BLOB* out,BLOB* w,int Kx,int Ky, conv_param_t* conv_param)
{
timer_start();
int in_depth_max = in->d/conv_param->group;//Depth of input divided by number of groups.
int out_depth_max = out->d/conv_param->group;//Depth of output divided by number of groups.
float* in_data;
float* out_data;
float* w_data;
blob2gpu(in_data, in);
blob2gpu(out_data, out);
blob2gpu(w_data, w);
int threadsPerBlockX = 1;
int numBlocksX= 1;
int threadsPerBlockYZ = 1;
int numBlocksYZ = 1;
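// Launch-strategy selection: ungrouped 1x1 stride-1 convolutions use the pixel/depth-multiplexed
// shared-memory kernel, other ungrouped convolutions parallelise over (depth, y, x), and grouped
// convolutions parallelise over (group, y, x).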
// Can we ignore the group for loop?
if(conv_param->group == 1)
{
// Can we ignore all these loops?
if(Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
threadsPerBlockX = _min(out_depth_max,1024);
numBlocksX= ceil_div(out_depth_max,threadsPerBlockX);
numBlocksYZ = ceil_div(out->h,threadsPerBlockYZ);
threadsPerBlockYZ = 1024/threadsPerBlockX;
numBlocksYZ = ceil_div(out->w * out->h, threadsPerBlockYZ); // cover all out->w * out->h output pixels
if(out->d == 1)
{
threadsPerBlockYZ = _min(1024,out->w * out->h);
numBlocksYZ = ceil_div(out->w * out->h,threadsPerBlockYZ);
threadsPerBlockX = 1;
numBlocksX = 1;
}
if(out->w == 1)
{
numBlocksYZ = 1;
threadsPerBlockYZ=1;
}
dim3 grid( numBlocksX,numBlocksYZ, 1 );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, 1);
// 1x1, stride-1 case: pixel/depth-parallel kernel that stages the output tile in shared memory
hipLaunchKernelGGL(( gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed_shared_mem), dim3(grid),dim3(block), 0, 0,
in_data,w_data,out_data
,w->h
,out->w*out->h,out->d
,in_depth_max);
}
else
{
//return convolve_cpu(in,out,w,Kx,Ky,conv_param);
numBlocksX=16;
numBlocksYZ = 7;
threadsPerBlockX = get_next_pow2(out_depth_max/numBlocksX+1);
threadsPerBlockYZ =out->h/numBlocksYZ;
dim3 grid( numBlocksX,numBlocksYZ, numBlocksYZ );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, threadsPerBlockYZ);
hipLaunchKernelGGL(( gpu_device_convolve_depth_parrallel), dim3(grid),dim3(block), 0, 0,
in_data,w_data,out_data
,conv_param->Sx,conv_param->Sy
,in->w,in->h,in->d
,w->w,w->h
,out->w,out->h,out->d
,Ky,Kx
,conv_param->group
,in_depth_max);
}
}
else
{
numBlocksX=16;
numBlocksYZ = 7;
threadsPerBlockYZ =out->h/numBlocksYZ;
threadsPerBlockX = get_next_pow2(conv_param->group/numBlocksX+1);
if(out->w == 1)
{
numBlocksYZ = 1;
threadsPerBlockYZ=1;
}
dim3 grid( numBlocksX,numBlocksYZ, numBlocksYZ );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, threadsPerBlockYZ);
//
hipLaunchKernelGGL(( gpu_device_convolve_naive_group_parrallel), dim3(grid),dim3(block), 0, 0,
in_data,w_data,out_data
,conv_param->Sx,conv_param->Sy
,in->w,in->h,in->d
,w->w,w->h
,out->w,out->h,out->d
,Ky,Kx
,conv_param->group
);
// }
}
#ifdef DEBUG
printf("groups : %i \n",conv_param->group);
printf("out_width %i, out_height %i , out_depth_max : %i \n",out->w,out->h,out_depth_max);
printf("in_width %i, in_height %i , in_depth_max : %i \n",in->w,in->h,in_depth_max);
printf("Kx : %i, Ky : %i , Sx : %i ,Sy : %i \n",Kx,Ky,conv_param->Sx,conv_param->Sy);
int threads_per_block = threadsPerBlockX * threadsPerBlockYZ * threadsPerBlockYZ;
if(conv_param->group == 1 && Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
//
printf("GRID : (x : %i) (y : % i) (z : %i) , ",numBlocksX,numBlocksYZ,1);
threads_per_block = threadsPerBlockX * threadsPerBlockYZ;
printf("BLOCK : (x : %i) (y : % i) (z : %i), (total tpb : %i) \n",threadsPerBlockX,threadsPerBlockYZ,1,threads_per_block);
}
else
{
printf("GRID : (x : %i) (y : % i) (z : %i) , ",numBlocksX,numBlocksYZ,numBlocksYZ);
printf("BLOCK : (x : %i) (y : % i) (z : %i), (total tpb : %i) \n",threadsPerBlockX,threadsPerBlockYZ,threadsPerBlockYZ,threads_per_block);
}
if(threads_per_block > 1024)
{
printf("TOO MANY THREADS PER BLOCK!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
}
#endif
gpu2blob(out,out_data);
hipFree(in_data);
hipFree(out_data);
hipFree(w_data);
if(conv_param->group == 1)
{
if(Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
writeToFile("depth_Parrallel_simple",(double)timer_stop());
}
else
{
writeToFile("depth_Parrallel_complex",(double)timer_stop());
}
}
else
{
writeToFile("group_Parrallel",(double)timer_stop());
}
timer_destroy();
} | c0c42fa31bb324dfb35943f21cbfc2cf5f66b9ec.cu | #include "blob.h"
#include "convolution.h"
#include "logging.h"
#include "timer.h"
#include <math.h>
void convolve_cpu(BLOB* in,BLOB* out,BLOB* w,int Kx,int Ky, conv_param_t* conv_param)
{
for(int group_id=0;group_id<conv_param->group;group_id++)
{
int delta = (out->d/conv_param->group);//Depth of output divided by number of groups.
int output_starting_depth = group_id*delta;
for(int out_depth=output_starting_depth;out_depth< output_starting_depth + delta;out_depth++)
{
int delta = (in->d/conv_param->group);//Depth of input divided by number of groups.
int in_depth_start = group_id*delta;
for(int in_depth=in_depth_start;in_depth<in_depth_start+delta;in_depth++)
{
for(int out_y=0;out_y<out->h;out_y++)
for(int out_x=0;out_x<out->w;out_x++)
for(int ky=0;ky<Ky;ky++)
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*conv_param->Sy+ky;
int in_x = out_x*conv_param->Sx+kx;
int weigth_y = in_depth-(group_id*(in->d/conv_param->group));
int weigth_x = ky*Kx + kx;
float input = blob_data(in, in_depth, in_y,in_x);
float weight = blob_data(w, out_depth, weigth_y, weigth_x);
blob_data(out,out_depth,out_y,out_x)+= input*weight;
}
}
}
}
}
__device__ int calc_blob_id(int z,int y,int x,int height,int width)
{
return z * height * width + y * width + x;
}
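// Row-major 3D index: e.g. with height=3 and width=4, element (z=1, y=2, x=3) maps to 1*3*4 + 2*4 + 3 = 23.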
// More complex convolution, runs only once so not really worth optimizing
__global__ void gpu_device_convolve_depth_parrallel
(float* data_in,float * data_weight, float* data_out // Data
,int Sx,int Sy // Sizes ...
,int in_w,int in_h,int in_d // input blob dimensions
,int w_w,int w_h // weight width and height
,int out_w,int out_h,int out_d // output width, height and depth
,int Ky,int Kx
,int group
,int in_depth_max)
{
unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
if(out_depth < out_d)
{
int out_id = calc_blob_id(out_depth,out_y,out_x,out_h,out_w);
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
for(int ky=0;ky<Ky;ky++)
{
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*Sy+ky;
int in_x = out_x*Sx+kx;
int weigth_y = in_depth;
int weight_x = ky*Kx + kx;
int weight_id = calc_blob_id(out_depth,weigth_y,weight_x,w_h,w_w);
int in_id = calc_blob_id(in_depth,in_y,in_x,in_h,in_w);
data_out[out_id] += data_weight[weight_id] * data_in[in_id];
}
}
}
}
}
// Old
// __global__ void gpu_device_convolve_depth_parrallel_simple
// (float* data_in,float * data_weight, float* data_out // Data
// ,int in_w,int in_h,int in_d // input blob dimensions
// ,int w_w,int w_h // weigth height and depth
// ,int out_w,int out_h,int out_d // output width and height
// ,int in_depth_max)
// {
// unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
// unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
// unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
// if(out_depth < out_d)
// {
// int out_id = calc_blob_id(out_depth,out_y,out_x,out_h,out_w);
// for(int in_depth=0;in_depth<in_depth_max;in_depth++)
// {
// int weight_id = calc_blob_id(out_depth,in_depth,0,w_h,w_w);
// int in_id = calc_blob_id(in_depth,out_y,out_x,in_h,in_w);
// data_out[out_id] += data_weight[weight_id] * data_in[in_id];
// }
// }
// }
/*
// multiplexing width and height may reduce the cost of address calculation
// This kernel is still the most expensive, and runs often
// input width and height is always equal to output width and height
__global__ void gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed
(float* data_in,float * data_weight, float* data_out // Data
,int w_h // weigth height and depth
,int in_out_wh,int out_d // input/output width * height, output depth
,int in_depth_max)
{
unsigned int in_out_xy = blockIdx.y*blockDim.y+ threadIdx.y;// 2d -> 1d pixel adress
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
if(out_depth < out_d && in_out_xy < in_out_wh)
{
float local_out;
int out_id = out_depth * in_out_wh + in_out_xy;
int in_id = in_out_xy; // per depth the same input values are loaded
int weight_id = out_depth * w_h; // Weigth is different per depth
local_out = data_out[out_id];
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
local_out += data_weight[weight_id] * data_in[in_id];
in_id+=in_out_wh;
weight_id++;
}
data_out[out_id] = local_out;
}
}
*/
// multiplexing width and height may reduce the cost of address calculation
// This kernel is still the most expensive, and runs often
// input width and height is always equal to output width and height
__global__ void gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed_shared_mem
(float* data_in,float * data_weight, float* data_out // Data
,int w_h // weight height
,int in_out_wh,int out_d // input/output width * height, output depth
,int in_depth_max)
{
unsigned int in_out_xy = blockIdx.y*blockDim.y+ threadIdx.y;// 2d -> 1d pixel address
unsigned int out_depth = blockIdx.x*blockDim.x + threadIdx.x;
int max_shared_mem_size = 0;//(blockIdx.y*blockDim.y+ threadIdx.y);//49152/sizeof(float);
if(out_depth < out_d && in_out_xy < in_out_wh)
{
__shared__ float shared_out[12288];//shared memory size = 12288
float local_out; //local storage of data output
int out_id = out_depth * in_out_wh + in_out_xy;
int in_id = in_out_xy; // per depth the same input values are loaded
int weight_id = out_depth * w_h; // Weight is different per output depth
if (out_id < in_out_wh && out_id < 12288)
shared_out[out_id] = data_out[out_id];
__syncthreads();
if (out_id >= 12288){
local_out = data_out[out_id];
}
else{
local_out = shared_out[out_id];
}
for(int in_depth=0;in_depth<in_depth_max;in_depth++)
{
local_out += data_weight[weight_id] * data_in[in_id];
in_id+=in_out_wh;
weight_id++;
}
data_out[out_id] = local_out;
}
}
// Runs a lot but is already relatively quick
__global__ void gpu_device_convolve_naive_group_parrallel
(float* data_in,float * data_weight, float* data_out // Data
,int Sx,int Sy // Sizes ...
,int in_w,int in_h,int in_d // input blob dimensions
,int w_w,int w_h // weight width and height
,int out_w,int out_h,int out_d // output width, height and depth
,int Ky,int Kx
,int group)
{
unsigned int out_x = blockIdx.z*blockDim.z+ threadIdx.z;
unsigned int out_y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int group_id = blockIdx.x*blockDim.x + threadIdx.x;
if(group_id < group)
{
int out_id = calc_blob_id(group_id,out_y,out_x,out_h,out_w);
for(int ky=0;ky<Ky;ky++)
{
for(int kx=0;kx<Kx;kx++)
{
int in_y = out_y*Sy+ky;
int in_x = out_x*Sx+kx;
int weigth_y = group_id-(group_id*(in_d/group));
int weight_x = ky*Kx + kx;
int weight_id = calc_blob_id(group_id,weigth_y,weight_x,w_h,w_w);
int in_id = calc_blob_id(group_id,in_y,in_x,in_h,in_w);
data_out[out_id] += data_weight[weight_id] * data_in[in_id];
}
}
}
}
int get_next_pow2(int v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
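// Rounds up to the next power of two by smearing the highest set bit, e.g. 5 -> 8, 8 -> 8, 33 -> 64.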
int ceil_div(int val,int div)
{
int result = val/div;
if(result * div < val) ++result;
return result;
}
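// Integer ceiling division, e.g. ceil_div(10,3) == 4 and ceil_div(9,3) == 3.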
int _max(int val1,int val2)
{
if(val1 > val2)
{
return val1;
}
else
{
return val2;
}
}
int _min(int val1,int val2)
{
if(val1 < val2)
{
return val1;
}
else
{
return val2;
}
}
// HERE IT STARTS
void convolve_gpu(BLOB* in,BLOB* out,BLOB* w,int Kx,int Ky, conv_param_t* conv_param)
{
timer_start();
int in_depth_max = in->d/conv_param->group;//Depth of input divided by number of groups.
int out_depth_max = out->d/conv_param->group;//Depth of output divided by number of groups.
float* in_data;
float* out_data;
float* w_data;
blob2gpu(in_data, in);
blob2gpu(out_data, out);
blob2gpu(w_data, w);
int threadsPerBlockX = 1;
int numBlocksX= 1;
int threadsPerBlockYZ = 1;
int numBlocksYZ = 1;
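// Launch-strategy selection: ungrouped 1x1 stride-1 convolutions use the pixel/depth-multiplexed
// shared-memory kernel, other ungrouped convolutions parallelise over (depth, y, x), and grouped
// convolutions parallelise over (group, y, x).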
// Can we ignore the group for loop?
if(conv_param->group == 1)
{
// Can we ignore all these loops?
if(Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
threadsPerBlockX = _min(out_depth_max,1024);
numBlocksX= ceil_div(out_depth_max,threadsPerBlockX);
numBlocksYZ = ceil_div(out->h,threadsPerBlockYZ);
threadsPerBlockYZ = 1024/threadsPerBlockX;
numBlocksYZ = ceil_div(out->w * out->h, threadsPerBlockYZ); // cover all out->w * out->h output pixels
if(out->d == 1)
{
threadsPerBlockYZ = _min(1024,out->w * out->h);
numBlocksYZ = ceil_div(out->w * out->h,threadsPerBlockYZ);
threadsPerBlockX = 1;
numBlocksX = 1;
}
if(out->w == 1)
{
numBlocksYZ = 1;
threadsPerBlockYZ=1;
}
dim3 grid( numBlocksX,numBlocksYZ, 1 );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, 1);
// 1x1, stride-1 case: pixel/depth-parallel kernel that stages the output tile in shared memory
gpu_device_convolve_depth_parrallel_simple_height_width_multiplexed_shared_mem<<<grid,block>>>(
in_data,w_data,out_data
,w->h
,out->w*out->h,out->d
,in_depth_max);
}
else
{
//return convolve_cpu(in,out,w,Kx,Ky,conv_param);
numBlocksX=16;
numBlocksYZ = 7;
threadsPerBlockX = get_next_pow2(out_depth_max/numBlocksX+1);
threadsPerBlockYZ =out->h/numBlocksYZ;
dim3 grid( numBlocksX,numBlocksYZ, numBlocksYZ );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, threadsPerBlockYZ);
gpu_device_convolve_depth_parrallel<<<grid,block>>>(
in_data,w_data,out_data
,conv_param->Sx,conv_param->Sy
,in->w,in->h,in->d
,w->w,w->h
,out->w,out->h,out->d
,Ky,Kx
,conv_param->group
,in_depth_max);
}
}
else
{
numBlocksX=16;
numBlocksYZ = 7;
threadsPerBlockYZ =out->h/numBlocksYZ;
threadsPerBlockX = get_next_pow2(conv_param->group/numBlocksX+1);
if(out->w == 1)
{
numBlocksYZ = 1;
threadsPerBlockYZ=1;
}
dim3 grid( numBlocksX,numBlocksYZ, numBlocksYZ );
dim3 block(threadsPerBlockX, threadsPerBlockYZ, threadsPerBlockYZ);
//
gpu_device_convolve_naive_group_parrallel<<<grid,block>>>(
in_data,w_data,out_data
,conv_param->Sx,conv_param->Sy
,in->w,in->h,in->d
,w->w,w->h
,out->w,out->h,out->d
,Ky,Kx
,conv_param->group
);
// }
}
#ifdef DEBUG
printf("groups : %i \n",conv_param->group);
printf("out_width %i, out_height %i , out_depth_max : %i \n",out->w,out->h,out_depth_max);
printf("in_width %i, in_height %i , in_depth_max : %i \n",in->w,in->h,in_depth_max);
printf("Kx : %i, Ky : %i , Sx : %i ,Sy : %i \n",Kx,Ky,conv_param->Sx,conv_param->Sy);
int threads_per_block = threadsPerBlockX * threadsPerBlockYZ * threadsPerBlockYZ;
if(conv_param->group == 1 && Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
//
printf("GRID : (x : %i) (y : % i) (z : %i) , ",numBlocksX,numBlocksYZ,1);
threads_per_block = threadsPerBlockX * threadsPerBlockYZ;
printf("BLOCK : (x : %i) (y : % i) (z : %i), (total tpb : %i) \n",threadsPerBlockX,threadsPerBlockYZ,1,threads_per_block);
}
else
{
printf("GRID : (x : %i) (y : % i) (z : %i) , ",numBlocksX,numBlocksYZ,numBlocksYZ);
printf("BLOCK : (x : %i) (y : % i) (z : %i), (total tpb : %i) \n",threadsPerBlockX,threadsPerBlockYZ,threadsPerBlockYZ,threads_per_block);
}
if(threads_per_block > 1024)
{
printf("TOO MANY THREADS PER BLOCK!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
}
#endif
gpu2blob(out,out_data);
cudaFree(in_data);
cudaFree(out_data);
cudaFree(w_data);
if(conv_param->group == 1)
{
if(Ky == 1 && Kx == 1 && conv_param->Sx == 1 && conv_param->Sy == 1)
{
writeToFile("depth_Parrallel_simple",(double)timer_stop());
}
else
{
writeToFile("depth_Parrallel_complex",(double)timer_stop());
}
}
else
{
writeToFile("group_Parrallel",(double)timer_stop());
}
timer_destroy();
} |
d69fda59aeb6030c72a82ae428684b911c106bdb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "hip/hip_runtime.h"
__global__ void kernelAddMatrices1D(int N, double *A, double *B, double *C) {
int threadId = threadIdx.x;
int blockId = blockIdx.x;
int blockSize = blockDim.x; //32
int id = threadId + blockId*blockSize;
C[id] = A[id] + B[id];
}
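// Same elementwise addition with a 2D grid of 2D blocks; nx is hard-coded to match the
// 1024x1024 matrices allocated in main().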
__global__ void kernelAddMatrices2D(int N, double *A, double* B, double *C) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
C[i+j*nx] = A[i+j*nx] + B[i+j*nx];
}
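// Naive transpose: stores to AT are coalesced, but each thread loads A at a stride of nx
// doubles, so the global reads are uncoalesced.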
__global__ void kernelMatrixTranspose2D_v1(double *A, double *AT) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
AT[i+j*nx] = A[j+i*nx];
}
//do the transpose using shared memory to get better device memory accesses
__global__ void kernelMatrixTranspose2D_v2(double *A, double *AT) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
__shared__ double s_A[32][32];
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
//fetch a block of A into the shared array s_A
s_A[tIdx][tIdy] = A[i+j*nx]; //read from A and write the block's transpose
__syncthreads(); // barrier the threads on this block so all the
// writes to s_A are completed
int it = tIdx + bIdy*bSizey; //x coordinate in the transposed matrix (block indices swapped)
int jt = tIdy + bIdx*bSizex; //y coordinate in the transposed matrix
AT[it+jt*nx] = s_A[tIdy][tIdx]; // coalesced write of the transposed tile
}
int main(int argc, char **argv) {
// dimensions of the matrices
int nx = 1024;
int ny = 1024;
int N = nx*ny;
//seed RNG
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
// allocate storage
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b
for (int n=0;n<N;n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
double hostStart = clock();
// c = a + b
for (int j=0;j<ny;j++) {
for (int i=0;i<nx;i++) {
int id = i+j*nx;
h_c[id] = h_a[id] + h_b[id];
}
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs
size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs
size_t totalMem = (inputMem+outMem);
printf("The host took %f seconds to add a and b \n", hostTime);
printf("The effective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime));
printf("\n");
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with hipMalloc
hipMalloc(&d_a,N*sizeof(double));
hipMalloc(&d_b,N*sizeof(double));
hipMalloc(&d_c,N*sizeof(double));
double copyStart = clock();
//copy data from the host to the device
hipMemcpy(d_a,h_a,N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,N*sizeof(double),hipMemcpyHostToDevice);
double copyEnd = clock();
double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data to device. \n",copyTime);
printf("The effective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime));
printf("\n");
//at this point the data is allocated and populated on the device
int Nthreads = 32; //number of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads;
double deviceStart = clock();
hipLaunchKernelGGL(( kernelAddMatrices1D) , dim3(Nblocks) , dim3(Nthreads) , 0, 0, N, d_a, d_b, d_c);
hipDeviceSynchronize();
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 1D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The effective bandwidth of the 1D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
//use 2D thread blocks instead
int Nthreadsx = 32;
int Nthreadsy = 32;
int Nthreadsz = 1;
//declare the size of the block
// Nthreadsx*Nthreadsy*Nthreadsz <= 1024
dim3 Nthreads3(Nthreadsx,Nthreadsy,Nthreadsz);
//set number of blocks
int Nblocksx = (nx+Nthreadsx-1)/Nthreadsx;
int Nblocksy = (ny+Nthreadsy-1)/Nthreadsy;
int Nblocksz = 1;
dim3 Nblocks3(Nblocksx,Nblocksy,Nblocksz);
deviceStart = clock();
hipLaunchKernelGGL(( kernelAddMatrices2D) , dim3(Nblocks3) , dim3(Nthreads3) , 0, 0, N, d_a, d_b, d_c);
hipDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 2D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The effective bandwidth of the 2D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
printf("The device was %f times faster\n", hostTime/deviceTime);
copyStart = clock();
hipMemcpy(h_c,d_c,N*sizeof(double),hipMemcpyDeviceToHost);
copyEnd = clock();
copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data back to the host. \n",copyTime);
printf("The effective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime));
deviceStart = clock();
// C = A^T
hipLaunchKernelGGL(( kernelMatrixTranspose2D_v1) , dim3(Nblocks3) , dim3(Nthreads3) , 0, 0, d_a, d_c);
hipDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v1 transpose kernel took %f seconds to transpose a \n", deviceTime);
printf("The effective bandwidth of the v1 transpose kernel was: %f GB/s\n", inputMem/(1E9*deviceTime)); // a transpose reads N and writes N doubles
deviceStart = clock();
// C = A^T
hipLaunchKernelGGL(( kernelMatrixTranspose2D_v2), dim3(Nblocks3) , dim3(Nthreads3) , 0, 0, d_a, d_c);
hipDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v2 transpose kernel took %f seconds to transpose a \n", deviceTime);
printf("The effective bandwidth of the v2 transpose kernel was: %f GB/s\n", inputMem/(1E9*deviceTime)); // a transpose reads N and writes N doubles
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
| d69fda59aeb6030c72a82ae428684b911c106bdb.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda.h"
__global__ void kernelAddMatrices1D(int N, double *A, double *B, double *C) {
int threadId = threadIdx.x;
int blockId = blockIdx.x;
int blockSize = blockDim.x; //32
int id = threadId + blockId*blockSize;
C[id] = A[id] + B[id];
}
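// Same elementwise addition with a 2D grid of 2D blocks; nx is hard-coded to match the
// 1024x1024 matrices allocated in main().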
__global__ void kernelAddMatrices2D(int N, double *A, double* B, double *C) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
C[i+j*nx] = A[i+j*nx] + B[i+j*nx];
}
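// Naive transpose: stores to AT are coalesced, but each thread loads A at a stride of nx
// doubles, so the global reads are uncoalesced.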
__global__ void kernelMatrixTranspose2D_v1(double *A, double *AT) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
AT[i+j*nx] = A[j+i*nx];
}
//do the transpose using shared memory to get better device memory accesses
__global__ void kernelMatrixTranspose2D_v2(double *A, double *AT) {
int tIdx = threadIdx.x;
int tIdy = threadIdx.y;
int bIdx = blockIdx.x;
int bIdy = blockIdx.y;
int bSizex = blockDim.x;
int bSizey = blockDim.y;
__shared__ double s_A[32][32];
int i = tIdx + bIdx*bSizex; //unique x coordinate
int j = tIdy + bIdy*bSizey; //unique y coordinate
int nx = 1024;
//fetch a block of A into the shared array s_A
s_A[tIdx][tIdy] = A[i+j*nx]; //read from A and write the block's transpose
__syncthreads(); // barrier the threads on this block so all the
// writes to s_A are completed
int it = tIdx + bIdy*bSizey; //x coordinate in the transposed matrix (block indices swapped)
int jt = tIdy + bIdx*bSizex; //y coordinate in the transposed matrix
AT[it+jt*nx] = s_A[tIdy][tIdx]; // coalesced write of the transposed tile
}
int main(int argc, char **argv) {
// dimensions of the matrices
int nx = 1024;
int ny = 1024;
int N = nx*ny;
//seed RNG
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
// allocate storage
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b
for (int n=0;n<N;n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
double hostStart = clock();
// c = a + b
for (int j=0;j<ny;j++) {
for (int i=0;i<nx;i++) {
int id = i+j*nx;
h_c[id] = h_a[id] + h_b[id];
}
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs
size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs
size_t totalMem = (inputMem+outMem);
printf("The host took %f seconds to add a and b \n", hostTime);
printf("The effective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime));
printf("\n");
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with cudaMalloc
cudaMalloc(&d_a,N*sizeof(double));
cudaMalloc(&d_b,N*sizeof(double));
cudaMalloc(&d_c,N*sizeof(double));
double copyStart = clock();
//copy data from the host to the device
cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice);
double copyEnd = clock();
double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data to device. \n",copyTime);
printf("The effective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime));
printf("\n");
//at this point the data is allocated and populated on the device
int Nthreads = 32; //number of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads;
double deviceStart = clock();
kernelAddMatrices1D <<<Nblocks , Nthreads >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize();
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 1D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The effective bandwidth of the 1D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
//use 2D thread blocks instead
int Nthreadsx = 32;
int Nthreadsy = 32;
int Nthreadsz = 1;
//declare the size of the block
// Nthreadsx*Nthreadsy*Nthreadsz <= 1024
dim3 Nthreads3(Nthreadsx,Nthreadsy,Nthreadsz);
//set number of blocks
int Nblocksx = (nx+Nthreadsx-1)/Nthreadsx;
int Nblocksy = (ny+Nthreadsy-1)/Nthreadsy;
int Nblocksz = 1;
dim3 Nblocks3(Nblocksx,Nblocksy,Nblocksz);
deviceStart = clock();
kernelAddMatrices2D <<<Nblocks3 , Nthreads3 >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 2D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The effective bandwidth of the 2D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
printf("The device was %f times faster\n", hostTime/deviceTime);
copyStart = clock();
cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost);
copyEnd = clock();
copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data back to the host. \n",copyTime);
printf("The effective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime));
deviceStart = clock();
// C = A^T
kernelMatrixTranspose2D_v1 <<<Nblocks3 , Nthreads3 >>>(d_a, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v1 transpose kernel took %f seconds to transpose a \n", deviceTime);
printf("The effective bandwidth of the v1 transpose kernel was: %f GB/s\n", inputMem/(1E9*deviceTime)); // a transpose reads N and writes N doubles
deviceStart = clock();
// C = A^T
kernelMatrixTranspose2D_v2<<<Nblocks3 , Nthreads3 >>>(d_a, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v2 transpose kernel took %f seconds to transpose a \n", deviceTime);
printf("The effective bandwidth of the v2 transpose kernel was: %f GB/s\n", inputMem/(1E9*deviceTime)); // a transpose reads N and writes N doubles
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
8735eefeb6a98894d3a8cd8c874eb84bbca57fe1.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef __HIPCC__
#include "management/world.h"
#endif
#define TINYOBJLOADER_IMPLEMENTATION
#include "tinyobjloader/tiny_obj_loader.h"
#include <algorithm>
#include <gsl/gsl>
#include <iostream>
#include <stdexcept>
#define DEBUG_OUTPUT 0
world_geometry::world_geometry() = default;
world_geometry::world_geometry(const std::string& file_name) { load(file_name); }
// Wrap the tiny_obj library and do the internal data structuring
namespace __detail
{
struct loaded_data {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
};
loaded_data load(const char* file_name)
{
Expects(file_name != nullptr);
loaded_data result;
std::string err;
bool ret = tinyobj::LoadObj(&result.attrib, &result.shapes, &result.materials, &err,
file_name);
if (!err.empty()) {
std::cerr << err << std::endl;
}
if (ret == false) {
std::string error_message = "Could not read the file ";
throw std::invalid_argument{error_message + file_name};
}
Ensures(result.attrib.vertices.size() % 3 == 0);
Ensures(result.attrib.normals.size() % 3 == 0);
Ensures(result.attrib.texcoords.size() % 2 == 0);
return result;
}
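// Packs tinyobjloader's flat [x0, y0, z0, x1, y1, z1, ...] vertex array into one coord per vertex.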
thrust::host_vector<coord> build_coords(const std::vector<tinyobj::real_t>& vertices)
{
Expects(vertices.size() % 3 == 0ul);
thrust::host_vector<coord> v;
v.reserve(vertices.size() / 3);
/// See data format for tinyobjloader
for (std::size_t i = 0; i < vertices.size(); i += 3)
v.push_back({vertices[i], vertices[i + 1], vertices[i + 2]});
return v;
}
thrust::host_vector<phong_material>
build_materials(const std::vector<tinyobj::material_t>& materials)
{
thrust::host_vector<phong_material> m;
m.reserve(materials.size());
for (const auto& mat : materials) {
m.push_back(phong_material(static_cast<const float*>(mat.specular),
static_cast<const float*>(mat.diffuse),
static_cast<const float*>(mat.ambient),
static_cast<float>(mat.shininess)));
}
return m;
}
struct VertexData {
// indices to the vertices
int i0;
int i1;
int i2;
};
VertexData vertex_information(const thrust::device_vector<coord>& vertices,
const std::vector<tinyobj::index_t>& indices,
const std::size_t index_offset)
{
// all indices of the face
const auto idx0 = indices[index_offset + 0].vertex_index;
const auto idx1 = indices[index_offset + 1].vertex_index;
const auto idx2 = indices[index_offset + 2].vertex_index;
Expects(static_cast<std::size_t>(idx0) < vertices.size() && idx0 >= 0);
Expects(static_cast<std::size_t>(idx1) < vertices.size() && idx1 >= 0);
Expects(static_cast<std::size_t>(idx2) < vertices.size() && idx2 >= 0);
VertexData p;
p.i0 = idx0;
p.i1 = idx1;
p.i2 = idx2;
return p;
}
struct NormalData {
// indices for normals
int i0;
int i1;
int i2;
int f;
};
// will insert face normal if necessary, otherwise just get all pointers right
NormalData normal_information(const thrust::device_vector<coord>& vertices,
const std::vector<tinyobj::index_t>& indices,
const VertexData vd, const std::size_t index_offset,
thrust::device_vector<coord>& normals)
{
NormalData nd;
#ifdef NNDEBUG
std::clog << "Input: " << vd.i0 << " " << vd.i1 << " " << vd.i2 << std::endl;
for (const auto& i : indices)
std::clog << i.normal_index << std::endl;
#endif
// all vertex normals
const auto n_idx0 = indices[index_offset + 0].normal_index;
const auto n_idx1 = indices[index_offset + 1].normal_index;
const auto n_idx2 = indices[index_offset + 2].normal_index;
// if normals not in file
// calculate face normal
// insert face normal
// set all normals and indices to the inserted normal
// CHECK
// else // normals in file
// if all normals identical
// set all pointers and indices to first normal
// else // normals are not identical
// set vertex normals to the one in file
// calculate face normal from vertices
// AXIOM: either all normals are set, or none are!
if (n_idx0 == -1) // vertex normals not in file
{
Expects(n_idx1 == -1);
Expects(n_idx2 == -1);
// calculate face normal
const coord p0 = vertices[vd.i0];
const coord p1 = vertices[vd.i1];
const coord p2 = vertices[vd.i2];
const coord n = normalize(cross(p1 - p0, p2 - p1));
// push back normal, and get the last index (the created normal)
normals.push_back(n);
const auto fn_idx = normals.size() - 1;
nd.i0 = fn_idx;
nd.i1 = fn_idx;
nd.i2 = fn_idx;
nd.f = fn_idx;
} else // vertex normals in file
{
if ((n_idx0 == n_idx1) && (n_idx1 == n_idx2)) // normals are identical
{
// all normals (including face normal) are the same
nd.i0 = n_idx0;
nd.i1 = n_idx0;
nd.i2 = n_idx0;
nd.f = n_idx0;
} else // normals are different
{
// vertex normals from file
nd.i0 = n_idx0;
nd.i1 = n_idx1;
nd.i2 = n_idx2;
#if DEBUG_OUTPUT == 1
std::clog << "ni0: " << nd.i0 << '\t' << "ni1: " << nd.i1 << '\t'
<< "ni2: " << nd.i2 << '\t' << "nmax: " << normals.size() << '\t'
<< '\t' << "vi0: " << vd.i0 << '\t' << "vi1: " << vd.i1 << '\t'
<< "vi2: " << vd.i2 << '\t' << "vmax: " << vertices.size() << '\n';
#endif
// calculate face normal
const coord p0 = vertices[vd.i0];
const coord p1 = vertices[vd.i1];
const coord p2 = vertices[vd.i2];
const coord n = normalize(cross(p1 - p0, p2 - p1));
// push back normal, and get the last index (the created normal)
normals.push_back(n);
const auto fn_idx = normals.size() - 1;
nd.f = fn_idx;
}
}
Ensures(static_cast<std::size_t>(nd.i0) < normals.size() && nd.i0 >= 0);
Ensures(static_cast<std::size_t>(nd.i1) < normals.size() && nd.i1 >= 0);
Ensures(static_cast<std::size_t>(nd.i2) < normals.size() && nd.i2 >= 0);
return nd;
}
/// Connects the vertices to triangles, assigns them a normal and material.
/// Normals will be calculated if necessary
thrust::device_vector<triangle>
build_faces(const std::vector<tinyobj::shape_t>& shapes,
const thrust::device_vector<coord>& vertices,
thrust::device_vector<coord>& normals,
const thrust::device_vector<phong_material>& materials)
{
thrust::device_vector<triangle> triangles;
// temporarily store normal indices, because the normal vector could grow, and
// invalidate all pointers
thrust::host_vector<NormalData> face_normal_information;
thrust::host_vector<VertexData> face_vertex_information;
thrust::host_vector<const phong_material*> face_materials;
for (const auto& shape : shapes) {
// we will use only triangles
Expects(std::all_of(std::begin(shape.mesh.num_face_vertices),
std::end(shape.mesh.num_face_vertices),
[](int i) { return i == 3; }));
std::size_t index_offset = 0;
// all faces of the shape
for (std::size_t f = 0; f < shape.mesh.num_face_vertices.size(); ++f) {
const auto td = vertex_information(vertices, shape.mesh.indices, index_offset);
// updates normals if necessary
const auto nd = normal_information(vertices, shape.mesh.indices, td,
index_offset, normals);
// WARN: this writes the pointer on the device, as material pointer.
// if you dereference material on the cpu, that results in a segfault
// on cuda devices!
const phong_material* m_ptr =
shape.mesh.material_ids[f] < 0 ?
nullptr :
(&materials[shape.mesh.material_ids[f]]).get();
// add triangle to world, and store the normal information for later
// connection
face_vertex_information.push_back(td);
face_normal_information.push_back(nd);
face_materials.push_back(m_ptr);
index_offset += shape.mesh.num_face_vertices[f];
}
}
Expects(face_normal_information.size() == face_vertex_information.size());
Expects(face_normal_information.size() == face_materials.size());
Expects(face_normal_information.size() > 0);
// connect all normals with the triangle, the normal vector is expected to not grow
// anymore
for (std::size_t i = 0; i < face_normal_information.size(); ++i) {
const auto& nd = face_normal_information[i];
const auto& td = face_vertex_information[i];
triangle t((&vertices[td.i0]).get(), (&vertices[td.i1]).get(),
(&vertices[td.i2]).get(), (&normals[nd.f]).get());
t.p0_normal((&normals[nd.i0]).get());
t.p1_normal((&normals[nd.i1]).get());
t.p2_normal((&normals[nd.i2]).get());
t.material(face_materials[i]);
triangles.push_back(t);
}
Ensures(triangles.size() > 0);
Ensures(normals.size() > 0);
return triangles;
}
} // namespace __detail
void world_geometry::load(const std::string& file_name)
{
const auto data = __detail::load(file_name.c_str());
__shape_count = data.shapes.size();
// Handle all Vertices
__vertices = __detail::build_coords(data.attrib.vertices);
Expects(__vertices.size() == data.attrib.vertices.size() / 3ul);
// Handle all normals
__normals = __detail::build_coords(data.attrib.normals);
// Handle all Materials
__materials = __detail::build_materials(data.materials);
Expects(__materials.size() == data.materials.size());
// Connect the triangles and give their surfaces a material, creates normals if
// necessary!
__triangles = __detail::build_faces(data.shapes, __vertices, __normals, __materials);
Expects(__normals.size() > 0ul);
}
void world_geometry::add_light(phong_light l, coord position)
{
__lights.push_back(light_source(l, position));
}
world_geometry::data_handle::data_handle(const thrust::device_vector<coord>& vert,
const thrust::device_vector<coord>& norm,
const thrust::device_vector<triangle>& tria,
const thrust::device_vector<phong_material>& mat,
const thrust::device_vector<light_source>& light)
: vertices{vert.data().get(), vert.size()}
, normals{norm.data().get(), norm.size()}
, triangles{tria.data().get(), tria.size()}
, materials{mat.data().get(), mat.size()}
, lights{light.data().get(), light.size()}
{
}
world_geometry::data_handle world_geometry::handle() const noexcept
{
return data_handle(__vertices, __normals, __triangles, __materials, __lights);
}
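// Example usage (a sketch only; assumes a host-side main() elsewhere, an OBJ file named
// "scene.obj" and an already-constructed phong_light value named key_light):
//
//   world_geometry world("scene.obj");             // parse the OBJ and upload geometry
//   world.add_light(key_light, coord{0.f, 10.f, 0.f});
//   const auto scene = world.handle();             // raw device spans to pass to kernels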
| 8735eefeb6a98894d3a8cd8c874eb84bbca57fe1.cu | #ifdef __CUDACC__
#include "management/world.h"
#endif
#define TINYOBJLOADER_IMPLEMENTATION
#include "tinyobjloader/tiny_obj_loader.h"
#include <algorithm>
#include <gsl/gsl>
#include <iostream>
#include <stdexcept>
#define DEBUG_OUTPUT 0
world_geometry::world_geometry() = default;
world_geometry::world_geometry(const std::string& file_name) { load(file_name); }
// Wrap the tiny_obj library and do the internal data structuring
namespace __detail
{
struct loaded_data {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
};
loaded_data load(const char* file_name)
{
Expects(file_name != nullptr);
loaded_data result;
std::string err;
bool ret = tinyobj::LoadObj(&result.attrib, &result.shapes, &result.materials, &err,
file_name);
if (!err.empty()) {
std::cerr << err << std::endl;
}
if (ret == false) {
std::string error_message = "Could not read the file ";
throw std::invalid_argument{error_message + file_name};
}
Ensures(result.attrib.vertices.size() % 3 == 0);
Ensures(result.attrib.normals.size() % 3 == 0);
Ensures(result.attrib.texcoords.size() % 2 == 0);
return result;
}
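// Packs tinyobjloader's flat [x0, y0, z0, x1, y1, z1, ...] vertex array into one coord per vertex.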
thrust::host_vector<coord> build_coords(const std::vector<tinyobj::real_t>& vertices)
{
Expects(vertices.size() % 3 == 0ul);
thrust::host_vector<coord> v;
v.reserve(vertices.size() / 3);
/// See data format for tinyobjloader
for (std::size_t i = 0; i < vertices.size(); i += 3)
v.push_back({vertices[i], vertices[i + 1], vertices[i + 2]});
return v;
}
thrust::host_vector<phong_material>
build_materials(const std::vector<tinyobj::material_t>& materials)
{
thrust::host_vector<phong_material> m;
m.reserve(materials.size());
for (const auto& mat : materials) {
m.push_back(phong_material(static_cast<const float*>(mat.specular),
static_cast<const float*>(mat.diffuse),
static_cast<const float*>(mat.ambient),
static_cast<float>(mat.shininess)));
}
return m;
}
struct VertexData {
// indices to the vertices
int i0;
int i1;
int i2;
};
VertexData vertex_information(const thrust::device_vector<coord>& vertices,
const std::vector<tinyobj::index_t>& indices,
const std::size_t index_offset)
{
// all indices of the face
const auto idx0 = indices[index_offset + 0].vertex_index;
const auto idx1 = indices[index_offset + 1].vertex_index;
const auto idx2 = indices[index_offset + 2].vertex_index;
Expects(static_cast<std::size_t>(idx0) < vertices.size() && idx0 >= 0);
Expects(static_cast<std::size_t>(idx1) < vertices.size() && idx1 >= 0);
Expects(static_cast<std::size_t>(idx2) < vertices.size() && idx2 >= 0);
VertexData p;
p.i0 = idx0;
p.i1 = idx1;
p.i2 = idx2;
return p;
}
struct NormalData {
// indices for normals
int i0;
int i1;
int i2;
int f;
};
// will insert face normal if necessary, otherwise just get all pointers right
NormalData normal_information(const thrust::device_vector<coord>& vertices,
const std::vector<tinyobj::index_t>& indices,
const VertexData vd, const std::size_t index_offset,
thrust::device_vector<coord>& normals)
{
NormalData nd;
#ifdef NNDEBUG
std::clog << "Input: " << vd.i0 << " " << vd.i1 << " " << vd.i2 << std::endl;
for (const auto& i : indices)
std::clog << i.normal_index << std::endl;
#endif
// all vertex normals
const auto n_idx0 = indices[index_offset + 0].normal_index;
const auto n_idx1 = indices[index_offset + 1].normal_index;
const auto n_idx2 = indices[index_offset + 2].normal_index;
// if normals not in file
// calculate face normal
// insert face normal
// set all normals and indices to the inserted normal
// CHECK
// else // normals in file
// if all normals identical
// set all pointers and indices to first normal
// else // normals are not identical
// set vertex normals to the one in file
// calculate face normal from vertices
// AXIOM: either all normals are set, or none are!
if (n_idx0 == -1) // vertex normals not in file
{
Expects(n_idx1 == -1);
Expects(n_idx2 == -1);
// calculate face normal
const coord p0 = vertices[vd.i0];
const coord p1 = vertices[vd.i1];
const coord p2 = vertices[vd.i2];
const coord n = normalize(cross(p1 - p0, p2 - p1));
// push back normal, and get the last index (the created normal)
normals.push_back(n);
const auto fn_idx = normals.size() - 1;
nd.i0 = fn_idx;
nd.i1 = fn_idx;
nd.i2 = fn_idx;
nd.f = fn_idx;
} else // vertex normals in file
{
if ((n_idx0 == n_idx1) && (n_idx1 == n_idx2)) // normals are identical
{
// all normals (including face normal) are the same
nd.i0 = n_idx0;
nd.i1 = n_idx0;
nd.i2 = n_idx0;
nd.f = n_idx0;
} else // normals are different
{
// vertex normals from file
nd.i0 = n_idx0;
nd.i1 = n_idx1;
nd.i2 = n_idx2;
#if DEBUG_OUTPUT == 1
std::clog << "ni0: " << nd.i0 << '\t' << "ni1: " << nd.i1 << '\t'
<< "ni2: " << nd.i2 << '\t' << "nmax: " << normals.size() << '\t'
<< '\t' << "vi0: " << vd.i0 << '\t' << "vi1: " << vd.i1 << '\t'
<< "vi2: " << vd.i2 << '\t' << "vmax: " << vertices.size() << '\n';
#endif
// calculate face normal
const coord p0 = vertices[vd.i0];
const coord p1 = vertices[vd.i1];
const coord p2 = vertices[vd.i2];
const coord n = normalize(cross(p1 - p0, p2 - p1));
// push back normal, and get the last index (the created normal)
normals.push_back(n);
const auto fn_idx = normals.size() - 1;
nd.f = fn_idx;
}
}
Ensures(static_cast<std::size_t>(nd.i0) < normals.size() && nd.i0 >= 0);
Ensures(static_cast<std::size_t>(nd.i1) < normals.size() && nd.i1 >= 0);
Ensures(static_cast<std::size_t>(nd.i2) < normals.size() && nd.i2 >= 0);
return nd;
}
/// Connects the vertices to triangles, assigns them a normal and material.
/// Normals will be calculated if necessary
thrust::device_vector<triangle>
build_faces(const std::vector<tinyobj::shape_t>& shapes,
const thrust::device_vector<coord>& vertices,
thrust::device_vector<coord>& normals,
const thrust::device_vector<phong_material>& materials)
{
thrust::device_vector<triangle> triangles;
// temporarily store normal indices, because the normal vector could grow, and
// invalidate all pointers
thrust::host_vector<NormalData> face_normal_information;
thrust::host_vector<VertexData> face_vertex_information;
thrust::host_vector<const phong_material*> face_materials;
for (const auto& shape : shapes) {
// we will use only triangles
Expects(std::all_of(std::begin(shape.mesh.num_face_vertices),
std::end(shape.mesh.num_face_vertices),
[](int i) { return i == 3; }));
std::size_t index_offset = 0;
// all faces of the shape
for (std::size_t f = 0; f < shape.mesh.num_face_vertices.size(); ++f) {
const auto td = vertex_information(vertices, shape.mesh.indices, index_offset);
// updates normals if necessary
const auto nd = normal_information(vertices, shape.mesh.indices, td,
index_offset, normals);
// WARN: this writes the pointer on the device, as material pointer.
// if you dereference material on the cpu, that results in a segfault
// on cuda devices!
const phong_material* m_ptr =
shape.mesh.material_ids[f] < 0 ?
nullptr :
(&materials[shape.mesh.material_ids[f]]).get();
// add triangle to world, and store the normal information for later
// connection
face_vertex_information.push_back(td);
face_normal_information.push_back(nd);
face_materials.push_back(m_ptr);
index_offset += shape.mesh.num_face_vertices[f];
}
}
Expects(face_normal_information.size() == face_vertex_information.size());
Expects(face_normal_information.size() == face_materials.size());
Expects(face_normal_information.size() > 0);
// connect all normals with the triangle, the normal vector is expected to not grow
// anymore
for (std::size_t i = 0; i < face_normal_information.size(); ++i) {
const auto& nd = face_normal_information[i];
const auto& td = face_vertex_information[i];
triangle t((&vertices[td.i0]).get(), (&vertices[td.i1]).get(),
(&vertices[td.i2]).get(), (&normals[nd.f]).get());
t.p0_normal((&normals[nd.i0]).get());
t.p1_normal((&normals[nd.i1]).get());
t.p2_normal((&normals[nd.i2]).get());
t.material(face_materials[i]);
triangles.push_back(t);
}
Ensures(triangles.size() > 0);
Ensures(normals.size() > 0);
return triangles;
}
} // namespace __detail
void world_geometry::load(const std::string& file_name)
{
const auto data = __detail::load(file_name.c_str());
__shape_count = data.shapes.size();
// Handle all Vertices
__vertices = __detail::build_coords(data.attrib.vertices);
Expects(__vertices.size() == data.attrib.vertices.size() / 3ul);
// Handle all normals
__normals = __detail::build_coords(data.attrib.normals);
// Handle all Materials
__materials = __detail::build_materials(data.materials);
Expects(__materials.size() == data.materials.size());
// Connect the triangles and give their surfaces a material, creates normals if
// necessary!
__triangles = __detail::build_faces(data.shapes, __vertices, __normals, __materials);
Expects(__normals.size() > 0ul);
}
void world_geometry::add_light(phong_light l, coord position)
{
__lights.push_back(light_source(l, position));
}
world_geometry::data_handle::data_handle(const thrust::device_vector<coord>& vert,
const thrust::device_vector<coord>& norm,
const thrust::device_vector<triangle>& tria,
const thrust::device_vector<phong_material>& mat,
const thrust::device_vector<light_source>& light)
: vertices{vert.data().get(), vert.size()}
, normals{norm.data().get(), norm.size()}
, triangles{tria.data().get(), tria.size()}
, materials{mat.data().get(), mat.size()}
, lights{light.data().get(), light.size()}
{
}
world_geometry::data_handle world_geometry::handle() const noexcept
{
return data_handle(__vertices, __normals, __triangles, __materials, __lights);
}
|
3ef24dd5c1cca78b3202220512565db30e1825d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
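// Fills the grid with a pseudo-random state per cell: 0, 1 (counted as red) or 2 (counted as blue).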
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 128;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(hipMemcpy(domain_gpu[0], domain_cpu, domain_size, hipMemcpyHostToDevice));
//init_kernel<<< grid, threads, 0 >>>(domain_gpu[0], domain_x);
// Timer initialization
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(hipEventRecord(start, 0));
// Get the sizes
int shared_x = (domain_x / blocks_x) + 2;
int shared_y = (domain_y / blocks_y) + 2;
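// The +2 adds a one-cell halo (ghost cells) on each side of the block's tile.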
// Kernel execution
int shared_mem_size = shared_x * shared_y * sizeof(int);
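// Double-buffer the domain: step i reads domain_gpu[i%2] and writes domain_gpu[(i+1)%2].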
for(int i = 0; i < steps; i++) {
hipLaunchKernelGGL(( life_kernel), dim3(grid), dim3(threads), shared_mem_size , 0, domain_gpu[i%2],
domain_gpu[(i+1)%2], domain_x, domain_y, (domain_x / blocks_x), (domain_y / blocks_y));
}
// Stop timer
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(hipMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(domain_gpu[0]));
CUDA_SAFE_CALL(hipFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
for(int y = 0; y < domain_y; y++)
{
for(int x = 0; x < domain_x; x++)
{
int cell = domain_cpu[y * domain_x + x];
printf("%u", cell);
if(cell == 1) {
red++;
}
else if(cell == 2) {
blue++;
}
}
printf("\n");
}
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
| 3ef24dd5c1cca78b3202220512565db30e1825d8.cu |
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 128;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(cudaMemcpy(domain_gpu[0], domain_cpu, domain_size, cudaMemcpyHostToDevice));
//init_kernel<<< grid, threads, 0 >>>(domain_gpu[0], domain_x);
// Timer initialization
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
// Get the sizes
int shared_x = (domain_x / blocks_x) + 2;
int shared_y = (domain_y / blocks_y) + 2;
// Kernel execution
int shared_mem_size = shared_x * shared_y * sizeof(int);
for(int i = 0; i < steps; i++) {
life_kernel<<< grid, threads, shared_mem_size >>>(domain_gpu[i%2],
domain_gpu[(i+1)%2], domain_x, domain_y, (domain_x / blocks_x), (domain_y / blocks_y));
}
// Stop timer
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(cudaMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(domain_gpu[0]));
CUDA_SAFE_CALL(cudaFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
for(int y = 0; y < domain_y; y++)
{
for(int x = 0; x < domain_x; x++)
{
int cell = domain_cpu[y * domain_x + x];
printf("%u", cell);
if(cell == 1) {
red++;
}
else if(cell == 2) {
blue++;
}
}
printf("\n");
}
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
|
4473e623298531946cda23cc445eeaa1a48ec4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "kernels.hip"
#include "kernels_small.cu"
#include "ProjHelperFun.cu"
#include "cudaErrHandling.cu"
#include <vector>
// void printArray(vector<REAL> arr) {
// printf("[");
// for (const auto& elem : arr) {
// printf("%f, ", elem);
// }
// printf("]\n");
// }
void
rollback(
const unsigned g,
PrivGlobs& globs,
REAL* d_myResult,
REAL* d_myVarX,
REAL* d_myVarY,
REAL* d_myDxx,
REAL* d_myDyy,
REAL* d_u,
REAL* d_v,
REAL* d_a,
REAL* d_b,
REAL* d_c,
REAL* d_yy,
REAL* d__y,
const unsigned int outer,
const unsigned dim
) {
unsigned numX = globs.myX.size(),
numY = globs.myY.size();
unsigned numZ = max(numX,numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
/* Call kernel */
// const bool is_3D = outer * numX < 5000;
const bool is_3D = true;
int dimO = ceil( ((float)outer) / (is_3D ? 16 : dim ));
int dimX = ceil( ((float)numX) / (is_3D ? 8 : dim ));
int dimY = ceil( ((float)numY) / (is_3D ? 8 : dim ));
dim3 block(dim, dim, 1), gridOX(dimO, dimX, 1);
dim3 gridOY(dimO, dimY, 1);
dim3 block3D(16, 8, 8), gridOXY(dimO, dimX, dimY);
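// 3D path: one 16x8x8 thread block per (outer, x, y) tile; 2D paths tile (outer, x) or (outer, y) with dim x dim blocks.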
if (is_3D) {
hipLaunchKernelGGL(( initUAndV3Dim), dim3(gridOXY), dim3(block3D), 0, 0,
d_u, d_v, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_myResult,
outer, numX, numY, dtInv);
} else {
hipLaunchKernelGGL(( initUAndV2Dim), dim3(gridOX), dim3(block), 0, 0,
d_u, d_v, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_myResult,
outer, numX, numY, dtInv);
}
if (false) {
// 3D-calc of a, b, c and y. Does not beat privatization of a and b, even on small dataset.
hipLaunchKernelGGL(( initABC3D), dim3(gridOXY), dim3(block3D), 0, 0, d_a, d_b, d_c, outer, numX, numY, numZ, dtInv, d_myVarX, d_myDxx, true);
hipLaunchKernelGGL(( onlyTridag1), dim3(gridOY), dim3(block), 0, 0, outer, numX, numY, numZ, d_a, d_b, d_c, dtInv, d_myVarX, d_myDxx, d_u, d_yy);
hipLaunchKernelGGL(( initABC3D), dim3(gridOXY), dim3(block3D), 0, 0, d_a, d_b, d_c, outer, numX, numY, numZ, dtInv, d_myVarY, d_myDyy, false);
hipLaunchKernelGGL(( init_y), dim3(gridOXY), dim3(block3D), 0, 0, outer, numX, numY, numZ, d__y, d_u, d_v, dtInv);
hipLaunchKernelGGL(( onlyTridag2), dim3(gridOX), dim3(block), 0, 0, outer, numX, numY, numZ, d_a, d_b, d_c, dtInv, d_myVarY, d_myDyy, d_u, d_v, d_yy, d__y, d_myResult);
} else {
hipLaunchKernelGGL(( tridag1), dim3(gridOY), dim3(block), 0, 0, outer, numX, numY, numZ, d_c, dtInv, d_myVarX, d_myDxx, d_u, d_yy);
hipLaunchKernelGGL(( tridag2), dim3(gridOX), dim3(block), 0, 0, outer, numX, numY, numZ, d_c, dtInv, d_myVarY, d_myDyy, d_u, d_v, d_yy, d_myResult);
}
}
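// Pairs a device pointer slot with the number of REAL elements it should receive from the pooled allocation in run_OrigCPU.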
struct d_alloc {
REAL** ptr;
unsigned int size;
};
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT);
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
unsigned int numZ = max(numX, numY);
/* Declare device pointers */
REAL *d_myResult, *d_myX, *d_myY, *d_myVarX, *d_myVarY, *d_myDxx, *d_myDyy, *d_u, *d_v, *d_res;
REAL *d_a, *d_b, *d_c;
REAL *d_yy, *d__y;
unsigned int myResultSize = outer * numX * numY;
// List of device pointers and what size they request
vector<d_alloc> allocations = {
{&d_res, outer},
{&d_myX, numX},
{&d_myY, numY},
{&d_myVarX, numX * numY},
{&d_myVarY, numX * numY},
{&d_myDxx, numX * 4},
{&d_myDyy, numY * 4},
{&d_a, outer*numZ*numZ},
{&d_b, outer*numZ*numZ},
{&d_c, outer*numZ*numZ},
{&d_yy, outer * numZ * numZ},
{&d__y, outer * numZ * numZ},
{&d_v, myResultSize},
{&d_myResult, myResultSize},
{&d_u, myResultSize},
};
unsigned long total_size = 0;
for (auto& al : allocations) {
total_size += al.size;
}
// Allocate all device memory to save allocation/deallocation overhead
// Unfortunately it doesn't seem to make too much difference.
REAL* master_device_ptr;
cudaErrchkAPI(hipMalloc((void**)&master_device_ptr, total_size * sizeof(REAL)));
REAL* accum_ptr = master_device_ptr;
for (auto& al : allocations) {
*(al.ptr) = accum_ptr;
accum_ptr += al.size;
}
/* Copy initial required data to device */
copy2DVec(d_myDxx, globs.myDxx, hipMemcpyHostToDevice);
copy2DVec(d_myDyy, globs.myDyy, hipMemcpyHostToDevice);
cudaErrchkAPI(hipMemcpy(d_myX, globs.myX.data(), numX * sizeof(REAL), hipMemcpyHostToDevice));
cudaErrchkAPI(hipMemcpy(d_myY, globs.myY.data(), numY * sizeof(REAL), hipMemcpyHostToDevice));
/* Compute myResult from a 2d kernel */
int dim;
if (outer > 31) {
dim = 32;
} else {
dim = 16;
}
int dimO = ceil(((float)outer) / dim);
int dimX = ceil(((float)numX) / dim);
int dimY = ceil(((float)numY) / dim);
dim3 block(dim, dim, 1), gridOX(dimO, dimX, 1), gridXY(dimX, dimY, 1);
hipLaunchKernelGGL(( myResultKernel2D), dim3(gridOX), dim3(block), 0, 0, outer, numX, numY, d_myX, d_myResult);
cudaErrchkKernelAndSync();
for(int g = globs.myTimeline.size()-2;g>=0;--g) {
{
REAL nu2t = 0.5 * nu * nu * globs.myTimeline[g];
hipLaunchKernelGGL(( myVarXYKernel), dim3(gridXY), dim3(block), 0, 0, numX, numY, beta, nu2t, alpha, d_myX, d_myY, d_myVarX, d_myVarY);
cudaErrchkKernelAndSync();
}
rollback(g, globs, d_myResult, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_u, d_v, d_a, d_b, d_c, d_yy, d__y, outer, dim);
}
{
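// Gather the final result for each outer option at (myXindex, myYindex) and copy it back to the host.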
unsigned int block_size = 256;
unsigned int num_blocks = (outer + (block_size -1)) / block_size;
hipLaunchKernelGGL(( buildResultKernel), dim3(num_blocks), dim3(block_size), 0, 0, outer, numX, numY, globs.myXindex, globs.myYindex, d_res, d_myResult);
cudaErrchkKernelAndSync();
hipMemcpy(res, d_res, outer * sizeof(REAL), hipMemcpyDeviceToHost);
}
cudaErrchkAPI(hipFree(master_device_ptr));
}
//#endif // PROJ_CORE_ORIG
| 4473e623298531946cda23cc445eeaa1a48ec4d5.cu | #include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "kernels.cu"
#include "kernels_small.cu"
#include "ProjHelperFun.cu"
#include "cudaErrHandling.cu"
#include <vector>
// void printArray(vector<REAL> arr) {
// printf("[");
// for (const auto& elem : arr) {
// printf("%f, ", elem);
// }
// printf("]\n");
// }
void
rollback(
const unsigned g,
PrivGlobs& globs,
REAL* d_myResult,
REAL* d_myVarX,
REAL* d_myVarY,
REAL* d_myDxx,
REAL* d_myDyy,
REAL* d_u,
REAL* d_v,
REAL* d_a,
REAL* d_b,
REAL* d_c,
REAL* d_yy,
REAL* d__y,
const unsigned int outer,
const unsigned dim
) {
unsigned numX = globs.myX.size(),
numY = globs.myY.size();
unsigned numZ = max(numX,numY);
REAL dtInv = 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g]);
/* Call kernel */
// const bool is_3D = outer * numX < 5000;
const bool is_3D = true;
int dimO = ceil( ((float)outer) / (is_3D ? 16 : dim ));
int dimX = ceil( ((float)numX) / (is_3D ? 8 : dim ));
int dimY = ceil( ((float)numY) / (is_3D ? 8 : dim ));
dim3 block(dim, dim, 1), gridOX(dimO, dimX, 1);
dim3 gridOY(dimO, dimY, 1);
dim3 block3D(16, 8, 8), gridOXY(dimO, dimX, dimY);
if (is_3D) {
initUAndV3Dim<<<gridOXY, block3D>>>
(d_u, d_v, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_myResult,
outer, numX, numY, dtInv);
} else {
initUAndV2Dim<<<gridOX, block>>>
(d_u, d_v, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_myResult,
outer, numX, numY, dtInv);
}
if (false) {
// 3D-calc of a, b, c and y. Does not beat privatization of a and b, even on small dataset.
initABC3D<<<gridOXY, block3D>>>(d_a, d_b, d_c, outer, numX, numY, numZ, dtInv, d_myVarX, d_myDxx, true);
onlyTridag1<<<gridOY, block>>>(outer, numX, numY, numZ, d_a, d_b, d_c, dtInv, d_myVarX, d_myDxx, d_u, d_yy);
initABC3D<<<gridOXY, block3D>>>(d_a, d_b, d_c, outer, numX, numY, numZ, dtInv, d_myVarY, d_myDyy, false);
init_y<<<gridOXY, block3D>>>(outer, numX, numY, numZ, d__y, d_u, d_v, dtInv);
onlyTridag2<<<gridOX, block>>>(outer, numX, numY, numZ, d_a, d_b, d_c, dtInv, d_myVarY, d_myDyy, d_u, d_v, d_yy, d__y, d_myResult);
} else {
tridag1<<<gridOY, block>>>(outer, numX, numY, numZ, d_c, dtInv, d_myVarX, d_myDxx, d_u, d_yy);
tridag2<<<gridOX, block>>>(outer, numX, numY, numZ, d_c, dtInv, d_myVarY, d_myDyy, d_u, d_v, d_yy, d_myResult);
}
}
struct d_alloc {
REAL** ptr;
unsigned int size;
};
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT);
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
unsigned int numZ = max(numX, numY);
/* Declare device pointers */
REAL *d_myResult, *d_myX, *d_myY, *d_myVarX, *d_myVarY, *d_myDxx, *d_myDyy, *d_u, *d_v, *d_res;
REAL *d_a, *d_b, *d_c;
REAL *d_yy, *d__y;
unsigned int myResultSize = outer * numX * numY;
// List of device pointers and what size they request
vector<d_alloc> allocations = {
{&d_res, outer},
{&d_myX, numX},
{&d_myY, numY},
{&d_myVarX, numX * numY},
{&d_myVarY, numX * numY},
{&d_myDxx, numX * 4},
{&d_myDyy, numY * 4},
{&d_a, outer*numZ*numZ},
{&d_b, outer*numZ*numZ},
{&d_c, outer*numZ*numZ},
{&d_yy, outer * numZ * numZ},
{&d__y, outer * numZ * numZ},
{&d_v, myResultSize},
{&d_myResult, myResultSize},
{&d_u, myResultSize},
};
unsigned long total_size = 0;
for (auto& al : allocations) {
total_size += al.size;
}
// Allocate all device memory to save allocation/deallocation overhead
// Unfortunately it doesn't seem to make too much difference.
REAL* master_device_ptr;
cudaErrchkAPI(cudaMalloc((void**)&master_device_ptr, total_size * sizeof(REAL)));
REAL* accum_ptr = master_device_ptr;
for (auto& al : allocations) {
*(al.ptr) = accum_ptr;
accum_ptr += al.size;
}
/* Copy initial required data to device */
copy2DVec(d_myDxx, globs.myDxx, cudaMemcpyHostToDevice);
copy2DVec(d_myDyy, globs.myDyy, cudaMemcpyHostToDevice);
cudaErrchkAPI(cudaMemcpy(d_myX, globs.myX.data(), numX * sizeof(REAL), cudaMemcpyHostToDevice));
cudaErrchkAPI(cudaMemcpy(d_myY, globs.myY.data(), numY * sizeof(REAL), cudaMemcpyHostToDevice));
/* Compute myResult from a 2d kernel */
int dim;
if (outer > 31) {
dim = 32;
} else {
dim = 16;
}
int dimO = ceil(((float)outer) / dim);
int dimX = ceil(((float)numX) / dim);
int dimY = ceil(((float)numY) / dim);
dim3 block(dim, dim, 1), gridOX(dimO, dimX, 1), gridXY(dimX, dimY, 1);
myResultKernel2D<<<gridOX, block>>>(outer, numX, numY, d_myX, d_myResult);
cudaErrchkKernelAndSync();
for(int g = globs.myTimeline.size()-2;g>=0;--g) {
{
REAL nu2t = 0.5 * nu * nu * globs.myTimeline[g];
myVarXYKernel<<<gridXY, block>>>(numX, numY, beta, nu2t, alpha, d_myX, d_myY, d_myVarX, d_myVarY);
cudaErrchkKernelAndSync();
}
rollback(g, globs, d_myResult, d_myVarX, d_myVarY, d_myDxx, d_myDyy, d_u, d_v, d_a, d_b, d_c, d_yy, d__y, outer, dim);
}
{
unsigned int block_size = 256;
unsigned int num_blocks = (outer + (block_size -1)) / block_size;
buildResultKernel<<<num_blocks, block_size>>>(outer, numX, numY, globs.myXindex, globs.myYindex, d_res, d_myResult);
cudaErrchkKernelAndSync();
cudaMemcpy(res, d_res, outer * sizeof(REAL), cudaMemcpyDeviceToHost);
}
cudaErrchkAPI(cudaFree(master_device_ptr));
}
//#endif // PROJ_CORE_ORIG
|
d7d83b9ef68af9b8af38a4513c8d1019cdb2b0c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float* var_5,float* var_6,float* var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
comp = (+1.0268E34f * +1.6547E9f * (var_3 - var_4));
for (int i=0; i < var_1; ++i) {
var_5[i] = -1.5800E34f;
var_6[i] = sinf((var_8 + (var_9 - cosf(-1.8832E-43f))));
var_7[i] = +1.8731E34f;
comp += var_7[i] + var_6[i] * var_5[i] * +1.5318E-35f + (-1.0675E-37f - var_10);
}
for (int i=0; i < var_2; ++i) {
var_11[i] = -0.0f;
float tmp_1 = -1.9597E36f;
comp = tmp_1 * var_11[i] - (var_12 / (-1.4935E-36f * (var_13 * +1.5838E-37f)));
}
if (comp >= (var_14 * (-1.5700E-26f * +1.2359E-42f + cosf((-1.2237E-37f * var_15 / +1.8914E-44f / var_16))))) {
float tmp_2 = +1.2200E35f;
comp += tmp_2 - (-1.8775E-41f * (-0.0f / +1.9521E34f / -1.7387E34f));
comp = var_17 * logf(var_18 / (-1.3151E-37f * (var_19 * var_20 * +1.4525E-29f)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float* tmp_6 = initPointer( atof(argv[6]) );
float* tmp_7 = initPointer( atof(argv[7]) );
float* tmp_8 = initPointer( atof(argv[8]) );
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float* tmp_12 = initPointer( atof(argv[12]) );
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
hipDeviceSynchronize();
return 0;
}
| d7d83b9ef68af9b8af38a4513c8d1019cdb2b0c7.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float* var_5,float* var_6,float* var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
comp = (+1.0268E34f * +1.6547E9f * (var_3 - var_4));
for (int i=0; i < var_1; ++i) {
var_5[i] = -1.5800E34f;
var_6[i] = sinf((var_8 + (var_9 - cosf(-1.8832E-43f))));
var_7[i] = +1.8731E34f;
comp += var_7[i] + var_6[i] * var_5[i] * +1.5318E-35f + (-1.0675E-37f - var_10);
}
for (int i=0; i < var_2; ++i) {
var_11[i] = -0.0f;
float tmp_1 = -1.9597E36f;
comp = tmp_1 * var_11[i] - (var_12 / (-1.4935E-36f * (var_13 * +1.5838E-37f)));
}
if (comp >= (var_14 * (-1.5700E-26f * +1.2359E-42f + cosf((-1.2237E-37f * var_15 / +1.8914E-44f / var_16))))) {
float tmp_2 = +1.2200E35f;
comp += tmp_2 - (-1.8775E-41f * (-0.0f / +1.9521E34f / -1.7387E34f));
comp = var_17 * logf(var_18 / (-1.3151E-37f * (var_19 * var_20 * +1.4525E-29f)));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float* tmp_6 = initPointer( atof(argv[6]) );
float* tmp_7 = initPointer( atof(argv[7]) );
float* tmp_8 = initPointer( atof(argv[8]) );
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float* tmp_12 = initPointer( atof(argv[12]) );
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
cudaDeviceSynchronize();
return 0;
}
|
d2c2a368ed96e558072be1a456a06340ccc789f0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KNearestNeighborsGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *dataArray = NULL;
hipMalloc(&dataArray, XSIZE*YSIZE);
int *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int cnt = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( KNearestNeighborsGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,result,cnt);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( KNearestNeighborsGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,result,cnt);
}
auto start = steady_clock::now();
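// NOTE: the timed loop below measures kernel launch/enqueue overhead only; there is no device synchronization before the end timestamp.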
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( KNearestNeighborsGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, dataArray,result,cnt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d2c2a368ed96e558072be1a456a06340ccc789f0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KNearestNeighborsGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *dataArray = NULL;
cudaMalloc(&dataArray, XSIZE*YSIZE);
int *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int cnt = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KNearestNeighborsGPU<<<gridBlock,threadBlock>>>(dataArray,result,cnt);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KNearestNeighborsGPU<<<gridBlock,threadBlock>>>(dataArray,result,cnt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KNearestNeighborsGPU<<<gridBlock,threadBlock>>>(dataArray,result,cnt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
43356bfaf8173b24eefd1e24c01073c6caaffb10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "EnemyBulletUpdaterKernel.cuh"
#include "../../../Screen/Screen.h"
__global__ void EnemyBulletUpdaterKernel::Process(EnemyBullet* ebPtr, int length)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= length) return;
// move the bullet
ebPtr[idx].m_position += ebPtr[idx].m_speed;
// mark as dead once it leaves the screen
if (ebPtr[idx].m_position.x > Screen::Width + ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.x < -ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.y > Screen::Height + ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.y < -ebPtr[idx].m_collisionRadius) ebPtr[idx].m_isDead = true;
return;
} | 43356bfaf8173b24eefd1e24c01073c6caaffb10.cu | #include "EnemyBulletUpdaterKernel.cuh"
#include "../../../Screen/Screen.h"
__global__ void EnemyBulletUpdaterKernel::Process(EnemyBullet* ebPtr, int length)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= length) return;
// 移動
ebPtr[idx].m_position += ebPtr[idx].m_speed;
// 画面外に出たら死亡
if (ebPtr[idx].m_position.x > Screen::Width + ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.x < -ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.y > Screen::Height + ebPtr[idx].m_collisionRadius ||
ebPtr[idx].m_position.y < -ebPtr[idx].m_collisionRadius) ebPtr[idx].m_isDead = true;
return;
} |
6235bdb03cf8fcbf6ff3661bcd3ecb54ae38a800.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/highgui/highgui.hpp>
#include "Tensor.h"
#include "gpu_util.h"
Tensor::Tensor(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
Tensor::Tensor(const initializer_list<size_t> &shape, uint8_t fill_type) :
shape(shape) {
// create a new data field and initialize the size fields
this->create(this->shape, fill_type);
}
Tensor::Tensor(const Tensor &gpucube) {
this->d_pixels = NULL;
this->copy(gpucube);
}
Tensor::Tensor(const Tensor *gpucube) {
this->d_pixels = NULL;
this->copy(*gpucube);
}
Tensor::~Tensor(void) {
this->destroy();
}
__global__ void GPU_map_id(float *F, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = idx;
}
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void Tensor::create(const initializer_list<size_t> &shape, uint8_t fill_type) {
this->destroy();
// unpack the shape; assumed order is {n_rows, n_cols, n_slices}, missing trailing dims default to 1
const size_t *dims = shape.begin();
this->n_rows = shape.size() > 0 ? dims[0] : 1;
this->n_cols = shape.size() > 1 ? dims[1] : 1;
this->n_slices = shape.size() > 2 ? dims[2] : 1;
this->n_elem = this->n_rows * this->n_cols * this->n_slices;
if (this->n_elem != 0) {
checkCudaErrors(hipMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(hipMemset(this->d_pixels, 0, this->n_elem * sizeof(float)));
break;
case gfill::ones:
hipLaunchKernelGGL(( GPU_map_assign), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, 1, this->n_elem);
checkCudaErrors(hipGetLastError());
break;
case gfill::linspace:
hipLaunchKernelGGL(( GPU_map_id), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, this->n_elem);
checkCudaErrors(hipGetLastError());
default:
break;
}
}
}
void Tensor::destroy(void) {
if (this->d_pixels) {
checkCudaErrors(hipFree(this->d_pixels));
this->d_pixels = NULL;
}
}
// OPERATORS
void Tensor::set(float v, size_t i, size_t j, size_t k) {
checkCudaErrors(hipMemcpy(&this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)],
&v, sizeof(float), hipMemcpyHostToDevice));
}
float Tensor::get(size_t i, size_t j, size_t k) {
float v;
checkCudaErrors(hipMemcpy(&v, &this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)],
sizeof(float), hipMemcpyDeviceToHost));
return v;
}
Tensor &Tensor::operator=(const Tensor &gpucube) {
this->copy(gpucube);
return *this;
}
// MEMORY
void Tensor::copy(const Tensor &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels,
this->n_elem * sizeof(float), hipMemcpyDeviceToDevice));
}
/*void Tensor::submatCopy(const Tensor &gpucube, int x1, int x2, int y1, int y2) {
this->
}*/
void Tensor::load(const std::string &fname) { // change
this->create(cv::imread(fname));
}
void Tensor::save(const std::string &fname) { // change
cv::imwrite(fname, this->cv_img());
}
// Specific OpenCV interaction (to make sure that they are backwards compatible)
Tensor::Tensor(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
__global__ void GPU_cv_img2Tensor(float *dst, unsigned char *src, int dst_n_rows, int dst_n_cols, int src_n_rows, int src_n_cols, int n_slices, int ioffset, int joffset) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= dst_n_rows || j >= dst_n_cols || k >= n_slices) {
return;
}
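// Convert the interleaved 8-bit OpenCV layout (channel-fastest, BGR) into planar floats in [0,1], reversing the channel order.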
dst[IJK2C(i, j, n_slices-k-1, dst_n_rows, dst_n_cols)] = ((float)src[IJK2C(k, j+joffset, i+ioffset, n_slices, src_n_cols)]) / 255.0;
}
void Tensor::create(const cv::Mat &cvMat, bool remalloc) {
if (remalloc) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
} else {
assert(cvMat.rows * cvMat.cols * cvMat.channels() == this->n_elem && this->d_pixels != NULL);
}
if (this->n_elem == 0) {
return;
}
// copy to memory
unsigned char *dimg;
checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * this->n_elem));
checkCudaErrors(hipMemcpy(dimg, cvMat.data, sizeof(unsigned char) * this->n_elem, hipMemcpyHostToDevice));
// reformat
dim3 blockSize(16, 16, 1);
dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices);
hipLaunchKernelGGL(( GPU_cv_img2Tensor), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, dimg, this->n_rows, this->n_cols, this->n_rows, this->n_cols, this->n_slices, 0, 0);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(dimg));
}
void Tensor::create(const cv::Mat &cvMat, int i1, int i2, int j1, int j2, bool remalloc) {
assert(i1 <= i2 && j1 <= j2 && j2 <= cvMat.cols && i2 <= cvMat.rows);
int di = i2 - i1;
int dj = j2 - j1;
if (remalloc) {
this->create(di, dj, cvMat.channels(), gfill::none);
} else {
assert(di * dj * cvMat.channels() == this->n_elem && this->d_pixels != NULL);
}
if (this->n_elem == 0) {
return;
}
// copy to memory
size_t n_elem = cvMat.rows * cvMat.cols * cvMat.channels();
unsigned char *dimg;
checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * n_elem));
checkCudaErrors(hipMemcpy(dimg, cvMat.data, sizeof(unsigned char) * n_elem, hipMemcpyHostToDevice));
// reformat
dim3 blockSize(16, 16, 1);
dim3 gridSize((di-1)/16+1, (dj-1)/16+1, this->n_slices);
hipLaunchKernelGGL(( GPU_cv_img2Tensor), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, dimg, di, dj, cvMat.rows, cvMat.cols, this->n_slices, i1, j1);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(dimg));
}
/*static int limit(int x, int a, int b) {
if (x < a) {
return a;
} else if (x > b) {
return b;
} else {
return x;
}
}*/
__global__ void GPU_Tensor2cv_img(unsigned char *dst, float *src, int n_rows, int n_cols, int n_slices) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= n_rows || j >= n_cols || k >= n_slices) {
return;
}
dst[IJK2C(k, j, i, n_slices, n_cols)] = (unsigned char)(src[IJK2C(i, j, n_slices-k-1, n_rows, n_cols)] * 255.0);
}
cv::Mat Tensor::cv_img(void) {
if (this->n_elem == 0) {
return cv::Mat(0, 0, CV_8UC1);
}
cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 3) ? CV_8UC3 : CV_8UC1);
// reformat
unsigned char *dimg;
checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * this->n_elem));
dim3 blockSize(16, 16, 1);
dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices);
hipLaunchKernelGGL(( GPU_Tensor2cv_img), dim3(gridSize), dim3(blockSize), 0, 0, dimg, this->d_pixels, this->n_rows, this->n_cols, this->n_slices);
checkCudaErrors(hipGetLastError());
// place the matrix into the image
checkCudaErrors(hipMemcpy(cv_image.data, dimg, sizeof(unsigned char) * this->n_elem, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(dimg));
return cv_image;
}
cv::Mat Tensor::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 1 ? CV_32F : CV_32FC3));
float *h_pixels = new float[this->n_elem];
checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels,
this->n_elem * sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<float>(i, j) = h_pixels[IJ2C(i, j, this->n_rows)];
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3f>(i, j) = cv::Vec3f(
h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)],
h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)],
h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)]);
}
}
}
delete h_pixels;
return cv_image;
}
// specific armadillo compatibility
Tensor::Tensor(arma::vec &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
Tensor::Tensor(arma::mat &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
Tensor::Tensor(arma::cube &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
void Tensor::create(const arma::vec &armaCube) {
this->create(armaCube.n_rows, 1, 1, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
h_pixels[i] = (float)armaCube(i);
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), hipMemcpyHostToDevice));
delete h_pixels;
}
void Tensor::create(const arma::mat &armaCube) {
this->create(armaCube.n_rows, armaCube.n_cols, 1, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
h_pixels[IJ2C(i, j, this->n_rows)] = (float)armaCube(i, j);
}
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), hipMemcpyHostToDevice));
delete h_pixels;
}
void Tensor::create(const arma::cube &armaCube) {
this->create(armaCube.n_rows, armaCube.n_cols, armaCube.n_slices, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)armaCube(i, j, k);
}
}
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), hipMemcpyHostToDevice));
delete h_pixels;
}
arma::cube Tensor::arma_cube(void) {
arma::cube ac(this->n_rows, this->n_cols, this->n_slices);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels,
this->n_elem * sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
for (int k = 0; k < this->n_slices; k++) {
ac(i, j, k) = h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)];
}
}
}
delete h_pixels;
return ac;
}
/*Tensor &Tensor::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}*/
| 6235bdb03cf8fcbf6ff3661bcd3ecb54ae38a800.cu | #include <opencv2/highgui/highgui.hpp>
#include "Tensor.h"
#include "gpu_util.h"
Tensor::Tensor(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
Tensor::Tensor(const initializer_list<size_t> &shape, uint8_t fill_type) :
shape(shape) {
// create a new data field and initialize the size fields
this->create(this->shape, fill_type);
}
Tensor::Tensor(const Tensor &gpucube) {
this->d_pixels = NULL;
this->copy(gpucube);
}
Tensor::Tensor(const Tensor *gpucube) {
this->d_pixels = NULL;
this->copy(*gpucube);
}
Tensor::~Tensor(void) {
this->destroy();
}
__global__ void GPU_map_id(float *F, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = idx;
}
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void Tensor::create(const initializer_list<size_t> &shape, uint8_t fill_type) {
this->destroy();
// unpack the shape; assumed order is {n_rows, n_cols, n_slices}, missing trailing dims default to 1
const size_t *dims = shape.begin();
this->n_rows = shape.size() > 0 ? dims[0] : 1;
this->n_cols = shape.size() > 1 ? dims[1] : 1;
this->n_slices = shape.size() > 2 ? dims[2] : 1;
this->n_elem = this->n_rows * this->n_cols * this->n_slices;
if (this->n_elem != 0) {
checkCudaErrors(cudaMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(cudaMemset(this->d_pixels, 0, this->n_elem * sizeof(float)));
break;
case gfill::ones:
GPU_map_assign<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, 1, this->n_elem);
checkCudaErrors(cudaGetLastError());
break;
case gfill::linspace:
GPU_map_id<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, this->n_elem);
checkCudaErrors(cudaGetLastError());
default:
break;
}
}
}
void Tensor::destroy(void) {
if (this->d_pixels) {
checkCudaErrors(cudaFree(this->d_pixels));
this->d_pixels = NULL;
}
}
// OPERATORS
void Tensor::set(float v, size_t i, size_t j, size_t k) {
checkCudaErrors(cudaMemcpy(&this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)],
&v, sizeof(float), cudaMemcpyHostToDevice));
}
float Tensor::get(size_t i, size_t j, size_t k) {
float v;
checkCudaErrors(cudaMemcpy(&v, &this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)],
sizeof(float), cudaMemcpyDeviceToHost));
return v;
}
Tensor &Tensor::operator=(const Tensor &gpucube) {
this->copy(gpucube);
return *this;
}
// MEMORY
void Tensor::copy(const Tensor &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels,
this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice));
}
/*void Tensor::submatCopy(const Tensor &gpucube, int x1, int x2, int y1, int y2) {
this->
}*/
void Tensor::load(const std::string &fname) { // change
this->create(cv::imread(fname));
}
void Tensor::save(const std::string &fname) { // change
cv::imwrite(fname, this->cv_img());
}
// Specific OpenCV interaction (to make sure that they are backwards compatible)
Tensor::Tensor(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
__global__ void GPU_cv_img2Tensor(float *dst, unsigned char *src, int dst_n_rows, int dst_n_cols, int src_n_rows, int src_n_cols, int n_slices, int ioffset, int joffset) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= dst_n_rows || j >= dst_n_cols || k >= n_slices) {
return;
}
dst[IJK2C(i, j, n_slices-k-1, dst_n_rows, dst_n_cols)] = ((float)src[IJK2C(k, j+joffset, i+ioffset, n_slices, src_n_cols)]) / 255.0;
}
void Tensor::create(const cv::Mat &cvMat, bool remalloc) {
if (remalloc) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
} else {
assert(cvMat.rows * cvMat.cols * cvMat.channels() == this->n_elem && this->d_pixels != NULL);
}
if (this->n_elem == 0) {
return;
}
// copy to memory
unsigned char *dimg;
checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * this->n_elem));
checkCudaErrors(cudaMemcpy(dimg, cvMat.data, sizeof(unsigned char) * this->n_elem, cudaMemcpyHostToDevice));
// reformat
dim3 blockSize(16, 16, 1);
dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices);
GPU_cv_img2Tensor<<<gridSize, blockSize>>>(this->d_pixels, dimg, this->n_rows, this->n_cols, this->n_rows, this->n_cols, this->n_slices, 0, 0);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(dimg));
}
void Tensor::create(const cv::Mat &cvMat, int i1, int i2, int j1, int j2, bool remalloc) {
assert(i1 <= i2 && j1 <= j2 && j2 <= cvMat.cols && i2 <= cvMat.rows);
int di = i2 - i1;
int dj = j2 - j1;
if (remalloc) {
this->create(di, dj, cvMat.channels(), gfill::none);
} else {
assert(di * dj * cvMat.channels() == this->n_elem && this->d_pixels != NULL);
}
if (this->n_elem == 0) {
return;
}
// copy to memory
size_t n_elem = cvMat.rows * cvMat.cols * cvMat.channels();
unsigned char *dimg;
checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * n_elem));
checkCudaErrors(cudaMemcpy(dimg, cvMat.data, sizeof(unsigned char) * n_elem, cudaMemcpyHostToDevice));
// reformat
dim3 blockSize(16, 16, 1);
dim3 gridSize((di-1)/16+1, (dj-1)/16+1, this->n_slices);
GPU_cv_img2Tensor<<<gridSize, blockSize>>>(this->d_pixels, dimg, di, dj, cvMat.rows, cvMat.cols, this->n_slices, i1, j1);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(dimg));
}
/*static int limit(int x, int a, int b) {
if (x < a) {
return a;
} else if (x > b) {
return b;
} else {
return x;
}
}*/
__global__ void GPU_Tensor2cv_img(unsigned char *dst, float *src, int n_rows, int n_cols, int n_slices) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= n_rows || j >= n_cols || k >= n_slices) {
return;
}
dst[IJK2C(k, j, i, n_slices, n_cols)] = (unsigned char)(src[IJK2C(i, j, n_slices-k-1, n_rows, n_cols)] * 255.0);
}
cv::Mat Tensor::cv_img(void) {
if (this->n_elem == 0) {
return cv::Mat(0, 0, CV_8UC1);
}
cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 3) ? CV_8UC3 : CV_8UC1);
// reformat
unsigned char *dimg;
checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * this->n_elem));
dim3 blockSize(16, 16, 1);
dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices);
GPU_Tensor2cv_img<<<gridSize, blockSize>>>(dimg, this->d_pixels, this->n_rows, this->n_cols, this->n_slices);
checkCudaErrors(cudaGetLastError());
// place the matrix into the image
checkCudaErrors(cudaMemcpy(cv_image.data, dimg, sizeof(unsigned char) * this->n_elem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(dimg));
return cv_image;
}
cv::Mat Tensor::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 1 ? CV_32F : CV_32FC3));
float *h_pixels = new float[this->n_elem];
checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels,
this->n_elem * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<float>(i, j) = h_pixels[IJ2C(i, j, this->n_rows)];
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3f>(i, j) = cv::Vec3f(
h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)],
h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)],
h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)]);
}
}
}
delete h_pixels;
return cv_image;
}
// specific armadillo compatibility
Tensor::Tensor(arma::vec &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
Tensor::Tensor(arma::mat &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
Tensor::Tensor(arma::cube &armaCube) {
this->d_pixels = NULL;
this->create(armaCube);
}
void Tensor::create(const arma::vec &armaCube) {
this->create(armaCube.n_rows, 1, 1, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
h_pixels[i] = (float)armaCube(i);
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
delete h_pixels;
}
void Tensor::create(const arma::mat &armaCube) {
this->create(armaCube.n_rows, armaCube.n_cols, 1, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
h_pixels[IJ2C(i, j, this->n_rows)] = (float)armaCube(i, j);
}
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
delete h_pixels;
}
void Tensor::create(const arma::cube &armaCube) {
this->create(armaCube.n_rows, armaCube.n_cols, armaCube.n_slices, gfill::none);
if (this->n_elem == 0) {
return;
}
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)armaCube(i, j, k);
}
}
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels,
this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
delete h_pixels;
}
arma::cube Tensor::arma_cube(void) {
arma::cube ac(this->n_rows, this->n_cols, this->n_slices);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels,
this->n_elem * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
for (int k = 0; k < this->n_slices; k++) {
ac(i, j, k) = h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)];
}
}
}
delete h_pixels;
return ac;
}
/*Tensor &Tensor::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}*/
|
fbd586bb562d9b68be7a0e8bf9770a5b71ae70d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cassert>
typedef void (* execute_task_t)(void *);
class UnifiedMemoryClass
{
public:
void* operator new(size_t size) {
void* vp;
hipMallocManaged(&vp, size);
hipDeviceSynchronize();
return vp;
}
void operator delete(void* vp) {
hipDeviceSynchronize();
hipFree(vp);
}
};
class UnaryTask {
double* xi;
execute_task_t* unary_on_device;
public:
__host__ __device__ UnaryTask();
__host__ __device__ UnaryTask(execute_task_t* unary_device_fp = NULL, double* x = NULL) : unary_on_device(unary_device_fp), xi(x) {}
__device__ void execute() {
(*unary_on_device)(xi);
}
};
__global__
void device_mutex_lock(int* mutex) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) while(atomicCAS(mutex, 0, 1) != 0);
}
__global__
void device_mutex_unlock(int* mutex) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) atomicExch(mutex, 0);
}
class HostDeviceLock : public UnifiedMemoryClass {
int* mutex;
hipStream_t lockStream;
public:
HostDeviceLock() {
hipError_t cuda_status = hipSuccess;
cuda_status = hipMalloc((void**) &mutex, sizeof(int));
assert(cuda_status == hipSuccess);
int unlocked = 0;
cuda_status = hipMemcpy(mutex, &unlocked, sizeof(int), hipMemcpyHostToDevice);
assert(cuda_status == hipSuccess);
cuda_status = hipStreamCreate(&lockStream);
assert(cuda_status == hipSuccess);
}
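// lock()/unlock() work on both sides: device code spins on atomicCAS directly, while host code
// launches a single-thread kernel on lockStream that performs the same atomic and then synchronizes on that stream.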
__host__ __device__
void lock() {
#ifdef __CUDA_ARCH__
while(atomicCAS(mutex, 0, 1) != 0);
#else
hipLaunchKernelGGL(( device_mutex_lock), dim3(1),dim3(1),0,lockStream, mutex);
hipError_t cuda_status = hipStreamSynchronize(lockStream);
assert(cuda_status == hipSuccess);
#endif
}
__host__ __device__
void unlock() {
#ifdef __CUDA_ARCH__
atomicExch(mutex, 0);
#else
hipLaunchKernelGGL(( device_mutex_unlock), dim3(1),dim3(1),0,lockStream, mutex);
hipError_t cuda_status = hipStreamSynchronize(lockStream);
assert(cuda_status == hipSuccess);
#endif
}
};
__device__
void cube(void* xv) {
double* xi = static_cast<double*>(xv);
*xi = (*xi) * (*xi) * (*xi);
}
__device__
void square(void* xv) {
double* xi = static_cast<double*>(xv);
*xi = (*xi) * (*xi);
}
__global__
void task_kernel(UnaryTask* tasks, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
tasks[tid].execute();
}
}
__global__
void task_launcher(UnaryTask* tasks, int size, HostDeviceLock* task_lock) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
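// Take the shared lock, launch the tasks as a child grid (dynamic parallelism), and wait for them before releasing the lock.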
task_lock->lock();
int numThreads = min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
hipLaunchKernelGGL(( task_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, tasks, size);
hipDeviceSynchronize();
task_lock->unlock();
}
}
__global__
void get_cube_pointer(execute_task_t* device_pointer) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) *device_pointer = cube;
}
__global__
void get_square_pointer(execute_task_t* device_pointer) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) *device_pointer = square;
}
void square_host(double* x, int size, HostDeviceLock* task_lock) {
task_lock->lock();
for (int i = 0; i < size; i++) {
x[i] = x[i] * x[i];
}
task_lock->unlock();
}
int main(int argc, char* argv[]) {
double* x = NULL;
int size = 100;
execute_task_t* device_cube_fun_p = NULL;
execute_task_t* device_square_fun_p = NULL;
UnaryTask* cube_tasks = NULL;
UnaryTask* square_tasks = NULL;
hipStream_t streamA;
hipStream_t streamB;
hipError_t cuda_status = hipSuccess;
HostDeviceLock* task_lock = new HostDeviceLock();
cuda_status = hipStreamCreate(&streamA);
assert(cuda_status == hipSuccess);
cuda_status = hipStreamCreate(&streamB);
assert(cuda_status == hipSuccess);
cuda_status = hipMallocManaged(&x, sizeof(double) * size);
assert(cuda_status == hipSuccess);
cuda_status = hipMallocManaged(&device_cube_fun_p, sizeof(execute_task_t));
assert(cuda_status == hipSuccess);
cuda_status = hipMallocManaged(&cube_tasks, sizeof(UnaryTask)*size);
assert(cuda_status == hipSuccess);
cuda_status = hipMallocManaged(&device_square_fun_p, sizeof(execute_task_t));
assert(cuda_status == hipSuccess);
cuda_status = hipMallocManaged(&square_tasks, sizeof(UnaryTask)*size);
assert(cuda_status == hipSuccess);
hipLaunchKernelGGL(( get_cube_pointer), dim3(1),dim3(1), 0, 0, device_cube_fun_p);
hipLaunchKernelGGL(( get_square_pointer), dim3(1),dim3(1), 0, 0, device_square_fun_p);
cuda_status = hipDeviceSynchronize();
assert(cuda_status == hipSuccess);
for (int i = 0; i < size; i++) {
x[i] = 2.0;
}
for (int i = 0; i < size; i++) {
square_tasks[i] = UnaryTask(device_square_fun_p, &x[i]);
}
for (int i = 0; i < size; i++) {
cube_tasks[i] = UnaryTask(device_cube_fun_p, &x[i]);
}
cuda_status = hipDeviceSynchronize();
assert(cuda_status == hipSuccess);
int numThreads = min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
hipLaunchKernelGGL(( task_launcher), dim3(1),dim3(1),0,streamA, square_tasks, size, task_lock);
hipLaunchKernelGGL(( task_launcher), dim3(1),dim3(1),0,streamB, cube_tasks, size, task_lock);
square_host(x, size, task_lock);
cuda_status = hipDeviceSynchronize();
assert(cuda_status == hipSuccess);
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += x[i];
}
std::cout << "sum of elementwise squared cubed squared x is: " << xsum << std::endl;
if (xsum == 409600.0) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
| fbd586bb562d9b68be7a0e8bf9770a5b71ae70d2.cu | #include <iostream>
#include <cassert>
typedef void (* execute_task_t)(void *);
class UnifiedMemoryClass
{
public:
void* operator new(size_t size) {
void* vp;
cudaMallocManaged(&vp, size);
cudaDeviceSynchronize();
return vp;
}
void operator delete(void* vp) {
cudaDeviceSynchronize();
cudaFree(vp);
}
};
class UnaryTask {
double* xi;
execute_task_t* unary_on_device;
public:
__host__ __device__ UnaryTask();
__host__ __device__ UnaryTask(execute_task_t* unary_device_fp = NULL, double* x = NULL) : unary_on_device(unary_device_fp), xi(x) {}
__device__ void execute() {
(*unary_on_device)(xi);
}
};
__global__
void device_mutex_lock(int* mutex) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) while(atomicCAS(mutex, 0, 1) != 0);
}
__global__
void device_mutex_unlock(int* mutex) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) atomicExch(mutex, 0);
}
class HostDeviceLock : public UnifiedMemoryClass {
int* mutex;
cudaStream_t lockStream;
public:
HostDeviceLock() {
cudaError_t cuda_status = cudaSuccess;
cuda_status = cudaMalloc((void**) &mutex, sizeof(int));
assert(cuda_status == cudaSuccess);
int unlocked = 0;
cuda_status = cudaMemcpy(mutex, &unlocked, sizeof(int), cudaMemcpyHostToDevice);
assert(cuda_status == cudaSuccess);
cuda_status = cudaStreamCreate(&lockStream);
assert(cuda_status == cudaSuccess);
}
__host__ __device__
void lock() {
#ifdef __CUDA_ARCH__
while(atomicCAS(mutex, 0, 1) != 0);
#else
device_mutex_lock<<<1,1,0,lockStream>>>(mutex);
cudaError_t cuda_status = cudaStreamSynchronize(lockStream);
assert(cuda_status == cudaSuccess);
#endif
}
__host__ __device__
void unlock() {
#ifdef __CUDA_ARCH__
atomicExch(mutex, 0);
#else
device_mutex_unlock<<<1,1,0,lockStream>>>(mutex);
cudaError_t cuda_status = cudaStreamSynchronize(lockStream);
assert(cuda_status == cudaSuccess);
#endif
}
};
__device__
void cube(void* xv) {
double* xi = static_cast<double*>(xv);
*xi = (*xi) * (*xi) * (*xi);
}
__device__
void square(void* xv) {
double* xi = static_cast<double*>(xv);
*xi = (*xi) * (*xi);
}
__global__
void task_kernel(UnaryTask* tasks, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
tasks[tid].execute();
}
}
__global__
void task_launcher(UnaryTask* tasks, int size, HostDeviceLock* task_lock) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
task_lock->lock();
int numThreads = min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
task_kernel<<<numBlocks, numThreads>>>(tasks, size);
cudaDeviceSynchronize();
task_lock->unlock();
}
}
__global__
void get_cube_pointer(execute_task_t* device_pointer) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) *device_pointer = cube;
}
__global__
void get_square_pointer(execute_task_t* device_pointer) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) *device_pointer = square;
}
void square_host(double* x, int size, HostDeviceLock* task_lock) {
task_lock->lock();
for (int i = 0; i < size; i++) {
x[i] = x[i] * x[i];
}
task_lock->unlock();
}
int main(int argc, char* argv[]) {
double* x = NULL;
int size = 100;
execute_task_t* device_cube_fun_p = NULL;
execute_task_t* device_square_fun_p = NULL;
UnaryTask* cube_tasks = NULL;
UnaryTask* square_tasks = NULL;
cudaStream_t streamA;
cudaStream_t streamB;
cudaError_t cuda_status = cudaSuccess;
HostDeviceLock* task_lock = new HostDeviceLock();
cuda_status = cudaStreamCreate(&streamA);
assert(cuda_status == cudaSuccess);
cuda_status = cudaStreamCreate(&streamB);
assert(cuda_status == cudaSuccess);
cuda_status = cudaMallocManaged(&x, sizeof(double) * size);
assert(cuda_status == cudaSuccess);
cuda_status = cudaMallocManaged(&device_cube_fun_p, sizeof(execute_task_t));
assert(cuda_status == cudaSuccess);
cuda_status = cudaMallocManaged(&cube_tasks, sizeof(UnaryTask)*size);
assert(cuda_status == cudaSuccess);
cuda_status = cudaMallocManaged(&device_square_fun_p, sizeof(execute_task_t));
assert(cuda_status == cudaSuccess);
cuda_status = cudaMallocManaged(&square_tasks, sizeof(UnaryTask)*size);
assert(cuda_status == cudaSuccess);
get_cube_pointer<<<1,1>>>(device_cube_fun_p);
get_square_pointer<<<1,1>>>(device_square_fun_p);
cuda_status = cudaDeviceSynchronize();
assert(cuda_status == cudaSuccess);
for (int i = 0; i < size; i++) {
x[i] = 2.0;
}
for (int i = 0; i < size; i++) {
square_tasks[i] = UnaryTask(device_square_fun_p, &x[i]);
}
for (int i = 0; i < size; i++) {
cube_tasks[i] = UnaryTask(device_cube_fun_p, &x[i]);
}
cuda_status = cudaDeviceSynchronize();
assert(cuda_status == cudaSuccess);
int numThreads = min(32, size);
int numBlocks = static_cast<int>(ceil(((double) size)/((double) numThreads)));
task_launcher<<<1,1,0,streamA>>>(square_tasks, size, task_lock);
task_launcher<<<1,1,0,streamB>>>(cube_tasks, size, task_lock);
square_host(x, size, task_lock);
cuda_status = cudaDeviceSynchronize();
assert(cuda_status == cudaSuccess);
double xsum = 0.0;
for (int i = 0; i < size; i++) {
xsum += x[i];
}
std::cout << "sum of elementwise squared cubed squared x is: " << xsum << std::endl;
if (xsum == 409600.0) std::cout << "SUCCESS!" << std::endl;
else std::cout << "ERROR!" << std::endl;
return 0;
}
|
e5aa2c0bf4c3992590060c8bfd4c6f0d115bdd94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"
}
#include "luaT.h"
#include "THH.h"
#include <stdio.h>
#include <assert.h>
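// Soft-thresholding (shrinkage): shifts each element toward zero by `amount` and clamps values in [-amount, amount] to zero.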
__global__ void shrink_kernel(float *x, float amount, int size_of_x)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size_of_x) {
if (x[id] > amount) {
x[id] -= amount;
} else if (x[id] < -amount) {
x[id] += amount;
} else {
x[id] = 0;
}
}
}
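// shrink_kernel is the element-wise soft-thresholding (shrinkage) operator,
// x <- sign(x) * max(|x| - amount, 0), written out as the three branches above.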
int shrink(lua_State *L)
{
THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
float amount = luaL_checknumber(L, 2);
int size_of_x = THCudaTensor_nElement(x);
int tb = 128;
hipLaunchKernelGGL(( shrink_kernel), dim3((size_of_x - 1) / tb + 1), dim3(tb) , 0, 0, THCudaTensor_data(x), amount, size_of_x);
return 0;
}
static const struct luaL_Reg funcs[] = {
{"shrink_from_lua", shrink},
{NULL, NULL}
};
extern "C" int luaopen_libdlp(lua_State *L) {
luaL_openlib(L, "dlp", funcs, 0);
return 1;
}
| e5aa2c0bf4c3992590060c8bfd4c6f0d115bdd94.cu | extern "C" {
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"
}
#include "luaT.h"
#include "THC.h"
#include <stdio.h>
#include <assert.h>
__global__ void shrink_kernel(float *x, float amount, int size_of_x)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size_of_x) {
if (x[id] > amount) {
x[id] -= amount;
} else if (x[id] < -amount) {
x[id] += amount;
} else {
x[id] = 0;
}
}
}
int shrink(lua_State *L)
{
THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
float amount = luaL_checknumber(L, 2);
int size_of_x = THCudaTensor_nElement(x);
int tb = 128;
shrink_kernel<<< (size_of_x - 1) / tb + 1, tb >>>(THCudaTensor_data(x), amount, size_of_x);
return 0;
}
static const struct luaL_Reg funcs[] = {
{"shrink_from_lua", shrink},
{NULL, NULL}
};
extern "C" int luaopen_libdlp(lua_State *L) {
luaL_openlib(L, "dlp", funcs, 0);
return 1;
}
|
dc2c6b6abd5052ce11b46736409cbe59bfe925ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <windows.h>
#include "kernel.h"
byte* dev_src2 = NULL;
byte* dev_dst2 = NULL;
byte* dev_aux2 = NULL;
int threadsInX;
int threadsInY;
int blocksInX;
int blocksInY;
int width;
int height;
int size_img;
//**************************************************** Funciones GPU **********************************************************//
__global__ void threshold(byte* src, byte* dst, byte min, byte max, int stride, int size)
{
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += stride)
dst[pos] = (src[pos] >= min && src[pos] <= max) ? 1 : 0;
}
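// threshold() uses a grid-stride loop: the launch fixes the total thread count
// (blocks * threads) and each thread walks the image with that stride, so one launch
// configuration covers any image size (the dev_threshold_* wrappers below pass
// stride = threadsInX * blocksInX).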
__global__ void erode(byte* src, byte* dst, int w, int h, int radio)
{
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _min = 255;
for (int y = start_y; y <= end_y; y++)
for (int x = start_x; x <= end_x; x++)
_min = min(_min, src[y * w + x]);
dst[posy * w + posx] = _min;
}
__global__ void erode_separable_step2(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
int _min = 255;
for (int y = start_y; y <= end_y; y++) {
_min = min(_min, src[y * w + posx]);
}
dst[posy * w + posx] = _min;
}
__global__ void erode_separable_step1(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _min = 255;
for (int x = start_x; x <= end_x; x++) {
_min = min(_min, src[posy * w + x]);
}
dst[posy * w + posx] = _min;
}
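// The erode_separable_step1/step2 pair (and the dilate_separable_* pair below) exploit
// the separability of min/max over a square window: a horizontal pass of radius r
// followed by a vertical pass of radius r gives the same result as the full
// (2r+1)x(2r+1) neighbourhood, at O(r) work per pixel per pass instead of O(r^2).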
__global__ void dilate(byte * src, byte *dst, int w, int h, int radio)
{
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _max = 0;
for (int y = start_y; y <= end_y; y++)
for (int x = start_x; x <= end_x; x++)
_max = max(_max, src[y * w + x]);
dst[posy * w + posx] = _max;
}
__global__ void dilate_separable_step2(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
int _max = 0;
for (int y = start_y; y <= end_y; y++) {
_max = max(_max, src[y * w + posx]);
}
dst[posy * w + posx] = _max;
}
__global__ void dilate_separable_step1(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _max = 0;
for (int x = start_x; x <= end_x; x++) {
_max = max(_max, src[posy * w + x]);
}
dst[posy * w + posx] = _max;
}
__global__ void reverseThreshold(byte* src, byte* dst, byte min, byte max, int stride, int size)
{
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += stride)
dst[pos] = (src[pos] >= min && src[pos] <= max) ? 0 : 1;
}
//********************************************************* Llamadas a GPU *************************************************************//
void dev_threshold(byte *src, byte *dst, byte min, byte max, int threads, int blocks, int stride, int size, int* error) {
hipError_t cudaStatus;
threshold << < blocks, threads >> > (src, dst, min, max, stride, size);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -7;
}
void dev_erode(byte *src, byte *dst, int width, int height, int radio, dim3 threads, dim3 blocks, int* error) {
hipError_t cudaStatus;
erode << < blocks, threads >> > (src, dst, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -8;
}
void dev_erode_twoSteps(byte* src, byte* dst, byte* aux, int radio, dim3 threads, dim3 blocks, int* error) {
hipError_t cudaStatus;
erode_separable_step1 << <blocks, threads >> > (src, aux, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
*error = -10;
return;
}
erode_separable_step2 << <blocks, threads >> > (aux, dst, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -10;
}
void dev_dilate(byte *src, byte *dst, int width, int height, int radio, dim3 threads, dim3 blocks, int* error) {
hipError_t cudaStatus;
dilate << < blocks, threads >> > (src, dst, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -11;
}
void dev_dilate_twoSteps(byte* src, byte* dst, byte* aux, int radio, dim3 threads, dim3 blocks, int* error) {
hipError_t cudaStatus;
dilate_separable_step1 << <blocks, threads >> > (src, aux, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
*error = -14;
return;
}
dilate_separable_step2 << <blocks, threads >> > (aux, dst, width, height, radio);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -14;
}
void dev_reverseThreshold(byte* src, byte* dst, byte min, byte max, int threads, int blocks, int stride, int size, int* error) {
hipError_t cudaStatus;
reverseThreshold << < blocks, threads >> > (src, dst, min, max, stride, size);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
*error = -12;
}
//******************************************************* Herramientas para GPU ********************************************************//
// DETERMINA EL NUMERO DE DEVICES DISPONIBLES
void expert_numAvailableDevices(int *numCUDADevices)
{
hipError_t cudaStatus = hipGetDeviceCount(numCUDADevices);
if (cudaStatus != hipSuccess)
*numCUDADevices = 0;
}
// RESETEA EL DEVICE SELECCIONADO
void expert_resetDevice(int deviceId, int* error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
if (deviceId < 0 || deviceId >= numDevices)
*error = -222;
else
{
hipError_t cudaStatus = hipSetDevice(deviceId);
if (cudaStatus == hipSuccess)
cudaStatus = hipDeviceReset();
if (cudaStatus == hipSuccess)
*error = 0;
else
*error = -333;
}
}
// ESTABLECE EL DEVICE SELECCIONADO
void expert_setDevice(int deviceId, int *error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
if (deviceId < 0 || deviceId >= numDevices)
*error = -222;
else
{
hipError_t cudaStatus = hipSetDevice(deviceId);
if (cudaStatus == hipSuccess)
*error = 0;
else
*error = -444;
}
}
// RESETEA TODOS LOS DEVICES DISPONIBLES
void expert_resetAllDevices(int* error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
*error = 0;
if (numDevices < 1)
*error = -111;
else
{
for (int dev = 0; dev < numDevices && *error == 0; dev++)
expert_resetDevice(dev, error);
expert_setDevice(0, error);
}
}
/*string expert_descriptionError(int* error) {
string error_msg;
switch (*error) {
case -111:
error_msg = "Al intentar resetear todos los devices, no haba devices";
break;
case -222:
error_msg = "Numero del device metido no esta en el rango";
break;
case -333:
error_msg = "Error en reseteo del device metido";
break;
case -444:
error_msg = "Error en el establecimiento del device";
break;
case -1:
error_msg = "Al intentar reservar memoria para la imagen fuente";
break;
case -2:
error_msg = "Al intentar reservar memoria para la imagen destino";
break;
case -3:
error_msg = "Copia de memoria de CPU a GPU";
break;
case -4:
error_msg = "Copia de memoria de GPU a CPU";
break;
case -5:
error_msg = "Al intentar liberar memoria para la imagen fuente";
break;
case -6:
error_msg = "Al intentar liberar memoria para la imagen destino";
break;
case -7:
error_msg = "Falla en threshold";
break;
case -8:
error_msg = "Falla en Erode Low";
break;
case -9:
error_msg = "Falla en Erode Fast(TwoSteps) al intentar reservar memoria en GPU";
break;
case -10:
error_msg = "Falla en Erode Fast(TwoSteps) al hacer el algoritmo";
break;
case -11:
error_msg = "Falla en Dilate";
break;
case -12:
error_msg = "Falla en ReverseThreshold";
break;
case -13:
error_msg = "Falla en Dilate Fast(TwoSteps) al intentar reservar memoria en GPU";
break;
case -14:
error_msg = "Falla en Dilate Fast(TwoSteps) al hacer algoritmo";
break;
case -15:
error_msg = "Fallo en reescaldo de imagen";
break;
default:
error_msg = "NO ERROR";
}
return error_msg;
}
*/
//************************************************ Determinar Threads Y Bloques *********************************************************//
void setNumberThreads1D(int threadsX, int blocksX, bool automatic) {
if (automatic) {
threadsInX = 1024;
blocksInX = 640; ///////// 65535 / 1024 = 64 * 10
}
else {
if (threadsX == 0 || threadsX > 1024)
threadsX = 1024;
if (blocksX == 0 || blocksX > 65535)
blocksX = 65535;
threadsInX = threadsX;
blocksInX = blocksX;
}
}
void setNumberThreads2D(int threadsX, int threadsY, int blocksX, int blocksY, bool automatic) {
if (automatic) {
if (width > 3000) {
threadsInX = 16;
blocksInX = 500;
}
else if (width > 1600) {
threadsInX = 8;
blocksInX = 400;
}
else {
threadsInX = 8;
blocksInX = 240;
}
if (height > 3000){
threadsInY = 16;
blocksInY = 500;
}
else if (height > 1600) {
threadsInY = 8;
blocksInY = 400;
}
else {
threadsInY = 8;
blocksInY = 240;
}
}
else {
if (threadsX == 0 || threadsX > 512)
threadsX = 512;
if (blocksX == 0 || blocksX > 65535)
blocksX = 65535;
threadsInX = threadsInY = threadsX;
blocksInX = blocksInY = blocksX;
}
//dim3 grid(width / threadsPerBlock.x, height / threadsPerBlock.y);
}
void setDimensionNumber_Threads_Blocks(int size, int threadsX, int threadsY, int blocksX, int blocksY, bool automatic) {
if (size > 2 || size <= 0)
size = 1;
if (size == 1)
setNumberThreads1D(threadsX, blocksX, automatic);
else
setNumberThreads2D(threadsX, threadsY, blocksX, blocksY, automatic);
}
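// Example of the automatic 2D path (values taken from setNumberThreads2D above): for a
// 3500x3200 image both dimensions exceed 3000, so 2D kernels run with 16x16 threads per
// block on a 500x500 grid (8000x8000 threads in total); smaller images fall through to
// the 8-thread branches with 400 or 240 blocks per dimension.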
//***************************************************** INTERCAMBIAR ARRAYS *********************************************************//
void swapBuffers(byte** a, byte** b)
{
byte* aux = *a;
*a = *b;
*b = aux;
}
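// Every dev_*Once wrapper writes dev_src2 -> dev_dst2 and then calls swapBuffers, so
// dev_src2 always holds the most recent result; successive operations (e.g. the
// erode/dilate pairs used by open and close) chain without any extra copies.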
//***********************************************************************************************************************************//
/* ERRORES de procesamiento:
/*
*
 * -111: Al intentar resetear todos los devices, no había devices
* -222: Numero del device metido no esta en el rango
* -333: Error en reseteo del device metido
* -444: Error en el establecimiento del device
*
* -1: Al intentar reservar memoria para la imagen fuente
* -2: Al intentar reservar memoria para la imagen destino
* -3: Copia de memoria de CPU a GPU
* -4: Copia de memoria de GPU a CPU
* -5: Al intentar liberar memoria para la imagen fuente
* -6: Al intentar liberar memoria para la imagen destino
* -7: Falla en threshold
* -8: Falla en Erode Low
* -9: Falla en Erode Fast (TwoSteps) al intentar reservar memoria en GPU
* -10: Falla en Erode Fast (TwoSteps) al hacer el algoritmo
* -11: Falla en Dilate
* -12: Falla en ReverseThreshold
* -13: Falla en Dilate Fast (TwoSteps) al intentar reservar memoria en GPU
* -14: Falla en Dilate Fast (TwoSteps) al hacer algoritmo
* -15: Fallo en reescaldo de imagen
* -16: Fallo en Close
* -17: Fallo en Open
*
*/
//************************************ Reserva de memoria en GPU y copia de imagen en GPU ****************************************//
void reservationMemory_CopyHostToDeviceOnce(byte* src, int w, int h, int* error) {
hipError_t cudaStatus;
width = w;
height = h;
size_img = width * height;
hipDeviceSynchronize();
//if (dev_src2 == NULL)
//{
cudaStatus = hipMalloc(&dev_src2, size_img);
if (cudaStatus != hipSuccess) {
//printf("Error en reserva de memoria del dev_src");
*error = -1;
//dev_src2 = NULL;
//dev_dst2 = NULL;
return;
}
cudaStatus = hipMalloc(&dev_dst2, size_img);
if (cudaStatus != hipSuccess) {
//printf("Error en reserva de memoria del dev_dst");
*error = -2;
//dev_src2 = NULL;
//dev_dst2 = NULL;
hipFree(dev_src2);
return;
}
//}
cudaStatus = hipMemcpy(dev_src2, src, size_img, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
}
}
/*void reservationMemory_CopyHostToDeviceMulti(byte* src, byte* dev_src, byte* dev_dst, int size, int* error) {
hipError_t cudaStatus;
*error = 1000;
if (dev_src == NULL)
{
cudaStatus = hipMalloc(&dev_src, size);
if (cudaStatus != hipSuccess) {
//printf("Error en reserva de memoria del dev_src");
*error = -1;
dev_src = NULL;
dev_dst = NULL;
return;
}
cudaStatus = hipMalloc(&dev_dst, size);
if (cudaStatus != hipSuccess) {
//printf("Error en reserva de memoria del dev_dst");
*error = -2;
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
}
}
cudaStatus = hipMemcpy(dev_src, src, size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
dev_src = NULL;
hipFree(dev_src);
dev_dst = NULL;
hipFree(dev_dst);
}
}*/
//***********************************************************************************************************************************//
//************************************ Liberación de memoria en GPU y copia de imagen en CPU ****************************************//
void freeMemory_CopyDeviceToHostOnce(byte* dst, int* error) {
hipError_t cudaStatus;
*error = 0;
hipDeviceSynchronize();
cudaStatus = hipMemcpy(dst, dev_src2, size_img, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
*error = -4;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
return;
}
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
}
/*void freeMemory_CopyDeviceToHostMulti(byte* dst, byte* dev_src, byte* dev_dst, int size, int* error) {
hipError_t cudaStatus;
*error = 1000;
cudaStatus = hipMemcpy(dst, dev_src, size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
*error = -4;
dev_src = NULL;
hipFree(dev_src);
dev_dst = NULL;
hipFree(dev_dst);
}
dev_src = NULL;
hipFree(dev_src);
dev_dst = NULL;
hipFree(dev_dst);
}*/
//***********************************************************************************************************************************//
//************************************************ Reserva de memoria en GPU ********************************************************//
void reservationMemoryOnce(int w, int h, int* error) {
hipError_t cudaStatus;
width = w;
height = h;
size_img = width * height;
//if (dev_src2 == NULL)
//{
cudaStatus = hipMalloc(&dev_src2, size_img);
if (cudaStatus != hipSuccess)
{
//printf("Error en reserva de memoria");
*error = -1;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
return;
}
cudaStatus = hipMalloc(&dev_dst2, size_img);
if (cudaStatus != hipSuccess)
{
//printf("Error en reserva de memoria");
*error = -2;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
}
//}
hipDeviceSynchronize();
}
/*void reservationMemoryMulti(byte* dev_src, byte* dev_dst, int size, string* error) {
hipError_t cudaStatus;
if (dev_src == NULL)
{
cudaStatus = hipMalloc(&dev_src, size);
if (cudaStatus != hipSuccess)
{
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
goto Error;
}
cudaStatus = hipMalloc(&dev_dst, size);
if (cudaStatus != hipSuccess)
{
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
hipFree(dev_dst);
goto Error;
}
*error = hipGetErrorName(cudaStatus);
}
Error:
//printf("Error en reserva de memoria");
*error = hipGetErrorName(cudaStatus);
}*/
//***********************************************************************************************************************************//
//********************************************* Liberacion de memoria en la GPU *****************************************************//
void freeMemoryOnce(int* error) {
hipError_t cudaStatus;
*error = 0;
//dev_src2 = NULL;
cudaStatus = hipFree(dev_src2);
if (cudaStatus != hipSuccess)
*error = -5;
else {
*error = 10;
}
//dev_dst2 = NULL;
cudaStatus = hipFree(dev_dst2);
if (cudaStatus != hipSuccess)
*error = -6;
else {
*error = 11;
}
hipDeviceSynchronize();
}
/*void freeMemoryMulti(byte* dev_src, byte* dev_dst, string* error) {
hipError_t cudaStatus;
dev_src = NULL;
dev_dst = NULL;
cudaStatus = hipFree(dev_src);
if (cudaStatus != hipSuccess)
goto Error;
cudaStatus = hipFree(dev_dst);
if (cudaStatus != hipSuccess)
goto Error;
*error = hipGetErrorName(cudaStatus);
Error:
*error = hipGetErrorName(cudaStatus);
}*/
//***********************************************************************************************************************************//
//******************************************** Copia de memoria de CPU a GPU ********************************************************//
void copyHostToDeviceOnce(byte* src, int* error) {
hipError_t cudaStatus;
hipDeviceSynchronize();
cudaStatus = hipMemcpy(dev_src2, src, size_img, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
}
hipDeviceSynchronize();
}
/*void copyHostToDeviceMulti(byte* src, byte* dev_src, byte* dev_dst, int size, string* error) {
hipError_t cudaStatus;
cudaStatus = hipMemcpy(dev_src, src, size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
goto Error;
*error = hipGetErrorName(cudaStatus);
Error:
//printf("Error en copia de host a device");
*error = hipGetErrorName(cudaStatus);
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
hipFree(dev_dst);
}*/
//***********************************************************************************************************************************//
//********************************************** Copia de memoria de GPU a CPU ******************************************************//
void copyDeviceToHostOnce(byte *dst, int* error) {
hipError_t cudaStatus;
*error = 0;
hipDeviceSynchronize();
cudaStatus = hipMemcpy(dst, dev_src2, size_img, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
//printf("Error en copia de device a host ");
*error = -4;
//dev_src2 = NULL;
hipFree(dev_src2);
//dev_dst2 = NULL;
hipFree(dev_dst2);
}
hipDeviceSynchronize();
}
/*void copyDeviceToHostMulti(byte *dst, byte* dev_src, byte* dev_dst, int size, string* error) {
hipError_t cudaStatus;
cudaStatus = hipMemcpy(dst, dev_src, size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
goto Error;
*error = hipGetErrorName(cudaStatus);
Error:
//printf("Error en copia de device a host ");
*error = hipGetErrorName(cudaStatus);
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
hipFree(dev_dst);
}*/
// ************************************************* LLAMADAS A THRESHOLD ***********************************************************//
/////////////////////////////////////////// Le indicas los threads que quieres /////////////////////////////////////////
void dev_threshold_manualOnce(byte min, byte max, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, threads, 0, blocks, 0, false);
int stride = threadsInX * blocksInX;
dev_threshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_threshold_manualMulti(byte* dev_src, byte* dev_dst, int size, byte min, byte max, int threads, int blocks) {
int stride = threads * blocks;
if (threads == 0 || threads > 1024)
threads = 1024;
if (blocks == 0 || blocks > 65535)
blocks = 65535;
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size);
swapBuffers(&dev_src, &dev_dst);
}*/
/////////////////////////////////////////// Los threads se calculan de forma automatica /////////////////////////////////////////
void dev_threshold_automaticoOnce(byte min, byte max, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, 0, 0, 0, 0, true);
int stride = threadsInX * blocksInX;
dev_threshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_threshold_automaticoMulti(byte* dev_src, byte* dev_dst, int size, byte min, byte max, int* error) {
int threads = 1024;
int blocks = 640; ///////// 65535 / 1024 = 64 * 10
int stride = threads * blocks;
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size);
swapBuffers(&dev_src, &dev_dst);
}*/
//****************************************************** LLAMADAS A ERODE ************************************************************//
// ERODE - Optimo MANUAL
void dev_erode_manualOnce(int radio, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_erode_manualMulti(byte* dev_src, byte* dev_dst, int width, int height, int radio, int threads, int blocks) {
if (threads == 0 || threads > 1024 )
threads = 1024;
if (blocks == 0 || blocks > 65535)
blocks = 65535;
dim3 threadsPerBlock(threads, threads);
dim3 grid(blocks, blocks);
//dev_erode(dev_src, dev_dst, width, height, radio, threadsPerBlock, grid);
swapBuffers(&dev_src, &dev_dst);
}*/
// ERODE - Optimo AUTO
void dev_erode_automaticoOnce(int radio, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_erode_automaticoMulti(byte* dev_src, byte* dev_dst, int width, int height, int radio) {
int threads = 32;
dim3 threadsPerBlock(threads, threads);
dim3 grid(width / threadsPerBlock.x, height / threadsPerBlock.y);
//dev_erode(dev_src, dev_dst, width, height, radio, threadsPerBlock, grid);
swapBuffers(&dev_src, &dev_dst);
}*/
// ERODE + Optimo MANUAL
void dev_erode_twoSteps_manualOnce(int radio, int threads, int blocks, int* error) {
hipError_t cudaStatus;
hipDeviceSynchronize();
cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -8;
return;
}
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
// ERODE + Optimo AUTO
void dev_erode_twoSteps_automaticOnce(int radio, int* error) {
hipError_t cudaStatus;
hipDeviceSynchronize();
cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -8;
return;
}
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
//****************************************************** LLAMADAS A DILATE ************************************************************//
// DILATE - Optimo MANUAL
void dev_dilate_manualOnce(int radio, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// DILATE - Optimo AUTO
void dev_dilate_automaticOnce(int radio, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// DILATE + Optimo MANUAL
void dev_dilate_twoSteps_manualOnce(int radio, int threads, int blocks, int* error) {
hipError_t cudaStatus;
hipDeviceSynchronize();
cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -13;
return;
}
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
// DILATE + Optimo AUTO
void dev_dilate_twoSteps_automaticOnce(int radio, int* error) {
hipError_t cudaStatus;
hipDeviceSynchronize();
cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -13;
return;
}
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
//****************************************************** REVERSE THRESHOLD ************************************************************//
// REVERSE THRESHOLD MANUAL
void dev_reverseThreshold_manualOnce(byte min, byte max, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, threads, 0, blocks, 0, false);
int stride = threadsInX * blocksInX;
dev_reverseThreshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// REVERSE THRESHOLD AUTO
void dev_reverseThreshold_automaticOnce(byte min, byte max, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, 0, 0, 0, 0, true);
int stride = threadsInX * blocksInX;
dev_reverseThreshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
//****************************************************** LLAMADAS A OPEN ***************************************************************//
// OPEN + Optimo MANUAL
void dev_open_fast_manualOnce(int radio, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
hipError_t cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -17;
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0)
return;
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
// OPEN + Optimo AUTO
void dev_open_fast_automaticOnce(int radio, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
hipError_t cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -17;
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0)
return;
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
//********************************************************** LLAMADAS A CLOSE **************************************************************//
// CLOSE + Optimo MANUAL
void dev_close_fast_manualOnce(int radio, int threads, int blocks, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
hipError_t cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -16;
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0)
return;
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
// CLOSE + Optimo AUTO
void dev_close_fast_automaticOnce(int radio, int* error) {
hipDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
hipError_t cudaStatus = hipMalloc(&dev_aux2, size_img);
if (cudaStatus != hipSuccess) {
*error = -16;
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0)
return;
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
hipFree(dev_aux2);
}
//************************************************************ AUTOMASK **************************************************************//
void automask(byte *src, byte* dst, byte * dev_src, byte * dev_dst, int size, byte min, byte max, int *error) {
hipError_t cudaSTATUS;
*error = 1000;
if (dev_src == NULL) {
cudaSTATUS = hipMalloc(&dev_src, size);
if (cudaSTATUS != hipSuccess) {
*error = -1;
dev_src = NULL;
dev_dst = NULL;
}
cudaSTATUS = hipMalloc(&dev_dst, size);
if (cudaSTATUS != hipSuccess) {
*error = -2;
dev_src = NULL;
dev_dst = NULL;
hipFree(dev_src);
}
}
cudaSTATUS = hipMemcpy(dev_src, src, size, hipMemcpyHostToDevice);
if (cudaSTATUS != hipSuccess) {
*error = -3;
goto Error;
}
int threads = 1024;
int blocks = 640; ///////// 65535 / 1024 = 64 * 10
int stride = threads * blocks;
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size, error);
swapBuffers(&dev_src, &dev_dst);
cudaSTATUS = hipMemcpy(dst,dev_src, size, hipMemcpyDeviceToHost);
if (cudaSTATUS != hipSuccess) {
*error = -4;
goto Error;
}
//dst = dev_src;
dev_src = NULL;
hipFree(dev_src);
dev_dst = NULL;
hipFree(dev_dst);
Error:
dev_src = NULL;
hipFree(dev_src);
dev_dst = NULL;
hipFree(dev_dst);
}
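// automask is a self-contained convenience path (a sketch of its flow): it allocates the
// device buffers if needed, copies src to the GPU, runs a single threshold(min, max) and
// copies the mask back into dst — unlike the *Once API above, which keeps
// dev_src2/dev_dst2 resident between calls.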
//************************************************************ ERRORES **************************************************************//
/*
else if (hipError_t == hipErrorInvalidDevicePointer)
*error = -3;
else if (hipError_t == hipErrorInvalidMemcpyDirection)
*error = -4;
else if (hipError_t == hipErrorInvalidValue)
*error = -5;
else if (cudaSTATUS == hipErrorUnsupportedLimit) {
*error = -21;
goto Error;
}
else if (cudaSTATUS == hipErrorDuplicateVariableName) {
*error = -22;
goto Error;
}
else if (cudaSTATUS == hipErrorDuplicateTextureName) {
*error = -23;
goto Error;
}
else if (cudaSTATUS == hipErrorDuplicateSurfaceName) {
*error = -24;
goto Error;
}
else if (cudaSTATUS == hipErrorDevicesUnavailable) {
*error = -25;
goto Error;
}
else if (cudaSTATUS == hipErrorInvalidImage) {
*error = -26;
goto Error;
}
else if (cudaSTATUS == hipErrorNoBinaryForGpu) {
*error = -27;
goto Error;
}
else if (cudaSTATUS == hipErrorIncompatibleDriverContext) {
*error = -28;
goto Error;
}
else if (cudaSTATUS == hipErrorPeerAccessAlreadyEnabled) {
*error = -29;
goto Error;
}
else if (cudaSTATUS == hipErrorPeerAccessNotEnabled) {
*error = -30;
goto Error;
}
else if (cudaSTATUS == hipErrorDeviceAlreadyInUse) {
*error = -31;
goto Error;
}
else if (cudaSTATUS == hipErrorProfilerDisabled) {
*error = -32;
goto Error;
}
else if (cudaSTATUS == hipErrorProfilerNotInitialized) {
*error = -33;
goto Error;
}
else if (cudaSTATUS == hipErrorProfilerAlreadyStarted) {
*error = -34;
goto Error;
}
else if (cudaSTATUS == hipErrorProfilerAlreadyStopped) {
*error = -35;
goto Error;
}
else if (cudaSTATUS == hipErrorStartupFailure) {
*error = -36;
goto Error;
}
else if (cudaSTATUS == hipErrorApiFailureBase) {
*error = -37;
goto Error;
}
else if (cudaSTATUS == hipErrorInvalidSurface) {
*error = -38;
goto Error;
}
else if (cudaSTATUS == hipErrorNoDevice) {
*error = -39;
goto Error;
}
else if (cudaSTATUS == hipErrorECCNotCorrectable) {
*error = -40;
goto Error;
}
else if (cudaSTATUS == hipErrorSharedObjectSymbolNotFound) {
*error = -41;
goto Error;
}
else if (cudaSTATUS == hipErrorSharedObjectInitFailed) {
*error = -42;
goto Error;
}
else
{
*error = -1000001;
goto Error;
}
*/
| dc2c6b6abd5052ce11b46736409cbe59bfe925ba.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <windows.h>
#include "kernel.h"
byte* dev_src2 = NULL;
byte* dev_dst2 = NULL;
byte* dev_aux2 = NULL;
int threadsInX;
int threadsInY;
int blocksInX;
int blocksInY;
int width;
int height;
int size_img;
//**************************************************** Funciones GPU **********************************************************//
__global__ void threshold(byte* src, byte* dst, byte min, byte max, int stride, int size)
{
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += stride)
dst[pos] = (src[pos] >= min && src[pos] <= max) ? 1 : 0;
}
__global__ void erode(byte* src, byte* dst, int w, int h, int radio)
{
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _min = 255;
for (int y = start_y; y <= end_y; y++)
for (int x = start_x; x <= end_x; x++)
_min = min(_min, src[y * w + x]);
dst[posy * w + posx] = _min;
}
__global__ void erode_separable_step2(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
int _min = 255;
for (int y = start_y; y <= end_y; y++) {
_min = min(_min, src[y * w + posx]);
}
dst[posy * w + posx] = _min;
}
__global__ void erode_separable_step1(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _min = 255;
for (int x = start_x; x <= end_x; x++) {
_min = min(_min, src[posy * w + x]);
}
dst[posy * w + posx] = _min;
}
__global__ void dilate(byte * src, byte *dst, int w, int h, int radio)
{
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _max = 0;
for (int y = start_y; y <= end_y; y++)
for (int x = start_x; x <= end_x; x++)
_max = max(_max, src[y * w + x]);
dst[posy * w + posx] = _max;
}
__global__ void dilate_separable_step2(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_y = max(posy - radio, 0);
unsigned int end_y = min(h - 1, posy + radio);
int _max = 0;
for (int y = start_y; y <= end_y; y++) {
_max = max(_max, src[y * w + posx]);
}
dst[posy * w + posx] = _max;
}
__global__ void dilate_separable_step1(byte* src, byte* dst, int w, int h, int radio) {
int posx = threadIdx.x + blockIdx.x * blockDim.x;
int posy = threadIdx.y + blockIdx.y * blockDim.y;
if (posx >= w || posy >= h)
return;
unsigned int start_x = max(posx - radio, 0);
unsigned int end_x = min(w - 1, posx + radio);
int _max = 0;
for (int x = start_x; x <= end_x; x++) {
_max = max(_max, src[posy * w + x]);
}
dst[posy * w + posx] = _max;
}
__global__ void reverseThreshold(byte* src, byte* dst, byte min, byte max, int stride, int size)
{
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += stride)
dst[pos] = (src[pos] >= min && src[pos] <= max) ? 0 : 1;
}
//********************************************************* Llamadas a GPU *************************************************************//
void dev_threshold(byte *src, byte *dst, byte min, byte max, int threads, int blocks, int stride, int size, int* error) {
cudaError_t cudaStatus;
threshold << < blocks, threads >> > (src, dst, min, max, stride, size);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -7;
}
void dev_erode(byte *src, byte *dst, int width, int height, int radio, dim3 threads, dim3 blocks, int* error) {
cudaError_t cudaStatus;
erode << < blocks, threads >> > (src, dst, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -8;
}
void dev_erode_twoSteps(byte* src, byte* dst, byte* aux, int radio, dim3 threads, dim3 blocks, int* error) {
cudaError_t cudaStatus;
erode_separable_step1 << <blocks, threads >> > (src, aux, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
*error = -10;
return;
}
erode_separable_step2 << <blocks, threads >> > (aux, dst, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -10;
}
void dev_dilate(byte *src, byte *dst, int width, int height, int radio, dim3 threads, dim3 blocks, int* error) {
cudaError_t cudaStatus;
dilate << < blocks, threads >> > (src, dst, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -11;
}
void dev_dilate_twoSteps(byte* src, byte* dst, byte* aux, int radio, dim3 threads, dim3 blocks, int* error) {
cudaError_t cudaStatus;
dilate_separable_step1 << <blocks, threads >> > (src, aux, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
*error = -14;
return;
}
dilate_separable_step2 << <blocks, threads >> > (aux, dst, width, height, radio);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -14;
}
void dev_reverseThreshold(byte* src, byte* dst, byte min, byte max, int threads, int blocks, int stride, int size, int* error) {
cudaError_t cudaStatus;
reverseThreshold << < blocks, threads >> > (src, dst, min, max, stride, size);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
*error = -12;
}
//******************************************************* Herramientas para GPU ********************************************************//
// DETERMINA EL NUMERO DE DEVICES DISPONIBLES
void expert_numAvailableDevices(int *numCUDADevices)
{
cudaError_t cudaStatus = cudaGetDeviceCount(numCUDADevices);
if (cudaStatus != cudaSuccess)
*numCUDADevices = 0;
}
// RESETEA EL DEVICE SELECCIONADO
void expert_resetDevice(int deviceId, int* error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
if (deviceId < 0 || deviceId >= numDevices)
*error = -222;
else
{
cudaError_t cudaStatus = cudaSetDevice(deviceId);
if (cudaStatus == cudaSuccess)
cudaStatus = cudaDeviceReset();
if (cudaStatus == cudaSuccess)
*error = 0;
else
*error = -333;
}
}
// ESTABLECE EL DEVICE SELECCIONADO
void expert_setDevice(int deviceId, int *error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
if (deviceId < 0 || deviceId >= numDevices)
*error = -222;
else
{
cudaError_t cudaStatus = cudaSetDevice(deviceId);
if (cudaStatus == cudaSuccess)
*error = 0;
else
*error = -444;
}
}
// RESETEA TODOS LOS DEVICES DISPONIBLES
void expert_resetAllDevices(int* error)
{
int numDevices;
expert_numAvailableDevices(&numDevices);
*error = 0;
if (numDevices < 1)
*error = -111;
else
{
for (int dev = 0; dev < numDevices && *error == 0; dev++)
expert_resetDevice(dev, error);
expert_setDevice(0, error);
}
}
/*string expert_descriptionError(int* error) {
string error_msg;
switch (*error) {
case -111:
error_msg = "Al intentar resetear todos los devices, no había devices";
break;
case -222:
error_msg = "Numero del device metido no esta en el rango";
break;
case -333:
error_msg = "Error en reseteo del device metido";
break;
case -444:
error_msg = "Error en el establecimiento del device";
break;
case -1:
error_msg = "Al intentar reservar memoria para la imagen fuente";
break;
case -2:
error_msg = "Al intentar reservar memoria para la imagen destino";
break;
case -3:
error_msg = "Copia de memoria de CPU a GPU";
break;
case -4:
error_msg = "Copia de memoria de GPU a CPU";
break;
case -5:
error_msg = "Al intentar liberar memoria para la imagen fuente";
break;
case -6:
error_msg = "Al intentar liberar memoria para la imagen destino";
break;
case -7:
error_msg = "Falla en threshold";
break;
case -8:
error_msg = "Falla en Erode Low";
break;
case -9:
error_msg = "Falla en Erode Fast(TwoSteps) al intentar reservar memoria en GPU";
break;
case -10:
error_msg = "Falla en Erode Fast(TwoSteps) al hacer el algoritmo";
break;
case -11:
error_msg = "Falla en Dilate";
break;
case -12:
error_msg = "Falla en ReverseThreshold";
break;
case -13:
error_msg = "Falla en Dilate Fast(TwoSteps) al intentar reservar memoria en GPU";
break;
case -14:
error_msg = "Falla en Dilate Fast(TwoSteps) al hacer algoritmo";
break;
case -15:
error_msg = "Fallo en reescaldo de imagen";
break;
default:
error_msg = "NO ERROR";
}
return error_msg;
}
*/
//************************************************ Determinar Threads Y Bloques *********************************************************//
void setNumberThreads1D(int threadsX, int blocksX, bool automatic) {
if (automatic) {
threadsInX = 1024;
blocksInX = 640; ///////// 65535 / 1024 = 64 * 10
}
else {
if (threadsX == 0 || threadsX > 1024)
threadsX = 1024;
if (blocksX == 0 || blocksX > 65535)
blocksX = 65535;
threadsInX = threadsX;
blocksInX = blocksX;
}
}
void setNumberThreads2D(int threadsX, int threadsY, int blocksX, int blocksY, bool automatic) {
if (automatic) {
if (width > 3000) {
threadsInX = 16;
blocksInX = 500;
}
else if (width > 1600) {
threadsInX = 8;
blocksInX = 400;
}
else {
threadsInX = 8;
blocksInX = 240;
}
if (height > 3000){
threadsInY = 16;
blocksInY = 500;
}
else if (height > 1600) {
threadsInY = 8;
blocksInY = 400;
}
else {
threadsInY = 8;
blocksInY = 240;
}
}
else {
if (threadsX == 0 || threadsX > 512)
threadsX = 512;
if (blocksX == 0 || blocksX > 65535)
blocksX = 65535;
threadsInX = threadsInY = threadsX;
blocksInX = blocksInY = blocksX;
}
//dim3 grid(width / threadsPerBlock.x, height / threadsPerBlock.y);
}
void setDimensionNumber_Threads_Blocks(int size, int threadsX, int threadsY, int blocksX, int blocksY, bool automatic) {
if (size > 2 || size <= 0)
size = 1;
if (size == 1)
setNumberThreads1D(threadsX, blocksX, automatic);
else
setNumberThreads2D(threadsX, threadsY, blocksX, blocksY, automatic);
}
//***************************************************** INTERCAMBIAR ARRAYS *********************************************************//
void swapBuffers(byte** a, byte** b)
{
byte* aux = *a;
*a = *b;
*b = aux;
}
//***********************************************************************************************************************************//
/* ERRORES de procesamiento:
/*
*
* -111: Al intentar resetear todos los devices, no había devices
* -222: Numero del device metido no esta en el rango
* -333: Error en reseteo del device metido
* -444: Error en el establecimiento del device
*
* -1: Al intentar reservar memoria para la imagen fuente
* -2: Al intentar reservar memoria para la imagen destino
* -3: Copia de memoria de CPU a GPU
* -4: Copia de memoria de GPU a CPU
* -5: Al intentar liberar memoria para la imagen fuente
* -6: Al intentar liberar memoria para la imagen destino
* -7: Falla en threshold
* -8: Falla en Erode Low
* -9: Falla en Erode Fast (TwoSteps) al intentar reservar memoria en GPU
* -10: Falla en Erode Fast (TwoSteps) al hacer el algoritmo
* -11: Falla en Dilate
* -12: Falla en ReverseThreshold
* -13: Falla en Dilate Fast (TwoSteps) al intentar reservar memoria en GPU
* -14: Falla en Dilate Fast (TwoSteps) al hacer algoritmo
* -15: Fallo en reescaldo de imagen
* -16: Fallo en Close
* -17: Fallo en Open
*
*/
//************************************ Reserva de memoria en GPU y copia de imagen en GPU ****************************************//
void reservationMemory_CopyHostToDeviceOnce(byte* src, int w, int h, int* error) {
cudaError_t cudaStatus;
width = w;
height = h;
size_img = width * height;
cudaDeviceSynchronize();
//if (dev_src2 == NULL)
//{
cudaStatus = cudaMalloc(&dev_src2, size_img);
if (cudaStatus != cudaSuccess) {
//printf("Error en reserva de memoria del dev_src");
*error = -1;
//dev_src2 = NULL;
//dev_dst2 = NULL;
return;
}
cudaStatus = cudaMalloc(&dev_dst2, size_img);
if (cudaStatus != cudaSuccess) {
//printf("Error en reserva de memoria del dev_dst");
*error = -2;
//dev_src2 = NULL;
//dev_dst2 = NULL;
cudaFree(dev_src2);
return;
}
//}
cudaStatus = cudaMemcpy(dev_src2, src, size_img, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
}
}
/*void reservationMemory_CopyHostToDeviceMulti(byte* src, byte* dev_src, byte* dev_dst, int size, int* error) {
cudaError_t cudaStatus;
*error = 1000;
if (dev_src == NULL)
{
cudaStatus = cudaMalloc(&dev_src, size);
if (cudaStatus != cudaSuccess) {
//printf("Error en reserva de memoria del dev_src");
*error = -1;
dev_src = NULL;
dev_dst = NULL;
return;
}
cudaStatus = cudaMalloc(&dev_dst, size);
if (cudaStatus != cudaSuccess) {
//printf("Error en reserva de memoria del dev_dst");
*error = -2;
dev_src = NULL;
dev_dst = NULL;
cudaFree(dev_src);
}
}
cudaStatus = cudaMemcpy(dev_src, src, size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
dev_src = NULL;
cudaFree(dev_src);
dev_dst = NULL;
cudaFree(dev_dst);
}
}*/
//***********************************************************************************************************************************//
//************************************ Liberación de memoria en GPU y copia de imagen en CPU ****************************************//
void freeMemory_CopyDeviceToHostOnce(byte* dst, int* error) {
cudaError_t cudaStatus;
*error = 0;
cudaDeviceSynchronize();
cudaStatus = cudaMemcpy(dst, dev_src2, size_img, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
*error = -4;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
return;
}
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
}
/*void freeMemory_CopyDeviceToHostMulti(byte* dst, byte* dev_src, byte* dev_dst, int size, int* error) {
cudaError_t cudaStatus;
*error = 1000;
cudaStatus = cudaMemcpy(dst, dev_src, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
*error = -4;
dev_src = NULL;
cudaFree(dev_src);
dev_dst = NULL;
cudaFree(dev_dst);
}
dev_src = NULL;
cudaFree(dev_src);
dev_dst = NULL;
cudaFree(dev_dst);
}*/
//***********************************************************************************************************************************//
//************************************************ Reserva de memoria en GPU ********************************************************//
void reservationMemoryOnce(int w, int h, int* error) {
cudaError_t cudaStatus;
width = w;
height = h;
size_img = width * height;
//if (dev_src2 == NULL)
//{
cudaStatus = cudaMalloc(&dev_src2, size_img);
if (cudaStatus != cudaSuccess)
{
//printf("Error en reserva de memoria");
*error = -1;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
return;
}
cudaStatus = cudaMalloc(&dev_dst2, size_img);
if (cudaStatus != cudaSuccess)
{
//printf("Error en reserva de memoria");
*error = -2;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
}
//}
cudaDeviceSynchronize();
}
/*void reservationMemoryMulti(byte* dev_src, byte* dev_dst, int size, string* error) {
cudaError_t cudaStatus;
if (dev_src == NULL)
{
cudaStatus = cudaMalloc(&dev_src, size);
if (cudaStatus != cudaSuccess)
{
dev_src = NULL;
dev_dst = NULL;
cudaFree(dev_src);
goto Error;
}
cudaStatus = cudaMalloc(&dev_dst, size);
if (cudaStatus != cudaSuccess)
{
dev_src = NULL;
dev_dst = NULL;
cudaFree(dev_src);
cudaFree(dev_dst);
goto Error;
}
*error = cudaGetErrorName(cudaStatus);
}
Error:
//printf("Error en reserva de memoria");
*error = cudaGetErrorName(cudaStatus);
}*/
//***********************************************************************************************************************************//
//********************************************* Liberacion de memoria en la GPU *****************************************************//
void freeMemoryOnce(int* error) {
cudaError_t cudaStatus;
*error = 0;
//dev_src2 = NULL;
cudaStatus = cudaFree(dev_src2);
if (cudaStatus != cudaSuccess)
*error = -5;
else {
*error = 10;
}
//dev_dst2 = NULL;
cudaStatus = cudaFree(dev_dst2);
if (cudaStatus != cudaSuccess)
*error = -6;
else {
*error = 11;
}
cudaDeviceSynchronize();
}
/*void freeMemoryMulti(byte* dev_src, byte* dev_dst, string* error) {
cudaError_t cudaStatus;
dev_src = NULL;
dev_dst = NULL;
cudaStatus = cudaFree(dev_src);
if (cudaStatus != cudaSuccess)
goto Error;
cudaStatus = cudaFree(dev_dst);
if (cudaStatus != cudaSuccess)
goto Error;
*error = cudaGetErrorName(cudaStatus);
Error:
*error = cudaGetErrorName(cudaStatus);
}*/
//***********************************************************************************************************************************//
//******************************************** Copia de memoria de CPU a GPU ********************************************************//
void copyHostToDeviceOnce(byte* src, int* error) {
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMemcpy(dev_src2, src, size_img, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
//printf("Error en copia de CPU a GPU");
*error = -3;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
}
cudaDeviceSynchronize();
}
/*void copyHostToDeviceMulti(byte* src, byte* dev_src, byte* dev_dst, int size, string* error) {
cudaError_t cudaStatus;
cudaStatus = cudaMemcpy(dev_src, src, size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
goto Error;
*error = cudaGetErrorName(cudaStatus);
Error:
//printf("Error en copia de host a device");
*error = cudaGetErrorName(cudaStatus);
dev_src = NULL;
dev_dst = NULL;
cudaFree(dev_src);
cudaFree(dev_dst);
}*/
//***********************************************************************************************************************************//
//********************************************** Copia de memoria de GPU a CPU ******************************************************//
void copyDeviceToHostOnce(byte *dst, int* error) {
cudaError_t cudaStatus;
*error = 0;
cudaDeviceSynchronize();
cudaStatus = cudaMemcpy(dst, dev_src2, size_img, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
//printf("Error en copia de device a host ");
*error = -4;
//dev_src2 = NULL;
cudaFree(dev_src2);
//dev_dst2 = NULL;
cudaFree(dev_dst2);
}
cudaDeviceSynchronize();
}
/*void copyDeviceToHostMulti(byte *dst, byte* dev_src, byte* dev_dst, int size, string* error) {
cudaError_t cudaStatus;
cudaStatus = cudaMemcpy(dst, dev_src, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
goto Error;
*error = cudaGetErrorName(cudaStatus);
Error:
//printf("Error en copia de device a host ");
*error = cudaGetErrorName(cudaStatus);
dev_src = NULL;
dev_dst = NULL;
cudaFree(dev_src);
cudaFree(dev_dst);
}*/
// ************************************************* LLAMADAS A THRESHOLD ***********************************************************//
/////////////////////////////////////////// Le indicas los threads que quieres /////////////////////////////////////////
void dev_threshold_manualOnce(byte min, byte max, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, threads, 0, blocks, 0, false);
int stride = threadsInX * blocksInX;
dev_threshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_threshold_manualMulti(byte* dev_src, byte* dev_dst, int size, byte min, byte max, int threads, int blocks) {
int stride = threads * blocks;
if (threads == 0 || threads > 1024)
threads = 1024;
if (blocks == 0 || blocks > 65535)
blocks = 65535;
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size);
swapBuffers(&dev_src, &dev_dst);
}*/
/////////////////////////////////////////// Los threads se calculan de forma automatica /////////////////////////////////////////
void dev_threshold_automaticoOnce(byte min, byte max, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, 0, 0, 0, 0, true);
int stride = threadsInX * blocksInX;
dev_threshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_threshold_automaticoMulti(byte* dev_src, byte* dev_dst, int size, byte min, byte max, int* error) {
int threads = 1024;
int blocks = 640; ///////// 65535 / 1024 = 64 * 10
int stride = threads * blocks;
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size);
swapBuffers(&dev_src, &dev_dst);
}*/
//****************************************************** LLAMADAS A ERODE ************************************************************//
// ERODE - Optimo MANUAL
void dev_erode_manualOnce(int radio, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_erode_manualMulti(byte* dev_src, byte* dev_dst, int width, int height, int radio, int threads, int blocks) {
if (threads == 0 || threads > 1024 )
threads = 1024;
if (blocks == 0 || blocks > 65535)
blocks = 65535;
dim3 threadsPerBlock(threads, threads);
dim3 grid(blocks, blocks);
//dev_erode(dev_src, dev_dst, width, height, radio, threadsPerBlock, grid);
swapBuffers(&dev_src, &dev_dst);
}*/
// ERODE - Optimo AUTO
void dev_erode_automaticoOnce(int radio, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
/*void dev_erode_automaticoMulti(byte* dev_src, byte* dev_dst, int width, int height, int radio) {
int threads = 32;
dim3 threadsPerBlock(threads, threads);
dim3 grid(width / threadsPerBlock.x, height / threadsPerBlock.y);
//dev_erode(dev_src, dev_dst, width, height, radio, threadsPerBlock, grid);
swapBuffers(&dev_src, &dev_dst);
}*/
// ERODE + Optimal MANUAL
void dev_erode_twoSteps_manualOnce(int radio, int threads, int blocks, int* error) {
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -8;
return;
}
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
// ERODE + Optimal AUTO
void dev_erode_twoSteps_automaticOnce(int radio, int* error) {
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -8;
return;
}
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
//****************************************************** DILATE CALLS ************************************************************//
// DILATE - Optimal MANUAL
void dev_dilate_manualOnce(int radio, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// DILATE - Optimal AUTO
void dev_dilate_automaticOnce(int radio, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate(dev_src2, dev_dst2, width, height, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// DILATE + Optimal MANUAL
void dev_dilate_twoSteps_manualOnce(int radio, int threads, int blocks, int* error) {
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -13;
return;
}
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
// DILATE + Optimal AUTO
void dev_dilate_twoSteps_automaticOnce(int radio, int* error) {
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -13;
return;
}
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
//****************************************************** REVERSE THRESHOLD ************************************************************//
// REVERSE THRESHOLD MANUAL
void dev_reverseThreshold_manualOnce(byte min, byte max, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, threads, 0, blocks, 0, false);
int stride = threadsInX * blocksInX;
dev_reverseThreshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
// REVERSE THRESHOLD AUTO
void dev_reverseThreshold_automaticOnce(byte min, byte max, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(1, 0, 0, 0, 0, true);
int stride = threadsInX * blocksInX;
dev_reverseThreshold(dev_src2, dev_dst2, min, max, threadsInX, blocksInX, stride, size_img, error);
swapBuffers(&dev_src2, &dev_dst2);
}
//****************************************************** OPEN CALLS ***************************************************************//
// OPEN + Optimal MANUAL
void dev_open_fast_manualOnce(int radio, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
cudaError_t cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -17;
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0) {
cudaFree(dev_aux2);
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
// OPEN + Optimal AUTO
void dev_open_fast_automaticOnce(int radio, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
cudaError_t cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -17;
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0) {
cudaFree(dev_aux2);
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
//********************************************************** CLOSE CALLS **************************************************************//
// CLOSE + Optimal MANUAL
void dev_close_fast_manualOnce(int radio, int threads, int blocks, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, threads, threads, blocks, blocks, false);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
cudaError_t cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -16;
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0) {
cudaFree(dev_aux2);
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
// CLOSE + Optimal AUTO
void dev_close_fast_automaticOnce(int radio, int* error) {
cudaDeviceSynchronize();
setDimensionNumber_Threads_Blocks(2, 0, 0, 0, 0, true);
dim3 threadsPerBlock(threadsInX, threadsInY);
dim3 grid(blocksInX, blocksInY);
cudaError_t cudaStatus = cudaMalloc(&dev_aux2, size_img);
if (cudaStatus != cudaSuccess) {
*error = -16;
return;
}
dev_dilate_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
if (*error != 0) {
cudaFree(dev_aux2);
return;
}
dev_erode_twoSteps(dev_src2, dev_dst2, dev_aux2, radio, threadsPerBlock, grid, error);
swapBuffers(&dev_src2, &dev_dst2);
cudaFree(dev_aux2);
}
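// Note added for clarity: the "open" wrappers above run erosion followed by dilation and the "close"
// wrappers run dilation followed by erosion, which matches the usual definition of morphological
// opening and closing; both reuse the separable two-step erode/dilate kernels with a single auxiliary buffer.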
//************************************************************ AUTOMASK **************************************************************//
void automask(byte *src, byte* dst, byte * dev_src, byte * dev_dst, int size, byte min, byte max, int *error) {
cudaError_t cudaSTATUS;
int threads = 1024;
int blocks = 640;    // 65535 / 1024 ~ 64; use 64 * 10 = 640 blocks
int stride = threads * blocks;
*error = 1000;
if (dev_src == NULL) {
cudaSTATUS = cudaMalloc(&dev_src, size);
if (cudaSTATUS != cudaSuccess) {
*error = -1;
dev_src = NULL;
dev_dst = NULL;
return;
}
cudaSTATUS = cudaMalloc(&dev_dst, size);
if (cudaSTATUS != cudaSuccess) {
*error = -2;
cudaFree(dev_src);
dev_src = NULL;
dev_dst = NULL;
return;
}
}
cudaSTATUS = cudaMemcpy(dev_src, src, size, cudaMemcpyHostToDevice);
if (cudaSTATUS != cudaSuccess) {
*error = -3;
goto Error;
}
dev_threshold(dev_src, dev_dst, min, max, threads, blocks, stride, size, error);
swapBuffers(&dev_src, &dev_dst);
cudaSTATUS = cudaMemcpy(dst, dev_src, size, cudaMemcpyDeviceToHost);
if (cudaSTATUS != cudaSuccess) {
*error = -4;
goto Error;
}
//dst = dev_src;
Error:
cudaFree(dev_src);
dev_src = NULL;
cudaFree(dev_dst);
dev_dst = NULL;
}
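// Illustrative usage sketch (added; not part of the original API surface): automask is self-contained,
// so a caller only needs host-side input/output buffers. Passing NULL device pointers makes the routine
// allocate and release its own device memory. The buffer names and pixel format (one byte per pixel)
// below are example assumptions.
/*
void example_automask_usage(byte* host_in, byte* host_out, int width, int height) {
	int err = 0;
	int size = width * height;                          // one byte per pixel, as assumed above
	automask(host_in, host_out, NULL, NULL, size, 50, 200, &err);
	if (err < 0) {
		// negative codes mirror the cudaMalloc/cudaMemcpy failure points inside automask
	}
}
*/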
//************************************************************ ERRORS **************************************************************//
/*
else if (cudaError == cudaErrorInvalidDevicePointer)
*error = -3;
else if (cudaError == cudaErrorInvalidMemcpyDirection)
*error = -4;
else if (cudaError == cudaErrorInvalidValue)
*error = -5;
else if (cudaSTATUS == cudaErrorUnsupportedLimit) {
*error = -21;
goto Error;
}
else if (cudaSTATUS == cudaErrorDuplicateVariableName) {
*error = -22;
goto Error;
}
else if (cudaSTATUS == cudaErrorDuplicateTextureName) {
*error = -23;
goto Error;
}
else if (cudaSTATUS == cudaErrorDuplicateSurfaceName) {
*error = -24;
goto Error;
}
else if (cudaSTATUS == cudaErrorDevicesUnavailable) {
*error = -25;
goto Error;
}
else if (cudaSTATUS == cudaErrorInvalidKernelImage) {
*error = -26;
goto Error;
}
else if (cudaSTATUS == cudaErrorNoKernelImageForDevice) {
*error = -27;
goto Error;
}
else if (cudaSTATUS == cudaErrorIncompatibleDriverContext) {
*error = -28;
goto Error;
}
else if (cudaSTATUS == cudaErrorPeerAccessAlreadyEnabled) {
*error = -29;
goto Error;
}
else if (cudaSTATUS == cudaErrorPeerAccessNotEnabled) {
*error = -30;
goto Error;
}
else if (cudaSTATUS == cudaErrorDeviceAlreadyInUse) {
*error = -31;
goto Error;
}
else if (cudaSTATUS == cudaErrorProfilerDisabled) {
*error = -32;
goto Error;
}
else if (cudaSTATUS == cudaErrorProfilerNotInitialized) {
*error = -33;
goto Error;
}
else if (cudaSTATUS == cudaErrorProfilerAlreadyStarted) {
*error = -34;
goto Error;
}
else if (cudaSTATUS == cudaErrorProfilerAlreadyStopped) {
*error = -35;
goto Error;
}
else if (cudaSTATUS == cudaErrorStartupFailure) {
*error = -36;
goto Error;
}
else if (cudaSTATUS == cudaErrorApiFailureBase) {
*error = -37;
goto Error;
}
else if (cudaSTATUS == cudaErrorInvalidSurface) {
*error = -38;
goto Error;
}
else if (cudaSTATUS == cudaErrorNoDevice) {
*error = -39;
goto Error;
}
else if (cudaSTATUS == cudaErrorECCUncorrectable) {
*error = -40;
goto Error;
}
else if (cudaSTATUS == cudaErrorSharedObjectSymbolNotFound) {
*error = -41;
goto Error;
}
else if (cudaSTATUS == cudaErrorSharedObjectInitFailed) {
*error = -42;
goto Error;
}
else
{
*error = -1000001;
goto Error;
}
*/
|
3e3320dbf005051754a998bd5df1aea6bd33eddd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void deformable_im2col_cuda(Tensor data_im, Tensor data_offset,
const int channels, const int height,
const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
Tensor data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels,
deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
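// Note added for clarity: height_col/width_col above follow the standard output-size formula for a
// dilated convolution, out = floor((in + 2*pad - (dilation*(kernel-1)+1)) / stride) + 1. For example,
// height=32, pad_h=1, ksize_h=3, dilation_h=1, stride_h=1 gives height_col = (32+2-3)/1 + 1 = 32.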
void deformable_col2im_cuda(Tensor data_col, Tensor data_offset,
const int channels, const int height,
const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
Tensor grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels =
channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
dilation_w, channel_per_deformable_group, parallel_imgs,
deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void deformable_col2im_coord_cuda(
Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, Tensor grad_offset) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
deformable_group * parallel_imgs;
int channel_per_deformable_group =
channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_im_, data_offset_, channels, height,
width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs,
2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
| 3e3320dbf005051754a998bd5df1aea6bd33eddd.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void deformable_im2col_cuda(Tensor data_im, Tensor data_offset,
const int channels, const int height,
const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
Tensor data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels),
THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels,
deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void deformable_col2im_cuda(Tensor data_col, Tensor data_offset,
const int channels, const int height,
const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
Tensor grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels =
channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels),
THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
dilation_w, channel_per_deformable_group, parallel_imgs,
deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void deformable_col2im_coord_cuda(
Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, Tensor grad_offset) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
deformable_group * parallel_imgs;
int channel_per_deformable_group =
channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height,
width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs,
2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
|
f9018b140b05ffc61e3a112702483017d65be80c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void selection_k_radius_gpu(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
int batch_index = blockIdx.x;
int stride = batch_index * m * k;
idx += stride;
val += stride;
idx_out += stride;
val_out += stride;
for(int i = threadIdx.x; i < m;i += blockDim.x) {
for(int j = 0;j < k;j ++) {
if(val[i * k + j] < radius) {
idx_out[i * k + j] = idx[i * k + j];
val_out[i * k + j] = val[i * k + j];
} else {
idx_out[i * k + j] = idx[i * k ];
val_out[i * k + j] = val[i * k ];
}
}
}
}
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
__global__ void cube_select_two(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 16;
float temp_dist[16];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 16;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 16 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
bool flag = false;
for(int k = 0;k < 2;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 1; kk >= k + 1; kk --) {
idx_out[i * 16 + temp_idx + kk] = idx_out[i * 16 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 16 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
__global__ void cube_select_four(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 32;
float temp_dist[32];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 32;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 32 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
bool flag = false;
for(int k = 0;k < 4;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 3; kk >= k + 1; kk --) {
idx_out[i * 32 + temp_idx + kk] = idx_out[i * 32 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 32 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
void selectionKRadiusLauncher(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
hipLaunchKernelGGL(( selection_k_radius_gpu), dim3(b),dim3(256), 0, 0, b, m, k, radius, idx, val, idx_out, val_out);
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
hipLaunchKernelGGL(( cube_select), dim3(b), dim3(512), 0, 0, b, n, radius, xyz, idx_out);
}
void cubeSelectTwoLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
hipLaunchKernelGGL(( cube_select_two), dim3(b), dim3(512), 0, 0, b, n, radius, xyz, idx_out);
}
void cubeSelectFourLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
hipLaunchKernelGGL(( cube_select_four), dim3(b), dim3(512), 0, 0, b, n, radius, xyz, idx_out);
}
| f9018b140b05ffc61e3a112702483017d65be80c.cu | __global__ void selection_k_radius_gpu(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
int batch_index = blockIdx.x;
int stride = batch_index * m * k;
idx += stride;
val += stride;
idx_out += stride;
val_out += stride;
for(int i = threadIdx.x; i < m;i += blockDim.x) {
for(int j = 0;j < k;j ++) {
if(val[i * k + j] < radius) {
idx_out[i * k + j] = idx[i * k + j];
val_out[i * k + j] = val[i * k + j];
} else {
idx_out[i * k + j] = idx[i * k ];
val_out[i * k + j] = val[i * k ];
}
}
}
}
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
__global__ void cube_select_two(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 16;
float temp_dist[16];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 16;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 16 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
bool flag = false;
for(int k = 0;k < 2;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 1; kk >= k + 1; kk --) {
idx_out[i * 16 + temp_idx + kk] = idx_out[i * 16 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 16 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
__global__ void cube_select_four(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 32;
float temp_dist[32];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 32;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 32 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _s1 = (_y - _x);
int _s2 = (_y + _x);
int temp_idx = 0;
if(_x==1 && _y ==1 && _s1==0) temp_idx = 0;
else if(_x==1 && _y ==1 && _s1==1) temp_idx = 1;
else if(_x==0 && _y ==1 && _s2==1) temp_idx = 2;
else if(_x==0 && _y ==1 && _s1==0) temp_idx = 3;
else if(_x==0 && _y ==0 && _s1==1) temp_idx = 4;
else if(_x==0 && _y ==0 && _s1==0) temp_idx = 5;
else if(_x==0 && _y ==1 && _s2==0) temp_idx = 6;
else temp_idx = 7;
bool flag = false;
for(int k = 0;k < 4;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 3; kk >= k + 1; kk --) {
idx_out[i * 32 + temp_idx + kk] = idx_out[i * 32 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 32 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
void selectionKRadiusLauncher(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
selection_k_radius_gpu<<<b,256>>>(b, m, k, radius, idx, val, idx_out, val_out);
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
void cubeSelectTwoLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select_two<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
void cubeSelectFourLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select_four<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
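// Illustrative usage sketch (added; not part of the original file): minimal host-side driver for
// cubeSelectLauncher. It assumes xyz holds b*n*3 packed point coordinates already resident on the
// device and that idx_out receives b*n*8 neighbour indices (one slot per octant per point, as written
// by cube_select above). The buffer names are example assumptions.
/*
void example_cube_select(const float* d_xyz, int b, int n, float radius) {
	int* d_idx = NULL;
	cudaMalloc(&d_idx, sizeof(int) * b * n * 8);      // 8 octant slots per point
	cubeSelectLauncher(b, n, radius, d_xyz, d_idx);   // one block per batch element, 512 threads
	cudaDeviceSynchronize();                          // wait before reading d_idx back or reusing it
	cudaFree(d_idx);
}
*/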
|
24e17c5c034073766ed233b18620852cb65e5d35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
__global__
void image_sum_kernel(const uchar3* const inImg1, const uchar3* const inImg2, uchar3* const outImg, size_t n)
{
size_t id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < n){
uchar3 px_img1 = inImg1[id];
uchar3 px_img2 = inImg2[id];
outImg[id].x = px_img1.x + px_img2.x;
outImg[id].y = px_img1.y + px_img2.y;
outImg[id].z = px_img1.z + px_img2.z;
}
}
void image_sum(uchar3* const d_inImg1, uchar3* const d_inImg2, uchar3* const d_outImg, size_t n)
{
const int threadsPerBlock = 32;   // warpSize is only defined in device code; use an explicit block size on the host
const dim3 gridSize((n + threadsPerBlock - 1)/threadsPerBlock,1,1);   // round up so the tail of the image is covered
const dim3 blockSize(threadsPerBlock,1,1);
hipLaunchKernelGGL(( image_sum_kernel), dim3(gridSize),dim3(blockSize), 0, 0, d_inImg1, d_inImg2, d_outImg, n);
}
| 24e17c5c034073766ed233b18620852cb65e5d35.cu | #include "utils.h"
__global__
void image_sum_kernel(const uchar3* const inImg1, const uchar3* const inImg2, uchar3* const outImg, size_t n)
{
size_t id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < n){
uchar3 px_img1 = inImg1[id];
uchar3 px_img2 = inImg2[id];
outImg[id].x = px_img1.x + px_img2.x;
outImg[id].y = px_img1.y + px_img2.y;
outImg[id].z = px_img1.z + px_img2.z;
}
}
void image_sum(uchar3* const d_inImg1, uchar3* const d_inImg2, uchar3* const d_outImg, size_t n)
{
const int threadsPerBlock = 32;   // warpSize is only defined in device code; use an explicit block size on the host
const dim3 gridSize((n + threadsPerBlock - 1)/threadsPerBlock,1,1);   // round up so the tail of the image is covered
const dim3 blockSize(threadsPerBlock,1,1);
image_sum_kernel<<<gridSize,blockSize>>>(d_inImg1, d_inImg2, d_outImg, n);
}
|
60f7fc4dcebcbdc89de66cec15787eb33206d1fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_hdv1r.cuh"
__device__ __host__
void bc_hyperdifr(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (dir == 0) && (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) )
{
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+2,j+is,k,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k,f)];
}
else if((dir == 1) && (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,j+2,k,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k,f)];
else if((dir == 0) && (i==0) && j>=0 && j<((p->n[1])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,0,j+is,k,f)]=wt[encode3p2_hdv1r(p,6,j+is,k,f)];
else if((dir == 1) && (j==0) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,0,k,f)]=wt[encode3p2_hdv1r(p,i+is,6,k,f)];
#endif
#ifdef USE_SAC_3D
if( (dir == 0) && (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) && k>=0 && k<(p->n[2]) )
wt[encode3p2_hdv1r(p,i+2,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k+is,f)];
else if((dir == 1) && (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,j+2,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k+is,f)];
else if((dir == 2) && (k==(p->n[2])-1) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,k+2,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,(p->n[2])-5,f)];
else if((dir == 0) && (i==0) && j>=0 && j<((p->n[1])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,0,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,6,j+is,k+is,f)];
else if((dir == 1) && (j==0) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,0,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,6,k+is,f)];
else if((dir == 2) && (k==0) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,0,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,6,f)];
#endif
}
__device__ __host__
void bc_hyperdifr0(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) )
{
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+2,j+is,k,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k,f)];
}
else if((i==0) && j>=0 && j<((p->n[1])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,0,j+is,k,f)]=wt[encode3p2_hdv1r(p,6,j+is,k,f)];
#endif
#ifdef USE_SAC_3D
if( (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) && k>=0 && k<(p->n[2]) )
wt[encode3p2_hdv1r(p,i+2,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k+is,f)];
else if((i==0) && j>=0 && j<((p->n[1])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,0,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,6,j+is,k+is,f)];
#endif
}
__device__ __host__
void bc_hyperdifr1(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,j+2,k,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k,f)];
else if((j==0) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,0,k,f)]=wt[encode3p2_hdv1r(p,i+is,6,k,f)];
#endif
#ifdef USE_SAC_3D
if (j==((p->n[1])-1) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,j+2,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k+is,f)];
else if( (j==0) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,0,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,6,k+is,f)];
#endif
}
__device__ __host__
void bc_hyperdifr2(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC_3D
if( (k==(p->n[2])-1) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,k+2,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,(p->n[2])-5,f)];
else if((k==0) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,0,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,6,f)];
#endif
}
/*__device__ __host__
void bc_periodic1_temp2(real *wt, struct params *p,int i, int j, int f) {
if(i==1 )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
else if((i==((p->n[0]))) )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-4,j,f)];
else if(j==1 )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,6,f)];
else if((j==((p->n[1]))) )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-4,f)];
}*/
/*__device__ __host__
void bc_periodic2_temp2(real *wt, struct params *p,int i, int j, int f) {
if(i<1 && j<1)
{
if(i==j)
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,(p->n[0])-3+i,j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,6,f)];
else
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,(p->n[1])-3+j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
}
else if(i<1 && j>((p->n[1])-1))
{
if(i==(j-(p->n[1])-1))
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,(p->n[0])-3+i,4-(p->n[1])+j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-6,f)];
}
else if(i>((p->n[0])-1) && j<1)
{
if((i-(p->n[0])+1)==j)
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-5,j,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,4,f)];
}
else if(i>((p->n[0])-1) && j>((p->n[1])-1))
{
if(i==j)
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-5,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-5,j,f)];
}
}*/
__global__ void zeropadmaxviscr_parallel(struct params *p, real *wmod, real *wd, int order, int dir, real *temp, int ndimp)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
if(iindex<ndimp)
temp[iindex]=0.0;
}
__global__ void newreduction0computemaxviscr_parallel(real *cmax, real *temp,int ndimp)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
extern __shared__ double partialResult[];
int i;
partialResult[tid]=0.0;
if(iindex<ndimp)
partialResult[tid]=temp[iindex];
__syncthreads();
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0) {
if(partialResult[tid+s]>partialResult[tid])
partialResult[tid]=partialResult[tid + s];
}
__syncthreads();
}
__syncthreads();
if(tid==0)
{
cmax[blockIdx.x]=partialResult[0];
temp[blockIdx.x]=partialResult[0];
}
__syncthreads();
}
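// Note added for clarity: each block of the kernel above reduces its slice of temp[] to a per-block
// maximum in shared memory and writes it back to cmax[blockIdx.x] and temp[blockIdx.x], so a global
// maximum is obtained by relaunching on the shrinking front of temp until one block remains (result in
// temp[0]/cmax[0], and d_cmax must hold at least one entry per block). A minimal host-side sketch of
// such a driver loop, with assumed variable names:
/*
void example_reduce_max(real* d_cmax, real* d_temp, int ndimp, int threadsPerBlock)
{
	int n = ndimp;
	while (n > 1) {
		int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
		hipLaunchKernelGGL((newreduction0computemaxviscr_parallel), dim3(blocks), dim3(threadsPerBlock),
			threadsPerBlock * sizeof(double), 0, d_cmax, d_temp, n);
		n = blocks;      // surviving partial maxima now occupy temp[0..blocks-1]
	}
}
*/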
__global__ void myreduction0computemaxviscr_parallel(struct params *p, real *wmod, real *wd, int order, int dir, real *temp,int ndimp,int s)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
// extern __shared__ real sdata[];
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int tnumThreadsPerBlock = 128;
int numBlocks = (dimp+tnumThreadsPerBlock-1) / tnumThreadsPerBlock;
//real temp[dimp];
// perform first level of reduction,
// reading from global memory, writing to shared memory
//sdata[tid]=0.0;
// if(iindex<1024)
// temp[iindex]=0.0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
//int s=1;
//while(((s*=2)<=((ndimp/2)-1)) && ((iindex+s)<ndimp)) {
if((iindex+s)<ndimp)
if(temp[iindex+s]>temp[iindex])
temp[iindex]=temp[iindex + s];
// }
// __syncthreads();
// __syncthreads();
if(iindex==0)
p->maxviscoef=temp[0];
}
__global__ void hyperdifvisc5r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
if(iindex==0)
{
p->hdmean=0.0;
p->hdmax=0;
// for(ipg=0;ipg<(p->npgp[0]);ipg++)
// for(jpg=0;jpg<(p->npgp[1]);jpg++)
// {
// i=ip*(p->npgp[0])+ipg;
// j=jp*(p->npgp[1])+jpg;
//if( i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
//p->cmax=0.0;
for(ii[0]=1;ii[0]<((p->n[0])-1);ii[0]++)
for(ii[1]=1;ii[1]<((p->n[1])-1);ii[1]++)
#ifdef USE_SAC_3D
for(ii[2]=1;ii[2]<((p->n[2])-1);ii[2]++)
#endif
{
// computecmax3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
// atomicExch(&(p->cmax),(wd[fencode3_MODID(p,ii,soundspeed)]));
#ifdef USE_SAC_3D
if(wd[encode3_hdv1r(p,ii[0],ii[1],ii[2],hdnur)]>(p->maxviscoef))
p->maxviscoef=(wd[encode3_hdv1r(p,ii[0],ii[1],ii[2],hdnur)]);
#else
if(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]>(p->maxviscoef))
p->maxviscoef=(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]);
#endif
/* if(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]>(p->hdmax))
p->hdmax=(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]);
p->hdmean=(p->hdmean)+wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)];*/
}
//p->hdmean=(p->hdmean)/(((p->n[0])-2)*((p->n[1]))-2);
// }
}
//__syncthreads();
}
__global__ void hyperdifvisc4r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//tmp2 d3r
//tmp3 d1r
//tmp4 md3r
//tmp5 md1r
//tmp6 d3l
//tmp7 d1l
//tmp8 md3l
//tmp9 md1l
// p->maxviscoef=0;
// p->cmax=1.0;
//finally update nur and nul
//tmp4 md3r
//tmp5 md1r
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(i>1 && i<((p->n[0])-1) && j>1 && j<((p->n[1])-1) && k>1 && k<((p->n[2])-1))
#else
if(i>1 && i<((p->n[0])-1) && j>1 && j<((p->n[1])-1))
#endif
//if(i>1 && i<((p->n[0])-2) && j>1 && j<((p->n[1])-2))
{
//wd[encode3_hdv1r(p,i,j,hdnur)]=wtemp2[encode3_hdv1r(p,i+1,j+1,tmpnui)];
if(wtemp[encode3_hdv1r(p,i,j,k,tmp5)]>0)
{
//p->cmax=1.0;
#ifdef USE_SAC_3D
wd[encode3_hdv1r(p,i,j,k,hdnur)]=((dim==0)*(wd[encode3_hdv1r(p,i,j,k,delx1)])+(dim==1)*(wd[encode3_hdv1r(p,i,j,k,delx2)])+(dim==2)*(wd[encode3_hdv1r(p,i,j,k,delx3)]))*(p->cmax)*(p->chyp[field])*wtemp[encode3_hdv1r(p,i,j,k,tmp4)]/wtemp[encode3_hdv1r(p,i,j,k,tmp5)];
#else
wd[encode3_hdv1r(p,i,j,k,hdnur)]=((dim==0)*(wd[encode3_hdv1r(p,i,j,k,delx1)])+(dim==1)*(wd[encode3_hdv1r(p,i,j,k,delx2)]))*(p->cmax)*(p->chyp[field])*wtemp[encode3_hdv1r(p,i,j,k,tmp4)]/wtemp[encode3_hdv1r(p,i,j,k,tmp5)];
#endif
//wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.0e-1;
//wd[encode3_hdv1r(p,i,j,hdnur)]=wtemp[encode3_hdv1r(p,i,j,tmp4)];
//wd[encode3_hdv1r(p,i,j,k,hdnur)]=0.01;
// wd[encode3_hdv1r(p,i,j,k,hdnur)]=0.0005;
}
else
wd[encode3_hdv1r(p,i,j,k,hdnur)]=0;
//correct boundary contribution for MPI
//#ifdef USE_MPI
//if(i==2 || i==((p->n[0])-2) || j==2 || j<((p->n[1])-2)) wd[encode3_hdv1r(p,i,j,k,hdnur)]=0;
//#endif
/*switch(field)
{
case 0:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=6.744e-6;
break;
case 3:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.8e-6;
break;
case 1:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.9e-6;
break;
case 2:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.9e-6;
break;
case 5:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=9.4e-8;
break;
case 4:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=3.8e-7;
break;
} */
}
//__syncthreads();
}
__global__ void hyperdifvisc3r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js,ks;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt1=0,max3=0, maxt2=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//tmp2 d3r
//tmp3 d1r
//tmp4 md3r
//tmp5 md1r
//tmp6 d3l
//tmp7 d1l
//tmp8 md3l
//tmp9 md1l
//compute md3r and md1r
//tmp4 md3r
//tmp5 md1r
//js=0;
// is=0;
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
//if(ii[0]>1 && ii[1]>1 && ii[2]>1 && ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
if(i>1 && j>1 && k>1 && i<((p->n[0])-2) && j<((p->n[1])-2) && k<((p->n[2]))-2)
#else
//if(ii[0]>1 && ii[1]>1 && ii[0]<p->n[0] && ii[1]<p->n[1])
if(i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
#endif
// if( i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
maxt1=0;
#ifdef USE_SAC_3D
for(is=-(dim==0); is<=(dim==0); is++)
for(js=-(dim==1); js<=(dim==1); js++)
for(ks=-(dim==2); ks<=(dim==2); ks++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d3)]>maxt1)
maxt1=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d3)];
}
#else
for(is=-(dim==0); is<=(dim==0); is++)
for(js=-(dim==1); js<=(dim==1); js++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d3)]>maxt1)
maxt1=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d3)];
}
#endif
wtemp[encode3_hdv1r(p,i,j,k,tmp4)]=maxt1;
maxt2=0;
#ifdef USE_SAC_3D
for(is=-2*(dim==0); is<=2*(dim==0); is++)
for(js=-2*(dim==1); js<=2*(dim==1); js++)
for(ks=-2*(dim==2); ks<=2*(dim==2); ks++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d1)]>maxt2)
maxt2=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d1)];
}
#else
for(is=-2*(dim==0); is<=2*(dim==0); is++)
for(js=-2*(dim==1); js<=2*(dim==1); js++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d1)]>maxt2)
maxt2=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d1)];
}
#endif
wtemp[encode3_hdv1r(p,i,j,k,tmp5)]=maxt2;
}
//__syncthreads();
}
__global__ void hyperdifvisc2r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//compute d3r and d1r
//tmp2 d3r
//tmp3 d1r
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1]>1 && ii[2]>1 && ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]>1 && ii[1]>1 && ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j>1 && i<((p->n[0])) && j<((p->n[1])))
{
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,i,j,k,d3)]=fabs(3.0*(wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k+(dim==2),tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ) - (wtemp2[encode3p2_hdv1r(p,i+2*(dim==0),j+2*(dim==1),k+2*(dim==2),tmpnui)] - wtemp2[encode3p2_hdv1r(p,i-(dim==0),j-(dim==1),k-(dim==2),tmpnui)] ));
#else
wtemp1[encode3p1_hdv1r(p,i,j,k,d3)]=fabs(3.0*(wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ) - (wtemp2[encode3p2_hdv1r(p,i+2*(dim==0),j+2*(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i-(dim==0),j-(dim==1),k,tmpnui)] ));
#endif
}
//__syncthreads();
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
// if(i>0 && j>0 && k>0 && i<=((p->n[0])) && j<=((p->n[1])) && k<=((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
//if(i>0 && j>0 && i<=((p->n[0])) && j<=((p->n[1])))
#endif
//if(i>0 && j>0 && i<=((p->n[0])) && j<=((p->n[1])))
{
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k+1,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0)+1,j+(dim==1)+1,k+(dim==2)+1,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i+1,j+1,k+1,tmpnui)] ));
#else
//wtemp1[encode3p1_hdv1r(p,i,j,k,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ));
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0)+1,j+(dim==1)+1,k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i+1,j+1,k,tmpnui)] ));
#endif
}
//__syncthreads();
}
__global__ void hyperdifvisc1ar_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
__shared__ real wts[512];
__shared__ real wms[512];
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if( i<((p->n[0])) && j<((p->n[1])))
{
#ifdef USE_SAC_3D
wtemp2[encode3p2_hdv1r(p,i+1,j+1,k+1,tmpnui)]=wtemp[encode3_hdv1r(p,i,j,k,tmp6)];
#else
wtemp2[encode3p2_hdv1r(p,i+1,j+1,0,tmpnui)]=wtemp[encode3_hdv1r(p,i,j,0,tmp6)];
#endif
}
//__syncthreads();
}
__global__ void hyperdifvisc1bcr_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
__shared__ real wts[512];
__shared__ real wms[512];
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<(p->n[1]) && ii[2]<(p->n[2]))
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
bc_hyperdifr(wtemp2, p,ii, tmpnui,dim);
}
//__syncthreads();
}
__global__ void hyperdifvisc1r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
real maxt=0,max3=0, max1=0;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int bfac1,bfac2,bfac3;
//int bfac1=(field==rho || field>mom2)+(field>rho && field<energy);
//int bfac2= (field==rho || field>mom2);
//int bfac3=(field>rho && field<energy);
int shift=order*NVAR*dimp;
//__shared__ real wts[512];
//__shared__ real wms[512];
//init temp1 and temp2 to zero
//the compute element initialising n[0] or n[1] element must do +1 and +2
//this is because we fit the problem geometrically to nixnj elements
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=tmp1; f<=tmp8; f++)
wtemp[fencode3_hdv1r(p,ii,f)]=0;
for(int f=d1; f<=d3; f++)
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,ii[0],ii[1],ii[2],f)]=0;
wtemp2[encode3p2_hdv1r(p,ii[0],ii[1],ii[2],tmpnui)]=0;
#else
wtemp1[encode3p1_hdv1r(p,ii[0],ii[1],k,f)]=0;
wtemp2[encode3p2_hdv1r(p,ii[0],ii[1],k,tmpnui)]=0;
#endif
if(i==((p->n[0])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,ii[0]+1,ii[1],k,f)]=0;
wtemp2[encode3p2_hdv1r(p,i+1,j,k,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i+2,j,k,tmpnui)]=0;
}
if(j==((p->n[1])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i,j+1,k,f)]=0;
wtemp2[encode3p2_hdv1r(p,i,j+1,k,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i,j+2,k,tmpnui)]=0;
}
#ifdef USE_SAC_3D
if(k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i,j,k+1,f)]=0;
wtemp2[encode3p2_hdv1r(p,i,j,k+1,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i,j,k+2,tmpnui)]=0;
}
#endif
if(j==((p->n[1])-1) && i==((p->n[0])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,f)]=0;
for(int di=0; di<2; di++)
for(int dj=0; dj<2; dj++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j+1+dj,k,tmpnui)]=0;
}
#ifdef USE_SAC_3D
if(i==((p->n[0])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j,k+1,f)]=0;
for(int di=0; di<2; di++)
for(int dk=0; dk<2; dk++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j,k+1+dk,tmpnui)]=0;
}
#endif
#ifdef USE_SAC_3D
if(j==((p->n[1])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,f)]=0;
for(int dk=0; dk<2; dk++)
for(int dj=0; dj<2; dj++)
wtemp2[encode3p2_hdv1r(p,i,j+1+dj,k+1+dk,tmpnui)]=0;
}
#endif
#ifdef USE_SAC_3D
if(i==((p->n[0])-1) && j==((p->n[1])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k+1,f)]=0;
for(int dk=0; dk<2; dk++)
for(int dj=0; dj<2; dj++)
for(int di=0; di<2; di++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j+1+dj,k+1+dk,tmpnui)]=0;
}
#endif
}
//__syncthreads();
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
//for(iv=0;iv<NVAR;iv++)
// wms[tid+iv*blockdim]=wmod[fencode_hdv1r(p,i,j,iv)+shift];
//wts[tid]=wtemp[fencode_hdv1r(p,i,j,tmp6)];
//temp value for viscosity
//tmp6 tmpnu
#ifdef USE_SAC
if(field==energy)
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,energy)+shift]-0.5*((wmod[fencode3_hdv1r(p,ii,b1)+shift]*wmod[fencode3_hdv1r(p,ii,b1)+shift]+wmod[fencode3_hdv1r(p,ii,b2)+shift]*wmod[fencode3_hdv1r(p,ii,b2)+shift])+(wmod[fencode3_hdv1r(p,ii,mom1)+shift]*wmod[fencode3_hdv1r(p,ii,mom1)+shift]+wmod[fencode3_hdv1r(p,ii,mom2)+shift]*wmod[fencode3_hdv1r(p,ii,mom2)+shift])/(wmod[fencode3_hdv1r(p,ii,rho)+shift]+wmod[fencode3_hdv1r(p,ii,rhob)+shift] ));
else
{
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift];
if((field ==mom1 || field == mom2))
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift]/(((wmod[fencode3_hdv1r(p,ii,rho)+shift] +wmod[fencode3_hdv1r(p,ii,rhob)+shift])));
}
//comment removed below to test mpi 29/10/2013
wtemp2[encode3_hdv1r(p,i+1,j+1,k,tmpnui)]=wtemp[fencode3_hdv1r(p,ii,tmp6)];
#endif
#ifdef USE_SAC_3D
if(field==energy)
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,energy)+shift]-0.5*((wmod[fencode3_hdv1r(p,ii,b1)+shift]*wmod[fencode3_hdv1r(p,ii,b1)+shift]+wmod[fencode3_hdv1r(p,ii,b2)+shift]*wmod[fencode3_hdv1r(p,ii,b2)+shift]+wmod[fencode3_hdv1r(p,ii,b3)+shift]*wmod[fencode3_hdv1r(p,ii,b3)+shift])
+(wmod[fencode3_hdv1r(p,ii,mom1)+shift]*wmod[fencode3_hdv1r(p,ii,mom1)+shift]+wmod[fencode3_hdv1r(p,ii,mom2)+shift]*wmod[fencode3_hdv1r(p,ii,mom2)+shift]+wmod[fencode3_hdv1r(p,ii,mom3)+shift]*wmod[fencode3_hdv1r(p,ii,mom3)+shift])/(wmod[fencode3_hdv1r(p,ii,rho)+shift]+wmod[fencode3_hdv1r(p,ii,rhob)+shift] ));
else
{
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift];
if((field ==mom1 || field == mom2 || field == mom3))
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift]/(((wmod[fencode3_hdv1r(p,ii,rho)+shift] +wmod[fencode3_hdv1r(p,ii,rhob)+shift])));
}
//comment removed below to test mpi 29/10/2013
wtemp2[encode3_hdv1r(p,i+1,j+1,k+1,tmpnui)]=wtemp[fencode3_hdv1r(p,ii,tmp6)];
#endif
wd[fencode3_hdv1r(p,ii,hdnur)]=0;
}
//__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdv1r(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifvisc1r(struct params **p, struct params **d_p, real **d_wmod,real **wd, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
double *d_cmax;
double maxviscoef;
double *d_bmax;
real fn,fractn,in;
int ndimp;
int i;
////hipSetDevice(selectedDevice);
int nit=100;
double *h_cmax;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int NTPB=512;
int smemSize = NTPB * sizeof(double);
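//ndimp: dimp rounded up to the next power of two; the zero-padded temp array of this
//length is what feeds the block-wise max reduction below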
fn=log(dimp)/log(2.0);
fractn=modf(fn,&in);
if(fractn>0)
{
fn+=1;
ndimp=(int)pow(2,fn);
}
else
ndimp=dimp;
// dim3 dimBlock(dimblock, 1);
// dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
(*p)->hdmax=0;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( hyperdifvisc1r_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifvisc1ar_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifvisc2r_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifvisc3r_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifvisc4r_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
//compute max hyperviscosity (only used by dt modifier)
if(((*p)->moddton)==1 )
{
// hyperdifvisc5r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
// hipDeviceSynchronize();
numBlocks = (ndimp+NTPB-1) / NTPB;
h_cmax = (double*)malloc(numBlocks*sizeof(double));
hipMalloc((void**)&d_cmax, numBlocks*sizeof(double));
hipMalloc((void**)&d_bmax, numBlocks*sizeof(double));
maxviscoef=(*p)->maxviscoef;
//maxviscoef=SMALLDOUBLE;
hipLaunchKernelGGL(( zeropadmaxviscr_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, dim, *d_wtemp,ndimp);
hipDeviceSynchronize();
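//copy wd back to the host and stage just its hdnur slice into d_wtemp,
//which is the array the block-wise max reduction below scans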
hipMemcpy(*wd, *d_wd, NDERV*dimp*sizeof(real), hipMemcpyDeviceToHost);
hipMemcpy(*d_wtemp, ((*wd)+(hdnur*dimp)), dimp*sizeof(real), hipMemcpyHostToDevice);
/*int s=1;
while(((s*=2)<=((ndimp/2)-1)) )
{
myreduction0computemaxviscr_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dim, *d_wtemp,ndimp,s);
hipDeviceSynchronize();
}*/
for(i=0;i<numBlocks;i++)
h_cmax[i]=0;
hipMemcpy(d_bmax, h_cmax, numBlocks*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( newreduction0computemaxviscr_parallel), dim3(numBlocks),dim3(NTPB),smemSize, 0, d_bmax,*d_wtemp,ndimp);
hipDeviceSynchronize();
hipMemcpy(h_cmax, d_bmax, numBlocks*sizeof(double), hipMemcpyDeviceToHost);
for( i=0;i<numBlocks;i++)
if(h_cmax[i]>maxviscoef) maxviscoef=h_cmax[i];
if((*p)->maxviscoef<maxviscoef)
(*p)->maxviscoef=maxviscoef;
free(h_cmax);
hipFree(d_bmax);
hipFree(d_cmax);
}
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
// printf("field right hdmean hdmax %d %8.8g %8.8g \n",field, (*p)->hdmean, (*p)->hdmax);
return 0;
}
int cuhyperdifvisc1ir(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// dim3 dimBlock(dimblock, 1);
// dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( hyperdifvisc1r_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
// printf("field right hdmean hdmax %d %8.8g %8.8g \n",field, (*p)->hdmean, (*p)->hdmax);
return 0;
}
| 60f7fc4dcebcbdc89de66cec15787eb33206d1fc.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_hdv1r.cuh"
__device__ __host__
void bc_hyperdifr(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (dir == 0) && (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) )
{
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+2,j+is,k,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k,f)];
}
else if((dir == 1) && (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,j+2,k,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k,f)];
else if((dir == 0) && (i==0) && j>=0 && j<((p->n[1])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,0,j+is,k,f)]=wt[encode3p2_hdv1r(p,6,j+is,k,f)];
else if((dir == 1) && (j==0) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,0,k,f)]=wt[encode3p2_hdv1r(p,i+is,6,k,f)];
#endif
#ifdef USE_SAC_3D
if( (dir == 0) && (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) && k>=0 && k<(p->n[2]) )
wt[encode3p2_hdv1r(p,i+2,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k+is,f)];
else if((dir == 1) && (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,j+2,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k+is,f)];
else if((dir == 2) && (k==(p->n[2])-1) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,k+2,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,(p->n[2])-5,f)];
else if((dir == 0) && (i==0) && j>=0 && j<((p->n[1])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,0,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,6,j+is,k+is,f)];
else if((dir == 1) && (j==0) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,0,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,6,k+is,f)];
else if((dir == 2) && (k==0) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,0,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,6,f)];
#endif
}
__device__ __host__
void bc_hyperdifr0(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) )
{
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+2,j+is,k,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k,f)];
}
else if((i==0) && j>=0 && j<((p->n[1])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,0,j+is,k,f)]=wt[encode3p2_hdv1r(p,6,j+is,k,f)];
#endif
#ifdef USE_SAC_3D
if( (i==(p->n[0])-1) && j>=0 && j<(p->n[1]) && k>=0 && k<(p->n[2]) )
wt[encode3p2_hdv1r(p,i+2,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,(p->n[0])-5,j+is,k+is,f)];
else if((i==0) && j>=0 && j<((p->n[1])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,0,j+is,k+is,f)]=wt[encode3p2_hdv1r(p,6,j+is,k+is,f)];
#endif
}
__device__ __host__
void bc_hyperdifr1(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC
if( (j==(p->n[1])-1) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,j+2,k,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k,f)];
else if((j==0) && i>=0 && i<((p->n[0])) )
//for(int is=0;is<3-2*(j<((p->n[1])-1));is++)
wt[encode3p2_hdv1r(p,i+is,0,k,f)]=wt[encode3p2_hdv1r(p,i+is,6,k,f)];
#endif
#ifdef USE_SAC_3D
if (j==((p->n[1])-1) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,j+2,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,(p->n[1])-5,k+is,f)];
else if( (j==0) && i>=0 && i<((p->n[0])) && k>=0 && k<((p->n[2])) )
wt[encode3p2_hdv1r(p,i+is,0,k+is,f)]=wt[encode3p2_hdv1r(p,i+is,6,k+is,f)];
#endif
}
__device__ __host__
void bc_hyperdifr2(real *wt, struct params *p,int *ii, int f,int dir) {
int i=ii[0];
int j=ii[1];
int k=0;
#ifdef USE_SAC_3D
k=ii[2];
#endif
int is=1;
#ifdef USE_SAC_3D
if( (k==(p->n[2])-1) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,k+2,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,(p->n[2])-5,f)];
else if((k==0) && i>=0 && i<((p->n[0])) && j>=0 && j<((p->n[1])) )
wt[encode3p2_hdv1r(p,i+is,j+is,0,f)]=wt[encode3p2_hdv1r(p,i+is,j+is,6,f)];
#endif
}
/*__device__ __host__
void bc_periodic1_temp2(real *wt, struct params *p,int i, int j, int f) {
if(i==1 )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
else if((i==((p->n[0]))) )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-4,j,f)];
else if(j==1 )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,6,f)];
else if((j==((p->n[1]))) )
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-4,f)];
}*/
/*__device__ __host__
void bc_periodic2_temp2(real *wt, struct params *p,int i, int j, int f) {
if(i<1 && j<1)
{
if(i==j)
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,(p->n[0])-3+i,j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,6,f)];
else
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,(p->n[1])-3+j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
}
else if(i<1 && j>((p->n[1])-1))
{
if(i==(j-(p->n[1])-1))
//wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,(p->n[0])-3+i,4-(p->n[1])+j,f)];
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,6,j,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-6,f)];
}
else if(i>((p->n[0])-1) && j<1)
{
if((i-(p->n[0])+1)==j)
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-5,j,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,4,f)];
}
else if(i>((p->n[0])-1) && j>((p->n[1])-1))
{
if(i==j)
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i,j-5,f)];
else
wt[fencode_hdv1r(p,i,j,f)]=wt[fencode_hdv1r(p,i-5,j,f)];
}
}*/
__global__ void zeropadmaxviscr_parallel(struct params *p, real *wmod, real *wd, int order, int dir, real *temp, int ndimp)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
if(iindex<ndimp)
temp[iindex]=0.0;
}
__global__ void newreduction0computemaxviscr_parallel(real *cmax, real *temp,int ndimp)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
extern __shared__ double partialResult[];
int i;
partialResult[tid]=0.0;
if(iindex<ndimp)
partialResult[tid]=temp[iindex];
__syncthreads();
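// tree-style max reduction in shared memory: at step s every thread whose
// tid is a multiple of 2*s folds in the partial maximum held s entries away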
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if ((tid % (2*s)) == 0) {
if(partialResult[tid+s]>partialResult[tid])
partialResult[tid]=partialResult[tid + s];
}
__syncthreads();
}
__syncthreads();
if(tid==0)
{
cmax[blockIdx.x]=partialResult[0];
temp[blockIdx.x]=partialResult[0];
}
__syncthreads();
}
__global__ void myreduction0computemaxviscr_parallel(struct params *p, real *wmod, real *wd, int order, int dir, real *temp,int ndimp,int s)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
int i,j;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
// extern __shared__ real sdata[];
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int tnumThreadsPerBlock = 128;
int numBlocks = (dimp+tnumThreadsPerBlock-1) / tnumThreadsPerBlock;
//real temp[dimp];
// one strided max step over global memory (the shared-memory first-level
// reduction is commented out); a host loop over s completes the reduction
// (see the commented-out call site in cuhyperdifvisc1r)
//sdata[tid]=0.0;
// if(iindex<1024)
// temp[iindex]=0.0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
//int s=1;
//while(((s*=2)<=((ndimp/2)-1)) && ((iindex+s)<ndimp)) {
if((iindex+s)<ndimp)
if(temp[iindex+s]>temp[iindex])
temp[iindex]=temp[iindex + s];
// }
// __syncthreads();
// __syncthreads();
if(iindex==0)
p->maxviscoef=temp[0];
}
__global__ void hyperdifvisc5r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
if(iindex==0)
{
p->hdmean=0.0;
p->hdmax=0;
// for(ipg=0;ipg<(p->npgp[0]);ipg++)
// for(jpg=0;jpg<(p->npgp[1]);jpg++)
// {
// i=ip*(p->npgp[0])+ipg;
// j=jp*(p->npgp[1])+jpg;
//if( i<((p->n[0])) && j<((p->n[1])))
//if(i>1 && j >1 && i<((p->n[0])-2) && j<((p->n[1])-2))
//p->cmax=0.0;
for(ii[0]=1;ii[0]<((p->n[0])-1);ii[0]++)
for(ii[1]=1;ii[1]<((p->n[1])-1);ii[1]++)
#ifdef USE_SAC_3D
for(ii[2]=1;ii[2]<((p->n[2])-1);ii[2]++)
#endif
{
// computecmax3_cdf(wmod+(order*dimp*NVAR),wd,p,ii);
// atomicExch(&(p->cmax),(wd[fencode3_MODID(p,ii,soundspeed)]));
#ifdef USE_SAC_3D
if(wd[encode3_hdv1r(p,ii[0],ii[1],ii[2],hdnur)]>(p->maxviscoef))
p->maxviscoef=(wd[encode3_hdv1r(p,ii[0],ii[1],ii[2],hdnur)]);
#else
if(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]>(p->maxviscoef))
p->maxviscoef=(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]);
#endif
/* if(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]>(p->hdmax))
p->hdmax=(wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)]);
p->hdmean=(p->hdmean)+wd[encode3_hdv1r(p,ii[0],ii[1],0,hdnur)];*/
}
//p->hdmean=(p->hdmean)/(((p->n[0])-2)*((p->n[1]))-2);
// }
}
//__syncthreads();
}
__global__ void hyperdifvisc4r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//tmp2 d3r
//tmp3 d1r
//tmp4 md3r
//tmp5 md1r
//tmp6 d3l
//tmp7 d1l
//tmp8 md3l
//tmp9 md1l
// p->maxviscoef=0;
// p->cmax=1.0;
//finally update nur and nul
//tmp4 md3r
//tmp5 md1r
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(i>1 && i<((p->n[0])-1) && j>1 && j<((p->n[1])-1) && k>1 && k<((p->n[2])-1))
#else
if(i>1 && i<((p->n[0])-1) && j>1 && j<((p->n[1])-1))
#endif
//if(i>1 && i<((p->n[0])-2) && j>1 && j<((p->n[1])-2))
{
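//hyperdiffusion coefficient for this cell: hdnur = chyp*cmax*(grid spacing for dim, delx1/2/3)*md3r/md1r
//(tmp4=md3r, tmp5=md1r); it is set to zero where md1r vanishes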
//wd[encode3_hdv1r(p,i,j,hdnur)]=wtemp2[encode3_hdv1r(p,i+1,j+1,tmpnui)];
if(wtemp[encode3_hdv1r(p,i,j,k,tmp5)]>0)
{
//p->cmax=1.0;
#ifdef USE_SAC_3D
wd[encode3_hdv1r(p,i,j,k,hdnur)]=((dim==0)*(wd[encode3_hdv1r(p,i,j,k,delx1)])+(dim==1)*(wd[encode3_hdv1r(p,i,j,k,delx2)])+(dim==2)*(wd[encode3_hdv1r(p,i,j,k,delx3)]))*(p->cmax)*(p->chyp[field])*wtemp[encode3_hdv1r(p,i,j,k,tmp4)]/wtemp[encode3_hdv1r(p,i,j,k,tmp5)];
#else
wd[encode3_hdv1r(p,i,j,k,hdnur)]=((dim==0)*(wd[encode3_hdv1r(p,i,j,k,delx1)])+(dim==1)*(wd[encode3_hdv1r(p,i,j,k,delx2)]))*(p->cmax)*(p->chyp[field])*wtemp[encode3_hdv1r(p,i,j,k,tmp4)]/wtemp[encode3_hdv1r(p,i,j,k,tmp5)];
#endif
//wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.0e-1;
//wd[encode3_hdv1r(p,i,j,hdnur)]=wtemp[encode3_hdv1r(p,i,j,tmp4)];
//wd[encode3_hdv1r(p,i,j,k,hdnur)]=0.01;
// wd[encode3_hdv1r(p,i,j,k,hdnur)]=0.0005;
}
else
wd[encode3_hdv1r(p,i,j,k,hdnur)]=0;
//correct boundary contribution for MPI
//#ifdef USE_MPI
//if(i==2 || i==((p->n[0])-2) || j==2 || j<((p->n[1])-2)) wd[encode3_hdv1r(p,i,j,k,hdnur)]=0;
//#endif
/*switch(field)
{
case 0:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=6.744e-6;
break;
case 3:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.8e-6;
break;
case 1:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.9e-6;
break;
case 2:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=1.9e-6;
break;
case 5:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=9.4e-8;
break;
case 4:
wd[encode3_hdv1r(p,i,j,k,hdnur)]=3.8e-7;
break;
} */
}
//__syncthreads();
}
__global__ void hyperdifvisc3r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js,ks;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt1=0,max3=0, maxt2=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//tmp2 d3r
//tmp3 d1r
//tmp4 md3r
//tmp5 md1r
//tmp6 d3l
//tmp7 d1l
//tmp8 md3l
//tmp9 md1l
//compute md3r and md1r
//tmp4 md3r
//tmp5 md1r
//js=0;
// is=0;
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
//if(ii[0]>1 && ii[1]>1 && ii[2]>1 && ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
if(i>1 && j>1 && k>1 && i<((p->n[0])-2) && j<((p->n[1])-2) && k<((p->n[2]))-2)
#else
//if(ii[0]>1 && ii[1]>1 && ii[0]<p->n[0] && ii[1]<p->n[1])
if(i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
#endif
// if( i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
{
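//md3r (tmp4): maximum of d3 over the +/-1 neighbourhood along dim;
//md1r (tmp5): maximum of d1 over the +/-2 neighbourhood along dim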
maxt1=0;
#ifdef USE_SAC_3D
for(is=-(dim==0); is<=(dim==0); is++)
for(js=-(dim==1); js<=(dim==1); js++)
for(ks=-(dim==2); ks<=(dim==2); ks++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d3)]>maxt1)
maxt1=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d3)];
}
#else
for(is=-(dim==0); is<=(dim==0); is++)
for(js=-(dim==1); js<=(dim==1); js++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d3)]>maxt1)
maxt1=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d3)];
}
#endif
wtemp[encode3_hdv1r(p,i,j,k,tmp4)]=maxt1;
maxt2=0;
#ifdef USE_SAC_3D
for(is=-2*(dim==0); is<=2*(dim==0); is++)
for(js=-2*(dim==1); js<=2*(dim==1); js++)
for(ks=-2*(dim==2); ks<=2*(dim==2); ks++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d1)]>maxt2)
maxt2=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k+1+ks,d1)];
}
#else
for(is=-2*(dim==0); is<=2*(dim==0); is++)
for(js=-2*(dim==1); js<=2*(dim==1); js++)
{
if(wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d1)]>maxt2)
maxt2=wtemp1[encode3p1_hdv1r(p,i+1+is,j+1+js,k,d1)];
}
#endif
wtemp[encode3_hdv1r(p,i,j,k,tmp5)]=maxt2;
}
//__syncthreads();
}
__global__ void hyperdifvisc2r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
//tmp1 tmp_nuI
//compute d3r and d1r
//tmp2 d3r
//tmp3 d1r
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1]>1 && ii[2]>1 && ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]>1 && ii[1]>1 && ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//if(i>1 && j>1 && i<((p->n[0])) && j<((p->n[1])))
{
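//d3: third-difference magnitude |3*(v_{i+1}-v_i) - (v_{i+2}-v_{i-1})| of the
//padded field (tmpnui) along direction dim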
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,i,j,k,d3)]=fabs(3.0*(wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k+(dim==2),tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ) - (wtemp2[encode3p2_hdv1r(p,i+2*(dim==0),j+2*(dim==1),k+2*(dim==2),tmpnui)] - wtemp2[encode3p2_hdv1r(p,i-(dim==0),j-(dim==1),k-(dim==2),tmpnui)] ));
#else
wtemp1[encode3p1_hdv1r(p,i,j,k,d3)]=fabs(3.0*(wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ) - (wtemp2[encode3p2_hdv1r(p,i+2*(dim==0),j+2*(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i-(dim==0),j-(dim==1),k,tmpnui)] ));
#endif
}
//__syncthreads();
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
// if(i>0 && j>0 && k>0 && i<=((p->n[0])) && j<=((p->n[1])) && k<=((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
//if(i>0 && j>0 && i<=((p->n[0])) && j<=((p->n[1])))
#endif
//if(i>0 && j>0 && i<=((p->n[0])) && j<=((p->n[1])))
{
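//d1: first-difference magnitude |v_{i+1}-v_i| along dim, stored with a one-cell
//offset into the padded wtemp1 array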
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k+1,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0)+1,j+(dim==1)+1,k+(dim==2)+1,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i+1,j+1,k+1,tmpnui)] ));
#else
//wtemp1[encode3p1_hdv1r(p,i,j,k,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0),j+(dim==1),k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i,j,k,tmpnui)] ));
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,d1)]=fabs((wtemp2[encode3p2_hdv1r(p,i+(dim==0)+1,j+(dim==1)+1,k,tmpnui)] - wtemp2[encode3p2_hdv1r(p,i+1,j+1,k,tmpnui)] ));
#endif
}
//__syncthreads();
}
__global__ void hyperdifvisc1ar_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
__shared__ real wts[512];
__shared__ real wms[512];
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if( i<((p->n[0])) && j<((p->n[1])))
{
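//copy the working field (tmp6) into the interior of the two-cell-padded wtemp2 array,
//hence the +1 offset on each index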
#ifdef USE_SAC_3D
wtemp2[encode3p2_hdv1r(p,i+1,j+1,k+1,tmpnui)]=wtemp[encode3_hdv1r(p,i,j,k,tmp6)];
#else
wtemp2[encode3p2_hdv1r(p,i+1,j+1,0,tmpnui)]=wtemp[encode3_hdv1r(p,i,j,0,tmp6)];
#endif
}
//__syncthreads();
}
__global__ void hyperdifvisc1bcr_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
real maxt=0,max3=0, max1=0;
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
__shared__ real wts[512];
__shared__ real wms[512];
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<(p->n[1]) && ii[2]<(p->n[2]))
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
bc_hyperdifr(wtemp2, p,ii, tmpnui,dim);
}
//__syncthreads();
}
__global__ void hyperdifvisc1r_parallel(struct params *p,real *wmod,
real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
const int blockdim=blockDim.x;
const int SZWT=blockdim;
const int SZWM=blockdim*NVAR;
int tid=threadIdx.x;
real maxt=0,max3=0, max1=0;
int i,j,iv;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int bfac1,bfac2,bfac3;
//int bfac1=(field==rho || field>mom2)+(field>rho && field<energy);
//int bfac2= (field==rho || field>mom2);
//int bfac3=(field>rho && field<energy);
int shift=order*NVAR*dimp;
//__shared__ real wts[512];
//__shared__ real wms[512];
//init temp1 and temp2 to zero
//the compute element initialising n[0] or n[1] element must do +1 and +2
//this is because we fit the problem geometrically to nixnj elements
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=tmp1; f<=tmp8; f++)
wtemp[fencode3_hdv1r(p,ii,f)]=0;
for(int f=d1; f<=d3; f++)
#ifdef USE_SAC_3D
wtemp1[encode3p1_hdv1r(p,ii[0],ii[1],ii[2],f)]=0;
wtemp2[encode3p2_hdv1r(p,ii[0],ii[1],ii[2],tmpnui)]=0;
#else
wtemp1[encode3p1_hdv1r(p,ii[0],ii[1],k,f)]=0;
wtemp2[encode3p2_hdv1r(p,ii[0],ii[1],k,tmpnui)]=0;
#endif
if(i==((p->n[0])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,ii[0]+1,ii[1],k,f)]=0;
wtemp2[encode3p2_hdv1r(p,i+1,j,k,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i+2,j,k,tmpnui)]=0;
}
if(j==((p->n[1])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i,j+1,k,f)]=0;
wtemp2[encode3p2_hdv1r(p,i,j+1,k,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i,j+2,k,tmpnui)]=0;
}
#ifdef USE_SAC_3D
if(k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i,j,k+1,f)]=0;
wtemp2[encode3p2_hdv1r(p,i,j,k+1,tmpnui)]=0;
wtemp2[encode3p2_hdv1r(p,i,j,k+2,tmpnui)]=0;
}
#endif
if(j==((p->n[1])-1) && i==((p->n[0])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,f)]=0;
for(int di=0; di<2; di++)
for(int dj=0; dj<2; dj++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j+1+dj,k,tmpnui)]=0;
}
#ifdef USE_SAC_3D
if(i==((p->n[0])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j,k+1,f)]=0;
for(int di=0; di<2; di++)
for(int dk=0; dk<2; dk++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j,k+1+dk,tmpnui)]=0;
}
#endif
#ifdef USE_SAC_3D
if(j==((p->n[1])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k,f)]=0;
for(int dk=0; dk<2; dk++)
for(int dj=0; dj<2; dj++)
wtemp2[encode3p2_hdv1r(p,i,j+1+dj,k+1+dk,tmpnui)]=0;
}
#endif
#ifdef USE_SAC_3D
if(i==((p->n[0])-1) && j==((p->n[1])-1) && k==((p->n[2])-1))
{
for(int f=d1; f<=d3; f++)
wtemp1[encode3p1_hdv1r(p,i+1,j+1,k+1,f)]=0;
for(int dk=0; dk<2; dk++)
for(int dj=0; dj<2; dj++)
for(int di=0; di<2; di++)
wtemp2[encode3p2_hdv1r(p,i+1+di,j+1+dj,k+1+dk,tmpnui)]=0;
}
#endif
}
//__syncthreads();
ii[0]=ip;
ii[1]=jp;
i=ii[0];
j=ii[1];
k=0;
#ifdef USE_SAC_3D
ii[2]=kp;
k=ii[2];
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
//set viscosities
//if(i<((p->n[0])) && j<((p->n[1])))
{
//for(iv=0;iv<NVAR;iv++)
// wms[tid+iv*blockdim]=wmod[fencode_hdv1r(p,i,j,iv)+shift];
//wts[tid]=wtemp[fencode_hdv1r(p,i,j,tmp6)];
//temp value for viscosity
//tmp6 tmpnu
#ifdef USE_SAC
if(field==energy)
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,energy)+shift]-0.5*((wmod[fencode3_hdv1r(p,ii,b1)+shift]*wmod[fencode3_hdv1r(p,ii,b1)+shift]+wmod[fencode3_hdv1r(p,ii,b2)+shift]*wmod[fencode3_hdv1r(p,ii,b2)+shift])+(wmod[fencode3_hdv1r(p,ii,mom1)+shift]*wmod[fencode3_hdv1r(p,ii,mom1)+shift]+wmod[fencode3_hdv1r(p,ii,mom2)+shift]*wmod[fencode3_hdv1r(p,ii,mom2)+shift])/(wmod[fencode3_hdv1r(p,ii,rho)+shift]+wmod[fencode3_hdv1r(p,ii,rhob)+shift] ));
else
{
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift];
if((field ==mom1 || field == mom2))
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift]/(((wmod[fencode3_hdv1r(p,ii,rho)+shift] +wmod[fencode3_hdv1r(p,ii,rhob)+shift])));
}
//comment removed below to test mpi 29/10/2013
wtemp2[encode3_hdv1r(p,i+1,j+1,k,tmpnui)]=wtemp[fencode3_hdv1r(p,ii,tmp6)];
#endif
#ifdef USE_SAC_3D
if(field==energy)
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,energy)+shift]-0.5*((wmod[fencode3_hdv1r(p,ii,b1)+shift]*wmod[fencode3_hdv1r(p,ii,b1)+shift]+wmod[fencode3_hdv1r(p,ii,b2)+shift]*wmod[fencode3_hdv1r(p,ii,b2)+shift]+wmod[fencode3_hdv1r(p,ii,b3)+shift]*wmod[fencode3_hdv1r(p,ii,b3)+shift])
+(wmod[fencode3_hdv1r(p,ii,mom1)+shift]*wmod[fencode3_hdv1r(p,ii,mom1)+shift]+wmod[fencode3_hdv1r(p,ii,mom2)+shift]*wmod[fencode3_hdv1r(p,ii,mom2)+shift]+wmod[fencode3_hdv1r(p,ii,mom3)+shift]*wmod[fencode3_hdv1r(p,ii,mom3)+shift])/(wmod[fencode3_hdv1r(p,ii,rho)+shift]+wmod[fencode3_hdv1r(p,ii,rhob)+shift] ));
else
{
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift];
if((field ==mom1 || field == mom2 || field == mom3))
wtemp[fencode3_hdv1r(p,ii,tmp6)]=wmod[fencode3_hdv1r(p,ii,field)+shift]/(((wmod[fencode3_hdv1r(p,ii,rho)+shift] +wmod[fencode3_hdv1r(p,ii,rhob)+shift])));
}
//comment removed below to test mpi 29/10/2013
wtemp2[encode3_hdv1r(p,i+1,j+1,k+1,tmpnui)]=wtemp[fencode3_hdv1r(p,ii,tmp6)];
#endif
wd[fencode3_hdv1r(p,ii,hdnur)]=0;
}
//__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdv1r(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifvisc1r(struct params **p, struct params **d_p, real **d_wmod,real **wd, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
double *d_cmax;
double maxviscoef;
double *d_bmax;
real fn,fractn,in;
int ndimp;
int i;
////cudaSetDevice(selectedDevice);
int nit=100;
double *h_cmax;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int NTPB=512;
int smemSize = NTPB * sizeof(double);
fn=log(dimp)/log(2.0);
fractn=modf(fn,&in);
if(fractn>0)
{
fn+=1;
ndimp=(int)pow(2,fn);
}
else
ndimp=dimp;
// dim3 dimBlock(dimblock, 1);
// dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
(*p)->hdmax=0;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
hyperdifvisc1r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
hyperdifvisc1ar_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
hyperdifvisc2r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
hyperdifvisc3r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
hyperdifvisc4r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
//compute max hyperviscosity (only used by dt modifier)
if(((*p)->moddton)==1 )
{
// hyperdifvisc5r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
// cudaThreadSynchronize();
numBlocks = (ndimp+NTPB-1) / NTPB;
h_cmax = (double*)malloc(numBlocks*sizeof(double));
cudaMalloc((void**)&d_cmax, numBlocks*sizeof(double));
cudaMalloc((void**)&d_bmax, numBlocks*sizeof(double));
maxviscoef=(*p)->maxviscoef;
//maxviscoef=SMALLDOUBLE;
zeropadmaxviscr_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dim, *d_wtemp,ndimp);
cudaThreadSynchronize();
cudaMemcpy(*wd, *d_wd, NDERV*dimp*sizeof(real), cudaMemcpyDeviceToHost);
cudaMemcpy(*d_wtemp, ((*wd)+(hdnur*dimp)), dimp*sizeof(real), cudaMemcpyHostToDevice);
/*int s=1;
while(((s*=2)<=((ndimp/2)-1)) )
{
myreduction0computemaxviscr_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, dim, *d_wtemp,ndimp,s);
cudaThreadSynchronize();
}*/
for(i=0;i<numBlocks;i++)
h_cmax[i]=0;
cudaMemcpy(d_bmax, h_cmax, numBlocks*sizeof(double), cudaMemcpyHostToDevice);
newreduction0computemaxviscr_parallel<<<numBlocks,NTPB,smemSize>>>(d_bmax,*d_wtemp,ndimp);
cudaThreadSynchronize();
cudaMemcpy(h_cmax, d_bmax, numBlocks*sizeof(double), cudaMemcpyDeviceToHost);
for( i=0;i<numBlocks;i++)
if(h_cmax[i]>maxviscoef) maxviscoef=h_cmax[i];
if((*p)->maxviscoef<maxviscoef)
(*p)->maxviscoef=maxviscoef;
free(h_cmax);
cudaFree(d_bmax);
cudaFree(d_cmax);
}
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
// printf("field right hdmean hdmax %d %8.8g %8.8g \n",field, (*p)->hdmean, (*p)->hdmax);
return 0;
}
int cuhyperdifvisc1ir(struct params **p, struct params **d_p, real **d_wmod, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
// dim3 dimBlock(dimblock, 1);
// dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
hyperdifvisc1r_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim);
cudaThreadSynchronize();
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
// printf("field right hdmean hdmax %d %8.8g %8.8g \n",field, (*p)->hdmean, (*p)->hdmax);
return 0;
}
|
68953c108d0dc9d988acb2b9d071a688962a8e9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
int main( void ) {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount( &count );
for (int i=0; i< count; i++) {
hipGetDeviceProperties( &prop, i );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
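// build/run sketch (assumes a working HIP toolchain; the output name is arbitrary):
// hipcc 68953c108d0dc9d988acb2b9d071a688962a8e9b.hip -o devicequery && ./devicequery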
| 68953c108d0dc9d988acb2b9d071a688962a8e9b.cu | #include "stdio.h"
int main( void ) {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count );
for (int i=0; i< count; i++) {
cudaGetDeviceProperties( &prop, i );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
e27fc4a3c93a071b364737c68a6fca57d91e0301.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../common/core/Logger.h"
#include "DisparityEstimator.h"
#include "DisparityEstimatorKernels.h"
// static class members
// const float DisparityEstimator::tau_ = 1.0f / sqrtf(12.0f);
// const float DisparityEstimator::sigma_ = 1.0f / sqrtf(12.0f);
const float DisparityEstimator::tau_ = 1.0f / 6.0f;
const float DisparityEstimator::sigma_ = 1.0f / 2.0f;
////////////////////////////////////////////////////////////////////////////////
DisparityEstimator::DisparityEstimator() :
d_(8),
mu_(50.0f),
alpha_(5.0f),
theta_(0.5f),
regularizer_("huber")
{
Logger logger("DisparityEstimator::DisparityEstimator");
// init pointers
h_img1_ = 0;
h_img2_ = 0;
h_v_ = 0;
h_disp_ = 0;
d_img1_ = 0;
d_img2_ = 0;
d_v_ = 0;
d_vBar_ = 0;
d_vGrad_ = 0;
d_Phi_ = 0;
d_divPhi_ = 0;
d_g_ = 0;
d_disp_ = 0;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setImagePair(const cv::Mat& img1, const cv::Mat& img2)
{
Logger logger("DisparityEstimator::setImagePair");
img1_ = img1;
img2_ = img2;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setParameters(int argc, char* argv[])
{
Logger logger("DisparityEstimator::setParameters");
getParam("d", d_, argc, argv);
getParam("mu", mu_, argc, argv);
getParam("alpha", alpha_, argc, argv);
getParam("theta", theta_, argc, argv);
getParam("reg", regularizer_, argc, argv);
setRegularizer(regularizer_);
logger << "d: " << d_; logger.eol();
logger << "mu: " << mu_; logger.eol();
logger << "alpha: " << alpha_; logger.eol();
logger << "theta: " << theta_; logger.eol();
logger << "tau: " << tau_; logger.eol();
logger << "sigma: " << sigma_; logger.eol();
logger << "reg: " << regularizer_; logger.eol();
}
////////////////////////////////////////////////////////////////////////////////
const int DisparityEstimator::getRegularizerId()
{
if (regularizer_ == "huber") {
return REG_HUBER;
} else if (regularizer_ == "tv") {
return REG_TV;
} else if (regularizer_ == "quadratic") {
return REG_QUADRATIC;
}
return -1;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setRegularizer(const string& regularizer)
{
Logger logger("DisparityEstimator::setRegularizer");
regularizer_ = regularizer;
if (getRegularizerId() < 0) {
logger << "unsupported regularizer " << regularizer_; logger.eol();
logger.pop(false);
}
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setParameters()
{
Logger logger("DisparityEstimator::setDimensions");
// image properties
w_ = img1_.cols;
h_ = img1_.rows;
nc_ = img1_.channels();
// unit sizes
size2_ = w_ * h_;
size2v_ = w_ * h_ * nc_;
size3_ = w_ * h_ * d_;
size3v_ = w_ * h_ * d_ * 3;
// memory sizes
bytes2i_ = size2_ * sizeof(int);
bytes2fv_ = size2v_ * sizeof(float);
bytes3f_ = size3_ * sizeof(float);
bytes3fv_ = size3v_ * sizeof(float);
// kernel
dim_ = dim3(w_, h_, d_);
#ifdef ZORAH
block_ = dim3(128 / d_, 1, d_);
#else
block_ = dim3(32, 16, 1);
#endif
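// ceil-divide each dimension by the block size so the grid covers every element of the volume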
grid_.x = (w_ + block_.x - 1) / block_.x;
grid_.y = (h_ + block_.y - 1) / block_.y;
#ifdef ZORAH
grid_.z = 1;
#else
grid_.z = (d_ + block_.z - 1) / block_.z;
#endif
// print parameters
logger << "disparity size: (" << w_ << " x " << h_ << ")";
logger << " [" << bytes2i_ / 1000000.0f << " mb]"; logger.eol();
logger << "image size: (" << w_ << " x " << h_ << " x " << nc_ << ")";
logger << " [" << bytes2fv_ / 1000000.0f << " mb]"; logger.eol();
logger << "volume size: (" << w_ << " x " << h_ << " x " << d_ << ")";
logger << " [" << bytes3f_ / 1000000.0f << " mb]"; logger.eol();
logger << "flux size: (" << w_ << " x " << h_ << " x ";
logger << d_ << " x " << "3)";
logger << " [" << bytes3fv_ / 1000000.0f << " mb]"; logger.eol();
logger << "block: " << block_; logger.eol();
logger << "grid: " << grid_; logger.eol();
// also, set device parameters
float tau = tau_;
float sigma = sigma_;
setConstantMemory(
&w_, &h_, &d_,
&size2_, &size3_,
&tau, &sigma
);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::allocateHostMemory()
{
Logger logger("DisparityEstimator::allocateHostMemory");
h_img1_ = new float[size2v_];
h_img2_ = new float[size2v_];
h_v_ = new float[size3_];
h_disp_ = new int[size2_];
memset(h_img1_, 0, bytes2fv_);
memset(h_img2_, 0, bytes2fv_);
memset(h_v_, 0, bytes3f_);
memset(h_disp_, 0, bytes2i_);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::allocateDeviceMemory()
{
Logger logger("DisparityEstimator::allocateDeviceMemory");
// alloc input images
hipMalloc(&d_img1_, bytes2fv_); CUDA_CHECK;
hipMalloc(&d_img2_, bytes2fv_); CUDA_CHECK;
hipMemset(d_img1_, 0, bytes2fv_); CUDA_CHECK;
hipMemset(d_img2_, 0, bytes2fv_); CUDA_CHECK;
// alloc algorithm variables
hipMalloc(&d_v_, bytes3f_); CUDA_CHECK;
hipMalloc(&d_vBar_, bytes3f_); CUDA_CHECK;
hipMalloc(&d_vGrad_, bytes3fv_); CUDA_CHECK;
hipMalloc(&d_Phi_, bytes3fv_); CUDA_CHECK;
hipMalloc(&d_divPhi_, bytes3f_); CUDA_CHECK;
hipMalloc(&d_g_, bytes3f_); CUDA_CHECK;
hipMalloc(&d_disp_, bytes2i_); CUDA_CHECK;
hipMemset(d_v_, 0, bytes3f_); CUDA_CHECK;
hipMemset(d_vBar_, 0, bytes3f_); CUDA_CHECK;
hipMemset(d_vGrad_, 0, bytes3fv_); CUDA_CHECK;
hipMemset(d_Phi_, 0, bytes3fv_); CUDA_CHECK;
hipMemset(d_divPhi_, 0, bytes3f_); CUDA_CHECK;
hipMemset(d_g_, 0, bytes3f_); CUDA_CHECK;
hipMemset(d_disp_, 0, bytes2i_); CUDA_CHECK;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::copyImagePairHostToDevice()
{
Logger logger("DisparityEstimator::copyImagePairHostToDevice");
cv::Mat img1, img2;
img1_.convertTo(img1, CV_32F);
img2_.convertTo(img2, CV_32F);
img1 /= 255.0f;
img2 /= 255.0f;
convert_mat_to_layered(h_img1_, img1);
convert_mat_to_layered(h_img2_, img2);
hipMemcpy(d_img1_, h_img1_, bytes2fv_, hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_img2_, h_img2_, bytes2fv_, hipMemcpyHostToDevice); CUDA_CHECK;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::initialize()
{
Logger logger("DisparityEstimator::initialize");
// compute missing parameters
setParameters();
if (img1_.empty() || img2_.empty()) {
logger.pop("invalid image assigned");
}
// allocate memory
freeAllMemory();
allocateHostMemory();
allocateDeviceMemory();
// initialize gpu variables
copyImagePairHostToDevice();
hipLaunchKernelGGL(( computeDataTerm), dim3(grid_), dim3(block_), 0, 0, d_img1_, d_img2_, d_g_, mu_, nc_);
CUDA_CHECK;
hipLaunchKernelGGL(( initialise_v), dim3(grid_), dim3(block_), 0, 0, d_v_, d_vBar_);
CUDA_CHECK;
hipDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::update()
{
Logger logger("DisparityEstimator::update");
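// one primal-dual iteration: Phi (dual) is updated using the gradient of vBar;
// v and the relaxed vBar (primal) are then updated using the divergence of Phi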
// primal-dual update: Phi
hipLaunchKernelGGL(( update_phi), dim3(grid_), dim3(block_), 0, 0,
d_g_, d_Phi_, d_vBar_, d_vGrad_,
alpha_, getRegularizerId());
CUDA_CHECK;
hipDeviceSynchronize();
// primal-dual update: v, v_bar
hipLaunchKernelGGL(( update_v_vBar), dim3(grid_), dim3(block_), 0, 0, d_v_, d_vBar_, d_Phi_, d_divPhi_);
CUDA_CHECK;
hipDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::showHostInputImages(const string& windowName)
{
cv::Mat img12;
cv::hconcat(img1_, img2_, img12);
cv::imshow(windowName, img12);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::showDeviceDisparity(const string& windowName) {
cv::Mat disp;
getDeviceDisparity(disp);
cv::imshow(windowName, disp);
}
////////////////////////////////////////////////////////////////////////////////
const void DisparityEstimator::getDeviceDisparity(cv::Mat& disparity)
{
// reduce volume to disparity map
hipMemset(d_disp_, 0, bytes2i_); CUDA_CHECK;
hipDeviceSynchronize(); CUDA_CHECK;
hipLaunchKernelGGL(( getDepthMap), dim3(grid_), dim3(block_), 0, 0, d_v_, d_disp_, theta_); CUDA_CHECK;
// copy to host float array
hipMemcpy(h_disp_, d_disp_, bytes2i_, hipMemcpyDeviceToHost);
CUDA_CHECK;
cv::Mat disp = cv::Mat(h_, w_, CV_32S, h_disp_);
// normalize to [0, 1]
disp.convertTo(disparity, CV_32F);
disparity /= d_;
// normalize to [min, max]
// double min, max;
// cv::minMaxIdx(disp32f, &min, &max);
// disp32f -= min;
// disp32f /= (max - min);
// disp32f = 1.0f - disp32f;
disparity *= 255.0f;
disparity.convertTo(disparity, CV_8U);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::freeAllMemory()
{
Logger logger("DisparityEstimator::freeAllMemory");
// free vram
if (d_img1_) hipFree(d_img1_); CUDA_CHECK;
if (d_img2_) hipFree(d_img2_); CUDA_CHECK;
if (d_v_) hipFree(d_v_); CUDA_CHECK;
if (d_vBar_) hipFree(d_vBar_); CUDA_CHECK;
if (d_vGrad_) hipFree(d_vGrad_); CUDA_CHECK;
if (d_Phi_) hipFree(d_Phi_); CUDA_CHECK;
if (d_divPhi_) hipFree(d_divPhi_); CUDA_CHECK;
if (d_g_) hipFree(d_g_); CUDA_CHECK;
if (d_disp_) hipFree(d_disp_); CUDA_CHECK;
// free ram
if (h_img1_) delete[] h_img1_;
if (h_img2_) delete[] h_img2_;
if (h_v_) delete[] h_v_;
if (h_disp_) delete[] h_disp_;
// invalidate pointers
d_img1_ = 0;
d_img2_ = 0;
d_v_ = 0;
d_vBar_ = 0;
d_vGrad_ = 0;
d_Phi_ = 0;
d_divPhi_ = 0;
d_g_ = 0;
d_disp_ = 0;
h_img1_ = 0;
h_img2_ = 0;
h_v_ = 0;
h_disp_ = 0;
}
////////////////////////////////////////////////////////////////////////////////
DisparityEstimator::~DisparityEstimator()
{
Logger logger("DisparityEstimator::~DisparityEstimator");
freeAllMemory();
}
| e27fc4a3c93a071b364737c68a6fca57d91e0301.cu | #include "../../common/core/Logger.h"
#include "DisparityEstimator.h"
#include "DisparityEstimatorKernels.h"
// static class members
// const float DisparityEstimator::tau_ = 1.0f / sqrtf(12.0f);
// const float DisparityEstimator::sigma_ = 1.0f / sqrtf(12.0f);
const float DisparityEstimator::tau_ = 1.0f / 6.0f;
const float DisparityEstimator::sigma_ = 1.0f / 2.0f;
////////////////////////////////////////////////////////////////////////////////
DisparityEstimator::DisparityEstimator() :
d_(8),
mu_(50.0f),
alpha_(5.0f),
theta_(0.5f),
regularizer_("huber")
{
Logger logger("DisparityEstimator::DisparityEstimator");
// init pointers
h_img1_ = 0;
h_img2_ = 0;
h_v_ = 0;
h_disp_ = 0;
d_img1_ = 0;
d_img2_ = 0;
d_v_ = 0;
d_vBar_ = 0;
d_vGrad_ = 0;
d_Phi_ = 0;
d_divPhi_ = 0;
d_g_ = 0;
d_disp_ = 0;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setImagePair(const cv::Mat& img1, const cv::Mat& img2)
{
Logger logger("DisparityEstimator::setImagePair");
img1_ = img1;
img2_ = img2;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setParameters(int argc, char* argv[])
{
Logger logger("DisparityEstimator::setParameters");
getParam("d", d_, argc, argv);
getParam("mu", mu_, argc, argv);
getParam("alpha", alpha_, argc, argv);
getParam("theta", theta_, argc, argv);
getParam("reg", regularizer_, argc, argv);
setRegularizer(regularizer_);
logger << "d: " << d_; logger.eol();
logger << "mu: " << mu_; logger.eol();
logger << "alpha: " << alpha_; logger.eol();
logger << "theta: " << theta_; logger.eol();
logger << "tau: " << tau_; logger.eol();
logger << "sigma: " << sigma_; logger.eol();
logger << "reg: " << regularizer_; logger.eol();
}
////////////////////////////////////////////////////////////////////////////////
const int DisparityEstimator::getRegularizerId()
{
if (regularizer_ == "huber") {
return REG_HUBER;
} else if (regularizer_ == "tv") {
return REG_TV;
} else if (regularizer_ == "quadratic") {
return REG_QUADRATIC;
}
return -1;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setRegularizer(const string& regularizer)
{
Logger logger("DisparityEstimator::setRegularizer");
regularizer_ = regularizer;
if (getRegularizerId() < 0) {
logger << "unsupported regularizer " << regularizer_; logger.eol();
logger.pop(false);
}
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::setParameters()
{
Logger logger("DisparityEstimator::setDimensions");
// image properties
w_ = img1_.cols;
h_ = img1_.rows;
nc_ = img1_.channels();
// unit sizes
size2_ = w_ * h_;
size2v_ = w_ * h_ * nc_;
size3_ = w_ * h_ * d_;
size3v_ = w_ * h_ * d_ * 3;
// memory sizes
bytes2i_ = size2_ * sizeof(int);
bytes2fv_ = size2v_ * sizeof(float);
bytes3f_ = size3_ * sizeof(float);
bytes3fv_ = size3v_ * sizeof(float);
// kernel
dim_ = dim3(w_, h_, d_);
#ifdef ZORAH
block_ = dim3(128 / d_, 1, d_);
#else
block_ = dim3(32, 16, 1);
#endif
grid_.x = (w_ + block_.x - 1) / block_.x;
grid_.y = (h_ + block_.y - 1) / block_.y;
#ifdef ZORAH
grid_.z = 1;
#else
grid_.z = (d_ + block_.z - 1) / block_.z;
#endif
// print parameters
logger << "disparity size: (" << w_ << " x " << h_ << ")";
logger << " [" << bytes2i_ / 1000000.0f << " mb]"; logger.eol();
logger << "image size: (" << w_ << " x " << h_ << " x " << nc_ << ")";
logger << " [" << bytes2fv_ / 1000000.0f << " mb]"; logger.eol();
logger << "volume size: (" << w_ << " x " << h_ << " x " << d_ << ")";
logger << " [" << bytes3f_ / 1000000.0f << " mb]"; logger.eol();
logger << "flux size: (" << w_ << " x " << h_ << " x ";
logger << d_ << " x " << "3)";
logger << " [" << bytes3fv_ / 1000000.0f << " mb]"; logger.eol();
logger << "block: " << block_; logger.eol();
logger << "grid: " << grid_; logger.eol();
// also, set device parameters
float tau = tau_;
float sigma = sigma_;
setConstantMemory(
&w_, &h_, &d_,
&size2_, &size3_,
&tau, &sigma
);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::allocateHostMemory()
{
Logger logger("DisparityEstimator::allocateHostMemory");
h_img1_ = new float[size2v_];
h_img2_ = new float[size2v_];
h_v_ = new float[size3_];
h_disp_ = new int[size2_];
memset(h_img1_, 0, bytes2fv_);
memset(h_img2_, 0, bytes2fv_);
memset(h_v_, 0, bytes3f_);
memset(h_disp_, 0, bytes2i_);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::allocateDeviceMemory()
{
Logger logger("DisparityEstimator::allocateDeviceMemory");
// alloc input images
cudaMalloc(&d_img1_, bytes2fv_); CUDA_CHECK;
cudaMalloc(&d_img2_, bytes2fv_); CUDA_CHECK;
cudaMemset(d_img1_, 0, bytes2fv_); CUDA_CHECK;
cudaMemset(d_img2_, 0, bytes2fv_); CUDA_CHECK;
// alloc algorithm variables
cudaMalloc(&d_v_, bytes3f_); CUDA_CHECK;
cudaMalloc(&d_vBar_, bytes3f_); CUDA_CHECK;
cudaMalloc(&d_vGrad_, bytes3fv_); CUDA_CHECK;
cudaMalloc(&d_Phi_, bytes3fv_); CUDA_CHECK;
cudaMalloc(&d_divPhi_, bytes3f_); CUDA_CHECK;
cudaMalloc(&d_g_, bytes3f_); CUDA_CHECK;
cudaMalloc(&d_disp_, bytes2i_); CUDA_CHECK;
cudaMemset(d_v_, 0, bytes3f_); CUDA_CHECK;
cudaMemset(d_vBar_, 0, bytes3f_); CUDA_CHECK;
cudaMemset(d_vGrad_, 0, bytes3fv_); CUDA_CHECK;
cudaMemset(d_Phi_, 0, bytes3fv_); CUDA_CHECK;
cudaMemset(d_divPhi_, 0, bytes3f_); CUDA_CHECK;
cudaMemset(d_g_, 0, bytes3f_); CUDA_CHECK;
cudaMemset(d_disp_, 0, bytes2i_); CUDA_CHECK;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::copyImagePairHostToDevice()
{
Logger logger("DisparityEstimator::copyImagePairHostToDevice");
cv::Mat img1, img2;
img1_.convertTo(img1, CV_32F);
img2_.convertTo(img2, CV_32F);
img1 /= 255.0f;
img2 /= 255.0f;
convert_mat_to_layered(h_img1_, img1);
convert_mat_to_layered(h_img2_, img2);
cudaMemcpy(d_img1_, h_img1_, bytes2fv_, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_img2_, h_img2_, bytes2fv_, cudaMemcpyHostToDevice); CUDA_CHECK;
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::initialize()
{
Logger logger("DisparityEstimator::initialize");
// compute missing parameters
setParameters();
if (img1_.empty() || img2_.empty()) {
    logger.pop("invalid image assigned");
}
// allocate memory
freeAllMemory();
allocateHostMemory();
allocateDeviceMemory();
// initialize gpu variables
copyImagePairHostToDevice();
computeDataTerm<<<grid_, block_>>>(d_img1_, d_img2_, d_g_, mu_, nc_);
CUDA_CHECK;
initialise_v<<<grid_, block_>>>(d_v_, d_vBar_);
CUDA_CHECK;
cudaDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::update()
{
Logger logger("DisparityEstimator::update");
// primal-dual update: Phi
update_phi<<<grid_, block_>>>(
d_g_, d_Phi_, d_vBar_, d_vGrad_,
alpha_, getRegularizerId());
CUDA_CHECK;
cudaDeviceSynchronize();
// primal-dual update: v, v_bar
update_v_vBar<<<grid_, block_>>>(d_v_, d_vBar_, d_Phi_, d_divPhi_);
CUDA_CHECK;
cudaDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::showHostInputImages(const string& windowName)
{
cv::Mat img12;
cv::hconcat(img1_, img2_, img12);
cv::imshow(windowName, img12);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::showDeviceDisparity(const string& windowName) {
cv::Mat disp;
getDeviceDisparity(disp);
cv::imshow(windowName, disp);
}
////////////////////////////////////////////////////////////////////////////////
const void DisparityEstimator::getDeviceDisparity(cv::Mat& disparity)
{
// reduce volume to disparity map
cudaMemset(d_disp_, 0, bytes2i_); CUDA_CHECK;
cudaDeviceSynchronize(); CUDA_CHECK;
getDepthMap<<<grid_, block_>>>(d_v_, d_disp_, theta_); CUDA_CHECK;
  // copy to host int array
cudaMemcpy(h_disp_, d_disp_, bytes2i_, cudaMemcpyDeviceToHost);
CUDA_CHECK;
cv::Mat disp = cv::Mat(h_, w_, CV_32S, h_disp_);
// normalize to [0, 1]
disp.convertTo(disparity, CV_32F);
disparity /= d_;
// normalize to [min, max]
// double min, max;
// cv::minMaxIdx(disp32f, &min, &max);
// disp32f -= min;
// disp32f /= (max - min);
// disp32f = 1.0f - disp32f;
disparity *= 255.0f;
disparity.convertTo(disparity, CV_8U);
}
////////////////////////////////////////////////////////////////////////////////
void DisparityEstimator::freeAllMemory()
{
Logger logger("DisparityEstimator::freeAllMemory");
// free vram
if (d_img1_) cudaFree(d_img1_); CUDA_CHECK;
if (d_img2_) cudaFree(d_img2_); CUDA_CHECK;
if (d_v_) cudaFree(d_v_); CUDA_CHECK;
if (d_vBar_) cudaFree(d_vBar_); CUDA_CHECK;
if (d_vGrad_) cudaFree(d_vGrad_); CUDA_CHECK;
if (d_Phi_) cudaFree(d_Phi_); CUDA_CHECK;
if (d_divPhi_) cudaFree(d_divPhi_); CUDA_CHECK;
if (d_g_) cudaFree(d_g_); CUDA_CHECK;
if (d_disp_) cudaFree(d_disp_); CUDA_CHECK;
// free ram
if (h_img1_) delete[] h_img1_;
if (h_img2_) delete[] h_img2_;
if (h_v_) delete[] h_v_;
if (h_disp_) delete[] h_disp_;
// invalidate pointers
d_img1_ = 0;
d_img2_ = 0;
d_v_ = 0;
d_vBar_ = 0;
d_vGrad_ = 0;
d_Phi_ = 0;
d_divPhi_ = 0;
d_g_ = 0;
d_disp_ = 0;
h_img1_ = 0;
h_img2_ = 0;
h_v_ = 0;
h_disp_ = 0;
}
////////////////////////////////////////////////////////////////////////////////
DisparityEstimator::~DisparityEstimator()
{
Logger logger("DisparityEstimator::~DisparityEstimator");
freeAllMemory();
}
|
68b2b9893d938fe0eb47958ccaaa03ee4afeda8c.hip | // !!! This is a file automatically generated by hipify!!!
// https://wagonhelm.github.io/articles/2018-03/detecting-cuda-capability-with-cmake
// Justin Francis
#include <stdio.h>
int main(int argc, char **argv){
hipDeviceProp_t dP;
float min_cc = 5.2;
int rc = hipGetDeviceProperties(&dP, 0);
if(rc != hipSuccess) {
hipError_t error = hipGetLastError();
printf("CUDA error: %s\n", hipGetErrorString(error));
return rc; /* Failure */
}
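  // fold major/minor into one comparable number, e.g. sm_70 -> 7.0, sm_75 -> 7.5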
float cc = dP.major + (dP.minor / 10.0);
if(cc < min_cc) {
printf("Minimum Compute Capability of %2.1f required: %2.1f found. Not Building CUDA Code.\n",
min_cc, cc);
return 1; /* Failure */
} else {
printf("sm_%d%d", dP.major, dP.minor);
return 0; /* Success */
}
}
| 68b2b9893d938fe0eb47958ccaaa03ee4afeda8c.cu | // https://wagonhelm.github.io/articles/2018-03/detecting-cuda-capability-with-cmake
// Justin Francis
#include <stdio.h>
int main(int argc, char **argv){
cudaDeviceProp dP;
float min_cc = 5.2;
int rc = cudaGetDeviceProperties(&dP, 0);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s\n", cudaGetErrorString(error));
return rc; /* Failure */
}
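  // fold major/minor into one comparable number, e.g. sm_70 -> 7.0, sm_75 -> 7.5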
float cc = dP.major + (dP.minor / 10.0);
if(cc < min_cc) {
printf("Minimum Compute Capability of %2.1f required: %2.1f found. Not Building CUDA Code.\n",
min_cc, cc);
return 1; /* Failure */
} else {
printf("sm_%d%d", dP.major, dP.minor);
return 0; /* Success */
}
}
|
f9298d3fbacc6091885b97d1d1b7d8a2b62e4eb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void vectorMultiplyBy2(float *v, float *w, size_t n)
{
    size_t i = (size_t) blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        w[i] = v[i] * 2;
    }
}
int main() {
size_t N = 1024 * 1024 * 1024;
size_t size = N * sizeof(float);
float *a = (float *) malloc(size);
float *b = (float *) malloc(size);
float *b_check = (float *) malloc(size);
for (int i = 0; i < N; i++) {
a[i] = i;
}
for (int i = 0; i < N; i++) {
b_check[i] = a[i] * 2;
}
float *ha;
hipMalloc((void **) &ha, size);
float *hb;
hipMalloc((void **) &hb, size);
hipMemcpy(ha, a, size, hipMemcpyHostToDevice);
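  // launch configuration: one thread per element, 1024 threads per block,
  // and enough blocks in x to cover all N elements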
int tInB = 1024;
dim3 threadsInBlock(tInB);
  int numberOfBlocks = (int) ((N + tInB - 1) / tInB);
  printf("Number of blocks is %d\n", numberOfBlocks);
  dim3 nBlocks(numberOfBlocks);
hipLaunchKernelGGL(( vectorMultiplyBy2), dim3(nBlocks), dim3(threadsInBlock), 0, 0, ha, hb, N);
hipMemcpy(b, hb, size, hipMemcpyDeviceToHost);
int cmp = memcmp(b, b_check, size);
if (cmp == 0) {
printf("Arrays are equal.\n");
} else {
printf("Arrays are not equal.\n");
}
return 0;
}
 | f9298d3fbacc6091885b97d1d1b7d8a2b62e4eb5.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void vectorMultiplyBy2(float *v, float *w, size_t n)
{
    size_t i = (size_t) blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        w[i] = v[i] * 2;
    }
}
int main() {
size_t N = 1024 * 1024 * 1024;
size_t size = N * sizeof(float);
float *a = (float *) malloc(size);
float *b = (float *) malloc(size);
float *b_check = (float *) malloc(size);
for (int i = 0; i < N; i++) {
a[i] = i;
}
for (int i = 0; i < N; i++) {
b_check[i] = a[i] * 2;
}
float *ha;
cudaMalloc((void **) &ha, size);
float *hb;
cudaMalloc((void **) &hb, size);
cudaMemcpy(ha, a, size, cudaMemcpyHostToDevice);
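  // launch configuration: one thread per element, 1024 threads per block,
  // and enough blocks in x to cover all N elements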
int tInB = 1024;
dim3 threadsInBlock(tInB);
  int numberOfBlocks = (int) ((N + tInB - 1) / tInB);
  printf("Number of blocks is %d\n", numberOfBlocks);
  dim3 nBlocks(numberOfBlocks);
vectorMultiplyBy2<<<nBlocks, threadsInBlock>>>(ha, hb, N);
cudaMemcpy(b, hb, size, cudaMemcpyDeviceToHost);
int cmp = memcmp(b, b_check, size);
if (cmp == 0) {
printf("Arrays are equal.\n");
} else {
printf("Arrays are not equal.\n");
}
return 0;
}
|
6cfcedf0f92ea1ad2cfdf92b8c8f7bea9753a0ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "perlin.cuh"
#ifndef HALF_PRECISION_SUPPORT
__device__ float perlin2d(hipTextureObject_t perm_tex, hipTextureObject_t grad_tex, float2 point, int seed) {
// Calculate 2D integer coordinates and fractional component
float2 i = make_float2(floorf(point.x), floorf(point.y));
float2 f = make_float2(point.x - i.x, point.y - i.y);
// Get weights.
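    // (Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3, evaluated below in Horner form)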
float2 w;
w.x = f.x * f.x * f.x * (f.x * (f.x * 6.0f - 15.0f) + 10.0f);
w.y = f.y * f.y * f.y * (f.y * (f.y * 6.0f - 15.0f) + 10.0f);
float4 w4 = make_float4(1.0f, w.x, w.y, w.x * w.y);
    // Get four randomly permuted indices from the noise lattice nearest "point"
// and offset them by the seed.
uchar4 tmp = tex2D<uchar4>(perm_tex, i.x + 0.50f, i.y + 0.50f);
float4 perm = make_float4(tmp.x, tmp.y, tmp.z, tmp.w);
perm = perm + seed;
    // Permute the four indices again and get the 2D gradient for each of
// the four new coord-seed pairs.
float4 gLeft, gRight;
uchar4 tmp0 = tex2D<uchar4>(grad_tex, perm.x + 0.50f, perm.y + 0.50f);
gLeft = make_float4(tmp0.x, tmp0.y, tmp0.z, tmp0.w);
gLeft = gLeft * 2.0f;
gLeft = gLeft - 1.0f;
uchar4 tmp1 = tex2D<uchar4>(grad_tex, perm.z + 0.50f, perm.w + 0.50f);
gRight = make_float4(tmp1.x, tmp1.y, tmp1.z, tmp1.w);
gRight = gRight * 2.0f;
gRight = gRight - 1.0f;
// Evaluate gradients at four lattice points.
float nLeftTop = dot(make_float2(gLeft.x, gLeft.y), f);
float nRightTop = dot(make_float2(gRight.x, gRight.y), f + make_float2(-1.0f, 0.0f));
float nLeftBottom = dot(make_float2(gLeft.z, gLeft.w), f + make_float2(0.0f, -1.0f));
float nRightBottom = dot(make_float2(gRight.z, gRight.w), f + make_float2(-1.0f, -1.0f));
// Blend gradients.
float4 gradientBlend = make_float4(nLeftTop, nRightTop - nLeftTop, nLeftBottom - nLeftTop,
nLeftTop - nRightTop - nLeftBottom + nRightBottom);
float n = dot(gradientBlend, w4);
// Return value.
return (n * 1.5f) / (2.5f);
//return n * 1.530734f;
}
#else
// TODO: Removed these until it's re-implemented. Need to figure out how it works with textures.
#endif // !HALF_PRECISION_SUPPORT
| 6cfcedf0f92ea1ad2cfdf92b8c8f7bea9753a0ba.cu | #include "perlin.cuh"
#ifndef HALF_PRECISION_SUPPORT
__device__ float perlin2d(cudaTextureObject_t perm_tex, cudaTextureObject_t grad_tex, float2 point, int seed) {
// Calculate 2D integer coordinates and fractional component
float2 i = make_float2(floorf(point.x), floorf(point.y));
float2 f = make_float2(point.x - i.x, point.y - i.y);
// Get weights.
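    // (Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3, evaluated below in Horner form)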
float2 w;
w.x = f.x * f.x * f.x * (f.x * (f.x * 6.0f - 15.0f) + 10.0f);
w.y = f.y * f.y * f.y * (f.y * (f.y * 6.0f - 15.0f) + 10.0f);
float4 w4 = make_float4(1.0f, w.x, w.y, w.x * w.y);
    // Get four randomly permuted indices from the noise lattice nearest "point"
// and offset them by the seed.
uchar4 tmp = tex2D<uchar4>(perm_tex, i.x + 0.50f, i.y + 0.50f);
float4 perm = make_float4(tmp.x, tmp.y, tmp.z, tmp.w);
perm = perm + seed;
    // Permute the four indices again and get the 2D gradient for each of
// the four new coord-seed pairs.
float4 gLeft, gRight;
uchar4 tmp0 = tex2D<uchar4>(grad_tex, perm.x + 0.50f, perm.y + 0.50f);
gLeft = make_float4(tmp0.x, tmp0.y, tmp0.z, tmp0.w);
gLeft = gLeft * 2.0f;
gLeft = gLeft - 1.0f;
uchar4 tmp1 = tex2D<uchar4>(grad_tex, perm.z + 0.50f, perm.w + 0.50f);
gRight = make_float4(tmp1.x, tmp1.y, tmp1.z, tmp1.w);
gRight = gRight * 2.0f;
gRight = gRight - 1.0f;
// Evaluate gradients at four lattice points.
float nLeftTop = dot(make_float2(gLeft.x, gLeft.y), f);
float nRightTop = dot(make_float2(gRight.x, gRight.y), f + make_float2(-1.0f, 0.0f));
float nLeftBottom = dot(make_float2(gLeft.z, gLeft.w), f + make_float2(0.0f, -1.0f));
float nRightBottom = dot(make_float2(gRight.z, gRight.w), f + make_float2(-1.0f, -1.0f));
// Blend gradients.
float4 gradientBlend = make_float4(nLeftTop, nRightTop - nLeftTop, nLeftBottom - nLeftTop,
nLeftTop - nRightTop - nLeftBottom + nRightBottom);
float n = dot(gradientBlend, w4);
// Return value.
return (n * 1.5f) / (2.5f);
//return n * 1.530734f;
}
#else
// TODO: Removed these until it's re-implemented. Need to figure out how it works with textures.
#endif // !HALF_PRECISION_SUPPORT
|
42745ac66885596e2e22e6773e26283dc37aa870.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) \
FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_prod.cu", line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
template <typename OUT, typename IN>
__global__ void
awkward_reduce_prod_kernel(OUT* toptr,
const IN* fromptr,
const int64_t* parents,
int64_t lenparents) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < lenparents) {
toptr[parents[thread_id]] *= (OUT)fromptr[thread_id];
}
}
template <typename OUT, typename IN>
ERROR
awkward_reduce_prod(OUT* toptr,
const IN* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
HANDLE_ERROR(hipMemset(toptr, 1, sizeof(OUT) * outlength));
dim3 blocks_per_grid = blocks(lenparents);
dim3 threads_per_block = threads(lenparents);
hipLaunchKernelGGL(( awkward_reduce_prod_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
toptr, fromptr, parents, lenparents);
return success();
}
ERROR
awkward_reduce_prod_int64_int8_64(int64_t* toptr,
const int8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint8_64(uint64_t* toptr,
const uint8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int16_64(int64_t* toptr,
const int16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint16_64(uint64_t* toptr,
const uint16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int32_64(int64_t* toptr,
const int32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint32_64(uint64_t* toptr,
const uint32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int64_64(int64_t* toptr,
const int64_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int64_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint64_64(uint64_t* toptr,
const uint64_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint64_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_float32_float32_64(float* toptr,
const float* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<float, float>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_float64_float64_64(double* toptr,
const double* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<double, double>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int8_64(int32_t* toptr,
const int8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint8_64(uint32_t* toptr,
const uint8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int16_64(int32_t* toptr,
const int16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint16_64(uint32_t* toptr,
const uint16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int32_64(int32_t* toptr,
const int32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint32_64(uint32_t* toptr,
const uint32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
| 42745ac66885596e2e22e6773e26283dc37aa870.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
#define FILENAME(line) \
FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/awkward_reduce_prod.cu", line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
template <typename OUT, typename IN>
__global__ void
awkward_reduce_prod_kernel(OUT* toptr,
const IN* fromptr,
const int64_t* parents,
int64_t lenparents) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < lenparents) {
toptr[parents[thread_id]] *= (OUT)fromptr[thread_id];
}
}
template <typename OUT, typename IN>
ERROR
awkward_reduce_prod(OUT* toptr,
const IN* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
HANDLE_ERROR(cudaMemset(toptr, 1, sizeof(OUT) * outlength));
dim3 blocks_per_grid = blocks(lenparents);
dim3 threads_per_block = threads(lenparents);
awkward_reduce_prod_kernel<<<blocks_per_grid, threads_per_block>>>(
toptr, fromptr, parents, lenparents);
return success();
}
ERROR
awkward_reduce_prod_int64_int8_64(int64_t* toptr,
const int8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint8_64(uint64_t* toptr,
const uint8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int16_64(int64_t* toptr,
const int16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint16_64(uint64_t* toptr,
const uint16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int32_64(int64_t* toptr,
const int32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint32_64(uint64_t* toptr,
const uint32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int64_int64_64(int64_t* toptr,
const int64_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int64_t, int64_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint64_uint64_64(uint64_t* toptr,
const uint64_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint64_t, uint64_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_float32_float32_64(float* toptr,
const float* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<float, float>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_float64_float64_64(double* toptr,
const double* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<double, double>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int8_64(int32_t* toptr,
const int8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint8_64(uint32_t* toptr,
const uint8_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint8_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int16_64(int32_t* toptr,
const int16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint16_64(uint32_t* toptr,
const uint16_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint16_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_int32_int32_64(int32_t* toptr,
const int32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<int32_t, int32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
ERROR
awkward_reduce_prod_uint32_uint32_64(uint32_t* toptr,
const uint32_t* fromptr,
const int64_t* parents,
int64_t lenparents,
int64_t outlength) {
return awkward_reduce_prod<uint32_t, uint32_t>(
toptr, fromptr, parents, lenparents, outlength);
}
|
6445f1e31ee4468a6fe127ade3e1928451ab544b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernels.h"
#include <stdlib.h>
#include <math.h>
int comp( const void *a, const void *b)
{
return *((int *)a) - *((int*)b);
}
int compare(const int *a, const int *b, int N)
{
for (int i=0; i<N; ++i) {
if (a[i] != b[i]) {
return 0;
}
}
return 1;
}
int main(int argc, char** argv)
{
int *inp, *out, *d_temp, *d_inp;
int N = 4096;
int M = 32;
for (int ii=0; ii<7; ++ii) {
scanf("%d %d", &N, &M);
int numbytes = sizeof(int)*N;
inp = (int *) malloc(numbytes);
out = (int *) malloc(numbytes);
for (int i=0; i<N; ++i) {
inp[i] = N-i;
}
hipError_t err;
hipMalloc(&d_inp, numbytes);
hipMalloc(&d_temp, numbytes);
hipMemcpy(d_inp, inp, numbytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( msort), dim3((int)ceil((float)N/M)), dim3(M), 0, 0, d_inp, d_temp, N);
hipDeviceSynchronize();
/* Print the last error encountered -- helpful for debugging */
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipMemcpy(out, d_inp, numbytes, hipMemcpyDeviceToHost);
qsort(inp, N, sizeof(int), comp);
printf("%s\n", compare(inp, out, N)? "Success":"Fail");
free(inp);
free(out);
hipFree(d_inp);
hipFree(d_temp);
}
return 0;
}
| 6445f1e31ee4468a6fe127ade3e1928451ab544b.cu | #include <stdio.h>
#include "kernels.h"
#include <stdlib.h>
#include <math.h>
int comp( const void *a, const void *b)
{
return *((int *)a) - *((int*)b);
}
int compare(const int *a, const int *b, int N)
{
for (int i=0; i<N; ++i) {
if (a[i] != b[i]) {
return 0;
}
}
return 1;
}
int main(int argc, char** argv)
{
int *inp, *out, *d_temp, *d_inp;
int N = 4096;
int M = 32;
for (int ii=0; ii<7; ++ii) {
scanf("%d %d", &N, &M);
int numbytes = sizeof(int)*N;
inp = (int *) malloc(numbytes);
out = (int *) malloc(numbytes);
for (int i=0; i<N; ++i) {
inp[i] = N-i;
}
cudaError_t err;
cudaMalloc(&d_inp, numbytes);
cudaMalloc(&d_temp, numbytes);
cudaMemcpy(d_inp, inp, numbytes, cudaMemcpyHostToDevice);
msort<<<(int)ceil((float)N/M), M>>>(d_inp, d_temp, N);
cudaThreadSynchronize();
/* Print the last error encountered -- helpful for debugging */
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaMemcpy(out, d_inp, numbytes, cudaMemcpyDeviceToHost);
qsort(inp, N, sizeof(int), comp);
printf("%s\n", compare(inp, out, N)? "Success":"Fail");
free(inp);
free(out);
cudaFree(d_inp);
cudaFree(d_temp);
}
return 0;
}
|
78b469dce130db0ffb195325cd1070b495d63d6f.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <rocblas.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_tensor.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_gemm_traits.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock_swizzle.h"
#include "cutlass/gemm/linear_scaling.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "tools/test/unit/gemm/run_gemm.h"
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Very small warp sizes
//
////////////////////////////////////////////////////////////////////////////////////////////////////
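// Each test below instantiates Volta884GemmTraits with a different threadblock swizzle
// (row- vs. column-major block ordering, group size 1 or 2, OneDirection vs. Boustrophedon)
// and runs a 480x280x224 GEMM through run_gemm.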
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP32 accumulation, FP16 output
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
#endif // if defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
| 78b469dce130db0ffb195325cd1070b495d63d6f.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cublas_v2.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_tensor.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_gemm_traits.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/threadblock_swizzle.h"
#include "cutlass/gemm/linear_scaling.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "tools/test/unit/gemm/run_gemm.h"
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Very small warp sizes
//
////////////////////////////////////////////////////////////////////////////////////////////////////
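// Each test below instantiates Volta884GemmTraits with a different threadblock swizzle
// (row- vs. column-major block ordering, group size 1 or 2, OneDirection vs. Boustrophedon)
// and runs a 480x280x224 GEMM through run_gemm.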
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_nn_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_64x64x32_32x32x32_tt_swizzle, short_480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 32, 32>,
float,
float,
float,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP32 accumulation, FP16 output
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_rowMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::RowMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_groupCol2) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::OneDirection>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<1, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
TEST(Volta884_f16_s884gemm_f16_128x128x32_nn_swizzle, 480x280x224_columnMajorSwizzle_groupCol2_Boustrophedon) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
float,
half,
half,
2,
cutlass::gemm::LinearScaling<float>,
typename cutlass::gemm::ColumnMajorBlockSwizzle<2, cutlass::gemm::swizzleDirection::Boustrophedon>
> GemmTraits;
run_gemm<GemmTraits>(480, 280, 224);
}
#endif // if defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
|
f45e67889d709eaf09e00dd0bda12e010950e0b7.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2013-2018. The Regents of the University of California.
* Copyright 2017-2018. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2018 Martin Uecker <[email protected]>
* 2015-2018 Jon Tamir <[email protected]>
*
*
* This file defines basic operations on vectors of floats/complex floats
* for operations on the GPU. See the CPU version (vecops.c) for more
* information.
*/
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include "num/gpukrnls.h"
#include "num/multind.h"
#if 1
// see Dara's src/calib/calibcu.cu for how to get
// runtime info
// limited by hardware to 1024 on most devices
// should be a multiple of 32 (warp size)
#define BLOCKSIZE 1024
static int blocksize(int N)
{
return BLOCKSIZE;
}
static long gridsize(long N)
{
return (N + BLOCKSIZE - 1) / BLOCKSIZE;
}
#else
// http://stackoverflow.com/questions/5810447/cuda-block-and-grid-size-efficiencies
#define WARPSIZE 32
#define MAXBLOCKS (16 * 8)
// 16 multi processor times 8 blocks
#define MIN(x, y) ((x < y) ? (x) : (y))
#define MAX(x, y) ((x > y) ? (x) : (y))
static int blocksize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return WARPSIZE * warps_block;
}
static long gridsize(long N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return MIN(MAXBLOCKS, MAX(1, warps_total / warps_block));
}
#endif
__global__ void kern_float2double(long N, double* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_float2double(long N, double* dst, const float* src)
{
hipLaunchKernelGGL(( kern_float2double), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_double2float(long N, float* dst, const double* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_double2float(long N, float* dst, const double* src)
{
hipLaunchKernelGGL(( kern_double2float), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_xpay(long N, float beta, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = dst[i] * beta + src[i];
}
extern "C" void cuda_xpay(long N, float beta, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_xpay), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, beta, dst, src);
}
__global__ void kern_axpbz(long N, float* dst, const float a1, const float* src1, const float a2, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = a1 * src1[i] + a2 * src2[i];
}
extern "C" void cuda_axpbz(long N, float* dst, const float a1, const float* src1, const float a2, const float* src2)
{
hipLaunchKernelGGL(( kern_axpbz), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, a1, src1, a2, src2);
}
__global__ void kern_smul(long N, float alpha, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = alpha * src[i];
}
extern "C" void cuda_smul(long N, float alpha, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_smul), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, alpha, dst, src);
}
typedef void (*cuda_3op_f)(long N, float* dst, const float* src1, const float* src2);
extern "C" void cuda_3op(cuda_3op_f krn, int N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( krn), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
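/*
 * Illustrative use of the cuda_3op dispatcher (sketch): any kernel with the
 * signature (long N, float* dst, const float* src1, const float* src2) can be
 * forwarded through it, e.g.
 *
 *     cuda_3op(kern_add, N, dst, src1, src2);
 *
 * which is exactly how cuda_add() below launches its kernel.
 */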
__global__ void kern_add(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] + src2[i];
}
extern "C" void cuda_add(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_add, N, dst, src1, src2);
}
__global__ void kern_sadd(long N, float val, float* dst, const float* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] + val;
}
extern "C" void cuda_sadd(long N, float val, float* dst, const float* src1)
{
hipLaunchKernelGGL(( kern_sadd), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, val, dst, src1);
}
__global__ void kern_zsadd(long N, cuFloatComplex val, cuFloatComplex* dst, const cuFloatComplex* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(src1[i], val);
}
extern "C" void cuda_zsadd(long N, _Complex float val, _Complex float* dst, const _Complex float* src1)
{
hipLaunchKernelGGL(( kern_zsadd), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, make_cuFloatComplex(__real(val), __imag(val)), (cuFloatComplex*)dst, (const cuFloatComplex*)src1);
}
__global__ void kern_sub(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] - src2[i];
}
extern "C" void cuda_sub(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_sub, N, dst, src1, src2);
}
__global__ void kern_mul(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] * src2[i];
}
extern "C" void cuda_mul(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_mul, N, dst, src1, src2);
}
__global__ void kern_div(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] / src2[i];
}
extern "C" void cuda_div(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_div, N, dst, src1, src2);
}
__global__ void kern_fmac(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_fmac, N, dst, src1, src2);
}
__global__ void kern_fmac2(long N, double* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac2(long N, double* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_fmac2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_zsmul(long N, cuFloatComplex val, cuFloatComplex* dst, const cuFloatComplex* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], val);
}
extern "C" void cuda_zsmul(long N, _Complex float alpha, _Complex float* dst, const _Complex float* src1)
{
hipLaunchKernelGGL(( kern_zsmul), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, make_cuFloatComplex(__real(alpha), __imag(alpha)), (cuFloatComplex*)dst, (const cuFloatComplex*)src1);
}
__global__ void kern_zmul(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], src2[i]);
}
extern "C" void cuda_zmul(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zmul), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float abs = cuCabsf(src2[i]);
dst[i] = (0. == abs) ? make_cuFloatComplex(0., 0.) : cuCdivf(src1[i], src2[i]);
}
}
extern "C" void cuda_zdiv(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zdiv), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], src2[i]));
}
extern "C" void cuda_zfmac(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmac), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac2(long N, hipDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], src2[i])));
}
extern "C" void cuda_zfmac2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmac2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (hipDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zmulc(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], cuConjf(src2[i]));
}
extern "C" void cuda_zmulc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zmulc), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], cuConjf(src2[i])));
}
extern "C" void cuda_zfmacc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmacc), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc2(long N, hipDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], cuConjf(src2[i]))));
}
extern "C" void cuda_zfmacc2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zfmacc2), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (hipDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
#define MAX_DIMS 3
struct stride_desc {
long dims[MAX_DIMS];
long ostrs[MAX_DIMS];
long istrs1[MAX_DIMS];
long istrs2[MAX_DIMS];
};
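/*
 * Illustrative stride_desc contents (sketch, assuming md_calc_strides() in
 * num/multind.c assigns zero strides to singleton dimensions): for
 * dims = {2, 3, 1} with all flags set, the loops below visit the
 * 2 * 3 * 1 = 6 offsets of a small tile around each base index i; clearing a
 * flag for one input shrinks that dimension to 1 and zeroes its stride, so
 * the corresponding tensor is broadcast along it.
 */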
__global__ void kern_zfmac_strides(stride_desc strides, long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
for (long z = 0; z < strides.dims[2]; z++) {
for (long y = 0; y < strides.dims[1]; y++) {
for (long x = 0; x < strides.dims[0]; x++) {
long o_offset = x * strides.ostrs[0] + y * strides.ostrs[1] + z * strides.ostrs[2];
long i1_offset = x * strides.istrs1[0] + y * strides.istrs1[1] + z * strides.istrs1[2];
long i2_offset = x * strides.istrs2[0] + y * strides.istrs2[1] + z * strides.istrs2[2];
dst[i + o_offset] = cuCaddf(dst[i + o_offset], cuCmulf(src1[i + i1_offset], src2[i + i2_offset]));
}
}
}
}
}
// this version needs to launch fewer kernels
extern "C" void cuda_zfmac_strided(long N, long dims[3], unsigned long oflags, unsigned long iflags1, unsigned long iflags2, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
struct stride_desc s;
md_copy_dims(3, s.dims, dims);
long odims[3];
long idims1[3];
long idims2[3];
md_select_dims(3, oflags, odims, dims);
md_select_dims(3, iflags1, idims1, dims);
md_select_dims(3, iflags2, idims2, dims);
md_calc_strides(3, s.ostrs, odims, N);
md_calc_strides(3, s.istrs1, idims1, N);
md_calc_strides(3, s.istrs2, idims2, N);
hipLaunchKernelGGL(( kern_zfmac_strides), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, s, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc_strides(stride_desc strides, long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
for (long z = 0; z < strides.dims[2]; z++) {
for (long y = 0; y < strides.dims[1]; y++) {
for (long x = 0; x < strides.dims[0]; x++) {
long o_offset = x * strides.ostrs[0] + y * strides.ostrs[1] + z * strides.ostrs[2];
long i1_offset = x * strides.istrs1[0] + y * strides.istrs1[1] + z * strides.istrs1[2];
long i2_offset = x * strides.istrs2[0] + y * strides.istrs2[1] + z * strides.istrs2[2];
dst[i + o_offset] = cuCaddf(dst[i + o_offset], cuCmulf(src1[i + i1_offset], cuConjf(src2[i + i2_offset])));
}
}
}
}
}
extern "C" void cuda_zfmacc_strided(long N, long dims[3], unsigned long oflags, unsigned long iflags1, unsigned long iflags2, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
struct stride_desc s;
md_copy_dims(3, s.dims, dims);
long odims[3];
long idims1[3];
long idims2[3];
md_select_dims(3, oflags, odims, dims);
md_select_dims(3, iflags1, idims1, dims);
md_select_dims(3, iflags2, idims2, dims);
md_calc_strides(3, s.ostrs, odims, N);
md_calc_strides(3, s.istrs1, idims1, N);
md_calc_strides(3, s.istrs2, idims2, N);
hipLaunchKernelGGL(( kern_zfmacc_strides), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, s, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_pow(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = powf(src1[i], src2[i]);
}
extern "C" void cuda_pow(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_pow, N, dst, src1, src2);
}
__device__ hipDoubleComplex zexpD(hipDoubleComplex x)
{
double sc = exp(cuCreal(x));
double si;
double co;
sincos(cuCimag(x), &si, &co);
return make_cuDoubleComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zexp(cuFloatComplex x)
{
float sc = expf(cuCrealf(x));
float si;
float co;
sincosf(cuCimagf(x), &si, &co);
return make_cuFloatComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zsin(cuFloatComplex x)
{
float si;
float co;
float sih;
float coh;
sincosf(cuCrealf(x), &si, &co);
sih = sinhf(cuCimagf(x));
coh = coshf(cuCimagf(x));
return make_cuFloatComplex(si * coh , co * sih);
}
__device__ cuFloatComplex zcos(cuFloatComplex x)
{
float si;
float co;
float sih;
float coh;
sincosf(cuCrealf(x), &si, &co);
sih = sinhf(cuCimagf(x));
coh = coshf(cuCimagf(x));
return make_cuFloatComplex(co * coh , -si * sih);
}
__device__ cuFloatComplex zsinh(cuFloatComplex x)
{
float si_i;
float co_i;
float sih_r;
float coh_r;
sincosf(cuCimagf(x), &si_i, &co_i);
sih_r = sinhf(cuCrealf(x));
coh_r = coshf(cuCrealf(x));
return make_cuFloatComplex(sih_r * co_i , coh_r * si_i);
}
__device__ cuFloatComplex zcosh(cuFloatComplex x)
{
float si_i;
float co_i;
float sih_r;
float coh_r;
sincosf(cuCimagf(x), &si_i, &co_i);
sih_r = sinhf(cuCrealf(x));
coh_r = coshf(cuCrealf(x));
return make_cuFloatComplex(coh_r * co_i , sih_r * si_i);
}
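/*
 * The four helpers above implement the standard identities
 * sin(a + ib)  = sin(a) cosh(b) + i cos(a) sinh(b)
 * cos(a + ib)  = cos(a) cosh(b) - i sin(a) sinh(b)
 * sinh(a + ib) = sinh(a) cos(b) + i cosh(a) sin(b)
 * cosh(a + ib) = cosh(a) cos(b) + i sinh(a) sin(b)
 */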
__device__ float zarg(cuFloatComplex x)
{
return atan2(cuCimagf(x), cuCrealf(x));
}
__device__ float zabs(cuFloatComplex x)
{
return cuCabsf(x);
}
__device__ cuFloatComplex zlog(cuFloatComplex x)
{
return make_cuFloatComplex(log(cuCabsf(x)), zarg(x));
}
// x^y = e^{y ln(x)} = e^{y (ln|x| + i arg(x))}
__device__ cuFloatComplex zpow(cuFloatComplex x, cuFloatComplex y)
{
if ((0 == y.x) && (0 == y.y))
return make_cuFloatComplex(1., 0.);
if (((0 == x.x) && (0 == x.y)) && (0. < y.x))
return make_cuFloatComplex(0., 0.);
return zexp(cuCmulf(y, zlog(x)));
}
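/*
 * Worked example (sketch): zpow(2 + 0i, 3 + 0i) = zexp(3 * zlog(2)) =
 * exp(3 * ln 2) = 8. The two early returns handle x^0 := 1 (including 0^0)
 * and 0^y := 0 for Re(y) > 0, which would otherwise be ill-defined.
 */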
__global__ void kern_zpow(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zpow(src1[i], src2[i]);
}
extern "C" void cuda_zpow(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zpow), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_sqrt(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = sqrtf(fabs(src[i]));
}
extern "C" void cuda_sqrt(long N, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_sqrt), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_zconj(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuConjf(src[i]);
}
extern "C" void cuda_zconj(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zconj), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcmp(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(((cuCrealf(src1[i]) == cuCrealf(src2[i])) && (cuCimagf(src1[i]) == cuCimagf(src2[i]))) ? 1. : 0, 0.);
}
extern "C" void cuda_zcmp(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zcmp), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv_reg(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2, cuFloatComplex lambda)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], cuCaddf(src2[i], lambda));
}
extern "C" void cuda_zdiv_reg(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2, _Complex float lambda)
{
hipLaunchKernelGGL(( kern_zdiv_reg), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2, make_cuFloatComplex(__real(lambda), __imag(lambda)));
}
__global__ void kern_zphsr(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = (0. == abs) ? make_cuFloatComplex(1., 0.) : (cuCdivf(src[i], make_cuFloatComplex(abs, 0.)));
}
}
extern "C" void cuda_zphsr(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zphsr), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexp(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zexp(src[i]);
}
extern "C" void cuda_zexp(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zexp), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexpj(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float re = cuCrealf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
float im = cuCimagf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = zexp(make_cuFloatComplex(-im, re));
}
}
extern "C" void cuda_zexpj(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zexpj), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zlog(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride){
float abs = cuCabsf(src[i]);
dst[i] = (0. == abs) ? make_cuFloatComplex(0., 0.) : zlog(src[i]);
}
}
extern "C" void cuda_zlog(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zlog), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zarg(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zarg(src[i]), 0.);
}
extern "C" void cuda_zarg(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zarg), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zsin(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zsin(src[i]);
}
extern "C" void cuda_zsin(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zsin), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcos(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zcos(src[i]);
}
extern "C" void cuda_zcos(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zcos), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zsinh(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zsinh(src[i]);
}
extern "C" void cuda_zsinh(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zsinh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcosh(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zcosh(src[i]);
}
extern "C" void cuda_zcosh(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zcosh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zabs(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zabs(src[i]), 0.);
}
extern "C" void cuda_zabs(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zabs), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_exp(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = expf(src[i]);
}
extern "C" void cuda_exp(long N, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_exp), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_log(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = (0. == src[i]) ? 0. : logf(src[i]);
}
extern "C" void cuda_log(long N, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_log), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src);
}
__global__ void kern_zatanr(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(atan(cuCrealf(src[i])), 0.);
}
extern "C" void cuda_zatanr(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zatanr), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zacos(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(acosf(cuCrealf(src[i])), 0.);
}
extern "C" void cuda_zacos(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zacos), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
/**
 * (GPU) Step (1) of soft thresholding, y = ST(x, lambda).
 * Only computes the residual, resid = MAX((abs(x) - lambda) / abs(x), 0)
*
* @param N number of elements
* @param lambda threshold parameter
* @param d pointer to destination, resid
* @param x pointer to input
*/
__global__ void kern_zsoftthresh_half(long N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
//d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
d[i] = (red > 0.) ? make_cuFloatComplex(red / norm, 0.) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh_half(long N, float lambda, _Complex float* d, const _Complex float* x)
{
hipLaunchKernelGGL(( kern_zsoftthresh_half), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
__global__ void kern_zsoftthresh(long N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh(long N, float lambda, _Complex float* d, const _Complex float* x)
{
hipLaunchKernelGGL(( kern_zsoftthresh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
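/*
 * Worked example (sketch): for x = 3 + 4i (|x| = 5) and lambda = 2 the
 * shrinkage factor is (5 - 2) / 5 = 0.6, so cuda_zsoftthresh() writes
 * 1.8 + 2.4i, while cuda_zsoftthresh_half() above only stores the factor
 * 0.6 + 0i; magnitudes at or below lambda map to 0.
 */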
__global__ void kern_softthresh_half(long N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) : 0.;
}
}
extern "C" void cuda_softthresh_half(long N, float lambda, float* d, const float* x)
{
hipLaunchKernelGGL(( kern_softthresh_half), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, d, x);
}
__global__ void kern_softthresh(long N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm * x[i]) : 0.;
}
}
extern "C" void cuda_softthresh(long N, float lambda, float* d, const float* x)
{
hipLaunchKernelGGL(( kern_softthresh), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, lambda, d, x);
}
__global__ void kern_zreal(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(cuCrealf(src[i]), 0.);
}
extern "C" void cuda_zreal(long N, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zreal), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zle(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex((cuCrealf(src1[i]) <= cuCrealf(src2[i])), 0.);
}
extern "C" void cuda_zle(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zle), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_le(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = (src1[i] <= src2[i]);
}
extern "C" void cuda_le(long N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_le), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__device__ cuFloatComplex cuDouble2Float(hipDoubleComplex x)
{
return make_cuFloatComplex(cuCreal(x), cuCimag(x));
}
__device__ hipDoubleComplex cuFloat2Double(cuFloatComplex x)
{
return make_cuDoubleComplex(cuCrealf(x), cuCimagf(x));
}
// identical copy in num/fft.c
__device__ double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
__device__ hipDoubleComplex fftmod_phase2(long n, int j, bool inv, double phase)
{
phase += fftmod_phase(n, j);
double rem = phase - floor(phase);
double sgn = inv ? -1. : 1.;
#if 1
if (rem == 0.)
return make_cuDoubleComplex(1., 0.);
if (rem == 0.5)
return make_cuDoubleComplex(-1., 0.);
if (rem == 0.25)
return make_cuDoubleComplex(0., sgn);
if (rem == 0.75)
return make_cuDoubleComplex(0., -sgn);
#endif
return zexpD(make_cuDoubleComplex(0., M_PI * 2. * sgn * rem));
}
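/*
 * Worked example (sketch): for n = 4 and phase = 0 the per-sample phases are
 * -0.5, 0, 0.5, 1.0, so rem is 0.5, 0, 0.5, 0 and the factors reduce to
 * -1, +1, -1, +1 via the exact shortcuts above (the familiar alternating
 * sign of an fftshift-style modulation) without calling zexpD().
 */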
__global__ void kern_zfftmod(long N, cuFloatComplex* dst, const cuFloatComplex* src, unsigned int n, _Bool inv, double phase)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
for (int j = 0; j < n; j++)
dst[i * n + j] = cuDouble2Float(cuCmul(fftmod_phase2(n, j, inv, phase),
cuFloat2Double(src[i * n + j])));
}
extern "C" void cuda_zfftmod(long N, _Complex float* dst, const _Complex float* src, unsigned int n, _Bool inv, double phase)
{
hipLaunchKernelGGL(( kern_zfftmod), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src, n, inv, phase);
}
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__global__ void kern_zmax(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
dst[i].x = MAX(src1[i].x, src2[i].x);
dst[i].y = 0.0;
}
}
extern "C" void cuda_zmax(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
hipLaunchKernelGGL(( kern_zmax), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_smax(long N, float val, float* dst, const float* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MAX(src1[i], val);
}
extern "C" void cuda_smax(long N, float val, float* dst, const float* src1)
{
hipLaunchKernelGGL(( kern_smax), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, val, dst, src1);
}
__global__ void kern_max(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MAX(src1[i], src2[i]);
}
extern "C" void cuda_max(long N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_max), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_min(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MIN(src1[i], src2[i]);
}
extern "C" void cuda_min(long N, float* dst, const float* src1, const float* src2)
{
hipLaunchKernelGGL(( kern_min), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, src1, src2);
}
__global__ void kern_zsmax(long N, float val, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
dst[i].x = MAX(src[i].x, val);
dst[i].y = 0.0;
}
}
extern "C" void cuda_zsmax(long N, float alpha, _Complex float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_zsmax), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, alpha, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_reduce_zsum(long N, cuFloatComplex* dst)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
cuFloatComplex sum = make_cuFloatComplex(0., 0.);
for (long i = start; i < N; i += stride)
sum = cuCaddf(sum, dst[i]);
if (start < N)
dst[start] = sum;
}
extern "C" void cuda_zsum(long N, _Complex float* dst)
{
int B = blocksize(N);
while (N > 1) {
hipLaunchKernelGGL(( kern_reduce_zsum), dim3(1), dim3(B), 0, 0, N, (cuFloatComplex*)dst);
N = MIN(B, N);
B /= 32;
}
}
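/*
 * Illustrative reduction trace (sketch): for N = 100000 the loop launches
 * single blocks of 1024, 32 and finally 1 thread(s); the partial sums shrink
 * from dst[0..1023] to dst[0..31] to dst[0], so the total ends up in dst[0]
 * after three kernel launches.
 */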
__global__ void kern_pdf_gauss(long N, float mu, float sig, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = expf(- (src[i] - mu) * (src[i] - mu) / (2 * sig * sig)) / (sqrtf(2 * M_PI) * sig);
}
extern "C" void cuda_pdf_gauss(long N, float mu, float sig, float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_pdf_gauss), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, mu, sig, dst, src);
}
__global__ void kern_real(int N, float* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCrealf(src[i]);
}
extern "C" void cuda_real(long N, float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_real), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, (cuFloatComplex*)src);
}
__global__ void kern_imag(int N, float* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCimagf(src[i]);
}
extern "C" void cuda_imag(long N, float* dst, const _Complex float* src)
{
hipLaunchKernelGGL(( kern_imag), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, dst, (cuFloatComplex*)src);
}
__global__ void kern_zcmpl_real(int N, cuFloatComplex* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(src[i], 0);
}
extern "C" void cuda_zcmpl_real(long N, _Complex float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_zcmpl_real), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, src);
}
__global__ void kern_zcmpl_imag(int N, cuFloatComplex* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(0., src[i]);
}
extern "C" void cuda_zcmpl_imag(long N, _Complex float* dst, const float* src)
{
hipLaunchKernelGGL(( kern_zcmpl_imag), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, src);
}
__global__ void kern_zcmpl(int N, cuFloatComplex* dst, const float* real_src, const float* imag_src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(real_src[i], imag_src[i]);
}
extern "C" void cuda_zcmpl(long N, _Complex float* dst, const float* real_src, const float* imag_src)
{
hipLaunchKernelGGL(( kern_zcmpl), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, (cuFloatComplex*)dst, real_src, imag_src);
}
__global__ void kern_zfill(int N, cuFloatComplex val, cuFloatComplex* dst)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = val;
}
extern "C" void cuda_zfill(long N, _Complex float val, _Complex float* dst)
{
hipLaunchKernelGGL(( kern_zfill), dim3(gridsize(N)), dim3(blocksize(N)), 0, 0, N, make_cuFloatComplex(__real(val), __imag(val)), (cuFloatComplex*)dst);
}
| f45e67889d709eaf09e00dd0bda12e010950e0b7.cu | /* Copyright 2013-2018. The Regents of the University of California.
* Copyright 2017-2018. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2018 Martin Uecker <[email protected]>
* 2015-2018 Jon Tamir <[email protected]>
*
*
* This file defines basic operations on vectors of floats/complex floats
* for operations on the GPU. See the CPU version (vecops.c) for more
* information.
*/
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuComplex.h>
#include "num/gpukrnls.h"
#include "num/multind.h"
#if 1
// see Dara's src/calib/calibcu.cu for how to get
// runtime info
// limited by hardware to 1024 on most devices
// should be a multiple of 32 (warp size)
#define BLOCKSIZE 1024
static int blocksize(int N)
{
return BLOCKSIZE;
}
static long gridsize(long N)
{
return (N + BLOCKSIZE - 1) / BLOCKSIZE;
}
#else
// http://stackoverflow.com/questions/5810447/cuda-block-and-grid-size-efficiencies
#define WARPSIZE 32
#define MAXBLOCKS (16 * 8)
// 16 multiprocessors times 8 blocks
#define MIN(x, y) ((x < y) ? (x) : (y))
#define MAX(x, y) ((x > y) ? (x) : (y))
static int blocksize(int N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return WARPSIZE * warps_block;
}
static long gridsize(long N)
{
int warps_total = (N + WARPSIZE - 1) / WARPSIZE;
int warps_block = MAX(1, MIN(4, warps_total));
return MIN(MAXBLOCKS, MAX(1, warps_total / warps_block));
}
#endif
__global__ void kern_float2double(long N, double* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_float2double(long N, double* dst, const float* src)
{
kern_float2double<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_double2float(long N, float* dst, const double* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src[i];
}
extern "C" void cuda_double2float(long N, float* dst, const double* src)
{
kern_double2float<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_xpay(long N, float beta, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = dst[i] * beta + src[i];
}
extern "C" void cuda_xpay(long N, float beta, float* dst, const float* src)
{
kern_xpay<<<gridsize(N), blocksize(N)>>>(N, beta, dst, src);
}
__global__ void kern_axpbz(long N, float* dst, const float a1, const float* src1, const float a2, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = a1 * src1[i] + a2 * src2[i];
}
extern "C" void cuda_axpbz(long N, float* dst, const float a1, const float* src1, const float a2, const float* src2)
{
kern_axpbz<<<gridsize(N), blocksize(N)>>>(N, dst, a1, src1, a2, src2);
}
__global__ void kern_smul(long N, float alpha, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = alpha * src[i];
}
extern "C" void cuda_smul(long N, float alpha, float* dst, const float* src)
{
kern_smul<<<gridsize(N), blocksize(N)>>>(N, alpha, dst, src);
}
typedef void (*cuda_3op_f)(long N, float* dst, const float* src1, const float* src2);
extern "C" void cuda_3op(cuda_3op_f krn, int N, float* dst, const float* src1, const float* src2)
{
krn<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_add(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] + src2[i];
}
extern "C" void cuda_add(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_add, N, dst, src1, src2);
}
__global__ void kern_sadd(long N, float val, float* dst, const float* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] + val;
}
extern "C" void cuda_sadd(long N, float val, float* dst, const float* src1)
{
kern_sadd<<<gridsize(N), blocksize(N)>>>(N, val, dst, src1);
}
__global__ void kern_zsadd(long N, cuFloatComplex val, cuFloatComplex* dst, const cuFloatComplex* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(src1[i], val);
}
extern "C" void cuda_zsadd(long N, _Complex float val, _Complex float* dst, const _Complex float* src1)
{
kern_zsadd<<<gridsize(N), blocksize(N)>>>(N, make_cuFloatComplex(__real(val), __imag(val)), (cuFloatComplex*)dst, (const cuFloatComplex*)src1);
}
__global__ void kern_sub(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] - src2[i];
}
extern "C" void cuda_sub(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_sub, N, dst, src1, src2);
}
__global__ void kern_mul(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] * src2[i];
}
extern "C" void cuda_mul(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_mul, N, dst, src1, src2);
}
__global__ void kern_div(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = src1[i] / src2[i];
}
extern "C" void cuda_div(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_div, N, dst, src1, src2);
}
__global__ void kern_fmac(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_fmac, N, dst, src1, src2);
}
__global__ void kern_fmac2(long N, double* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] += src1[i] * src2[i];
}
extern "C" void cuda_fmac2(long N, double* dst, const float* src1, const float* src2)
{
kern_fmac2<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_zsmul(long N, cuFloatComplex val, cuFloatComplex* dst, const cuFloatComplex* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], val);
}
extern "C" void cuda_zsmul(long N, _Complex float alpha, _Complex float* dst, const _Complex float* src1)
{
kern_zsmul<<<gridsize(N), blocksize(N)>>>(N, make_cuFloatComplex(__real(alpha), __imag(alpha)), (cuFloatComplex*)dst, (const cuFloatComplex*)src1);
}
__global__ void kern_zmul(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], src2[i]);
}
extern "C" void cuda_zmul(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zmul<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float abs = cuCabsf(src2[i]);
dst[i] = (0. == abs) ? make_cuFloatComplex(0., 0.) : cuCdivf(src1[i], src2[i]);
}
}
extern "C" void cuda_zdiv(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zdiv<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], src2[i]));
}
extern "C" void cuda_zfmac(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmac<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmac2(long N, cuDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], src2[i])));
}
extern "C" void cuda_zfmac2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmac2<<<gridsize(N), blocksize(N)>>>(N, (cuDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zmulc(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCmulf(src1[i], cuConjf(src2[i]));
}
extern "C" void cuda_zmulc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zmulc<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCaddf(dst[i], cuCmulf(src1[i], cuConjf(src2[i])));
}
extern "C" void cuda_zfmacc(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmacc<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc2(long N, cuDoubleComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCadd(dst[i], cuComplexFloatToDouble(cuCmulf(src1[i], cuConjf(src2[i]))));
}
extern "C" void cuda_zfmacc2(long N, _Complex double* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zfmacc2<<<gridsize(N), blocksize(N)>>>(N, (cuDoubleComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
#define MAX_DIMS 3
struct stride_desc {
long dims[MAX_DIMS];
long ostrs[MAX_DIMS];
long istrs1[MAX_DIMS];
long istrs2[MAX_DIMS];
};
__global__ void kern_zfmac_strides(stride_desc strides, long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
for (long z = 0; z < strides.dims[2]; z++) {
for (long y = 0; y < strides.dims[1]; y++) {
for (long x = 0; x < strides.dims[0]; x++) {
long o_offset = x * strides.ostrs[0] + y * strides.ostrs[1] + z * strides.ostrs[2];
long i1_offset = x * strides.istrs1[0] + y * strides.istrs1[1] + z * strides.istrs1[2];
long i2_offset = x * strides.istrs2[0] + y * strides.istrs2[1] + z * strides.istrs2[2];
dst[i + o_offset] = cuCaddf(dst[i + o_offset], cuCmulf(src1[i + i1_offset], src2[i + i2_offset]));
}
}
}
}
}
// this version needs to launch fewer kernels
extern "C" void cuda_zfmac_strided(long N, long dims[3], unsigned long oflags, unsigned long iflags1, unsigned long iflags2, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
struct stride_desc s;
md_copy_dims(3, s.dims, dims);
long odims[3];
long idims1[3];
long idims2[3];
md_select_dims(3, oflags, odims, dims);
md_select_dims(3, iflags1, idims1, dims);
md_select_dims(3, iflags2, idims2, dims);
md_calc_strides(3, s.ostrs, odims, N);
md_calc_strides(3, s.istrs1, idims1, N);
md_calc_strides(3, s.istrs2, idims2, N);
kern_zfmac_strides<<<gridsize(N), blocksize(N)>>>(s, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zfmacc_strides(stride_desc strides, long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
for (long z = 0; z < strides.dims[2]; z++) {
for (long y = 0; y < strides.dims[1]; y++) {
for (long x = 0; x < strides.dims[0]; x++) {
long o_offset = x * strides.ostrs[0] + y * strides.ostrs[1] + z * strides.ostrs[2];
long i1_offset = x * strides.istrs1[0] + y * strides.istrs1[1] + z * strides.istrs1[2];
long i2_offset = x * strides.istrs2[0] + y * strides.istrs2[1] + z * strides.istrs2[2];
dst[i + o_offset] = cuCaddf(dst[i + o_offset], cuCmulf(src1[i + i1_offset], cuConjf(src2[i + i2_offset])));
}
}
}
}
}
extern "C" void cuda_zfmacc_strided(long N, long dims[3], unsigned long oflags, unsigned long iflags1, unsigned long iflags2, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
struct stride_desc s;
md_copy_dims(3, s.dims, dims);
long odims[3];
long idims1[3];
long idims2[3];
md_select_dims(3, oflags, odims, dims);
md_select_dims(3, iflags1, idims1, dims);
md_select_dims(3, iflags2, idims2, dims);
md_calc_strides(3, s.ostrs, odims, N);
md_calc_strides(3, s.istrs1, idims1, N);
md_calc_strides(3, s.istrs2, idims2, N);
kern_zfmacc_strides<<<gridsize(N), blocksize(N)>>>(s, N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_pow(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = powf(src1[i], src2[i]);
}
extern "C" void cuda_pow(long N, float* dst, const float* src1, const float* src2)
{
cuda_3op(kern_pow, N, dst, src1, src2);
}
__device__ cuDoubleComplex zexpD(cuDoubleComplex x)
{
double sc = exp(cuCreal(x));
double si;
double co;
sincos(cuCimag(x), &si, &co);
return make_cuDoubleComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zexp(cuFloatComplex x)
{
float sc = expf(cuCrealf(x));
float si;
float co;
sincosf(cuCimagf(x), &si, &co);
return make_cuFloatComplex(sc * co, sc * si);
}
__device__ cuFloatComplex zsin(cuFloatComplex x)
{
float si;
float co;
float sih;
float coh;
sincosf(cuCrealf(x), &si, &co);
sih = sinhf(cuCimagf(x));
coh = coshf(cuCimagf(x));
return make_cuFloatComplex(si * coh , co * sih);
}
__device__ cuFloatComplex zcos(cuFloatComplex x)
{
float si;
float co;
float sih;
float coh;
sincosf(cuCrealf(x), &si, &co);
sih = sinhf(cuCimagf(x));
coh = coshf(cuCimagf(x));
return make_cuFloatComplex(co * coh , -si * sih);
}
__device__ cuFloatComplex zsinh(cuFloatComplex x)
{
float si_i;
float co_i;
float sih_r;
float coh_r;
sincosf(cuCimagf(x), &si_i, &co_i);
sih_r = sinhf(cuCrealf(x));
coh_r = coshf(cuCrealf(x));
return make_cuFloatComplex(sih_r * co_i , coh_r * si_i);
}
__device__ cuFloatComplex zcosh(cuFloatComplex x)
{
float si_i;
float co_i;
float sih_r;
float coh_r;
sincosf(cuCimagf(x), &si_i, &co_i);
sih_r = sinhf(cuCrealf(x));
coh_r = coshf(cuCrealf(x));
return make_cuFloatComplex(coh_r * co_i , sih_r * si_i);
}
__device__ float zarg(cuFloatComplex x)
{
return atan2(cuCimagf(x), cuCrealf(x));
}
__device__ float zabs(cuFloatComplex x)
{
return cuCabsf(x);
}
__device__ cuFloatComplex zlog(cuFloatComplex x)
{
return make_cuFloatComplex(log(cuCabsf(x)), zarg(x));
}
// x^y = e^{y ln(x)} = e^{y (ln|x| + i arg(x))}
__device__ cuFloatComplex zpow(cuFloatComplex x, cuFloatComplex y)
{
if ((0 == y.x) && (0 == y.y))
return make_cuFloatComplex(1., 0.);
if (((0 == x.x) && (0 == x.y)) && (0. < y.x))
return make_cuFloatComplex(0., 0.);
return zexp(cuCmulf(y, zlog(x)));
}
__global__ void kern_zpow(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zpow(src1[i], src2[i]);
}
extern "C" void cuda_zpow(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zpow<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_sqrt(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = sqrtf(fabs(src[i]));
}
extern "C" void cuda_sqrt(long N, float* dst, const float* src)
{
kern_sqrt<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_zconj(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuConjf(src[i]);
}
extern "C" void cuda_zconj(long N, _Complex float* dst, const _Complex float* src)
{
kern_zconj<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcmp(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(((cuCrealf(src1[i]) == cuCrealf(src2[i])) && (cuCimagf(src1[i]) == cuCimagf(src2[i]))) ? 1. : 0, 0.);
}
extern "C" void cuda_zcmp(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zcmp<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_zdiv_reg(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2, cuFloatComplex lambda)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = cuCdivf(src1[i], cuCaddf(src2[i], lambda));
}
extern "C" void cuda_zdiv_reg(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2, _Complex float lambda)
{
kern_zdiv_reg<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2, make_cuFloatComplex(__real(lambda), __imag(lambda)));
}
__global__ void kern_zphsr(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float abs = cuCabsf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = (0. == abs) ? make_cuFloatComplex(1., 0.) : (cuCdivf(src[i], make_cuFloatComplex(abs, 0.)));
}
}
extern "C" void cuda_zphsr(long N, _Complex float* dst, const _Complex float* src)
{
kern_zphsr<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexp(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zexp(src[i]);
}
extern "C" void cuda_zexp(long N, _Complex float* dst, const _Complex float* src)
{
kern_zexp<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zexpj(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float re = cuCrealf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
float im = cuCimagf(src[i]); // moved out, otherwise it triggers a compiler error in nvcc
dst[i] = zexp(make_cuFloatComplex(-im, re));
}
}
extern "C" void cuda_zexpj(long N, _Complex float* dst, const _Complex float* src)
{
kern_zexpj<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zlog(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride){
float abs = cuCabsf(src[i]);
dst[i] = (0. == abs) ? make_cuFloatComplex(0., 0.) : zlog(src[i]);
}
}
extern "C" void cuda_zlog(long N, _Complex float* dst, const _Complex float* src)
{
kern_zlog<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zarg(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zarg(src[i]), 0.);
}
extern "C" void cuda_zarg(long N, _Complex float* dst, const _Complex float* src)
{
kern_zarg<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zsin(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zsin(src[i]);
}
extern "C" void cuda_zsin(long N, _Complex float* dst, const _Complex float* src)
{
kern_zsin<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcos(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zcos(src[i]);
}
extern "C" void cuda_zcos(long N, _Complex float* dst, const _Complex float* src)
{
kern_zcos<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zsinh(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zsinh(src[i]);
}
extern "C" void cuda_zsinh(long N, _Complex float* dst, const _Complex float* src)
{
kern_zsinh<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zcosh(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = zcosh(src[i]);
}
extern "C" void cuda_zcosh(long N, _Complex float* dst, const _Complex float* src)
{
kern_zcosh<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zabs(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(zabs(src[i]), 0.);
}
extern "C" void cuda_zabs(long N, _Complex float* dst, const _Complex float* src)
{
kern_zabs<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_exp(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = expf(src[i]);
}
extern "C" void cuda_exp(long N, float* dst, const float* src)
{
kern_exp<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_log(long N, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = (0. == src[i]) ? 0. : logf(src[i]);
}
extern "C" void cuda_log(long N, float* dst, const float* src)
{
kern_log<<<gridsize(N), blocksize(N)>>>(N, dst, src);
}
__global__ void kern_zatanr(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(atan(cuCrealf(src[i])), 0.);
}
extern "C" void cuda_zatanr(long N, _Complex float* dst, const _Complex float* src)
{
kern_zatanr<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zacos(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(acosf(cuCrealf(src[i])), 0.);
}
extern "C" void cuda_zacos(long N, _Complex float* dst, const _Complex float* src)
{
kern_zacos<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
/**
 * (GPU) Step (1) of soft thresholding, y = ST(x, lambda).
 * Only computes the residual, resid = MAX((abs(x) - lambda) / abs(x), 0)
*
* @param N number of elements
* @param lambda threshold parameter
* @param d pointer to destination, resid
* @param x pointer to input
*/
__global__ void kern_zsoftthresh_half(long N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
//d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
d[i] = (red > 0.) ? make_cuFloatComplex(red / norm, 0.) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh_half(long N, float lambda, _Complex float* d, const _Complex float* x)
{
kern_zsoftthresh_half<<<gridsize(N), blocksize(N)>>>(N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
__global__ void kern_zsoftthresh(long N, float lambda, cuFloatComplex* d, const cuFloatComplex* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = cuCabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (cuCmulf(make_cuFloatComplex(red / norm, 0.), x[i])) : make_cuFloatComplex(0., 0.);
}
}
extern "C" void cuda_zsoftthresh(long N, float lambda, _Complex float* d, const _Complex float* x)
{
kern_zsoftthresh<<<gridsize(N), blocksize(N)>>>(N, lambda, (cuFloatComplex*)d, (const cuFloatComplex*)x);
}
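/*
 * Worked example for illustration (values are hypothetical, not part of the
 * original source): for a single element x = 3 + 4i, cuCabsf gives |x| = 5.
 * With lambda = 2, kern_zsoftthresh_half stores the residual factor
 * (5 - 2) / 5 = 0.6, while kern_zsoftthresh stores the fully thresholded
 * value 0.6 * (3 + 4i) = 1.8 + 2.4i. For |x| <= lambda both kernels write 0.
 */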
__global__ void kern_softthresh_half(long N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) : 0.;
}
}
extern "C" void cuda_softthresh_half(long N, float lambda, float* d, const float* x)
{
kern_softthresh_half<<<gridsize(N), blocksize(N)>>>(N, lambda, d, x);
}
__global__ void kern_softthresh(long N, float lambda, float* d, const float* x)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
float norm = fabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm * x[i]) : 0.;
}
}
extern "C" void cuda_softthresh(long N, float lambda, float* d, const float* x)
{
kern_softthresh<<<gridsize(N), blocksize(N)>>>(N, lambda, d, x);
}
__global__ void kern_zreal(long N, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(cuCrealf(src[i]), 0.);
}
extern "C" void cuda_zreal(long N, _Complex float* dst, const _Complex float* src)
{
kern_zreal<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_zle(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex((cuCrealf(src1[i]) <= cuCrealf(src2[i])), 0.);
}
extern "C" void cuda_zle(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zle<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_le(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = (src1[i] <= src2[i]);
}
extern "C" void cuda_le(long N, float* dst, const float* src1, const float* src2)
{
kern_le<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__device__ cuFloatComplex cuDouble2Float(cuDoubleComplex x)
{
return make_cuFloatComplex(cuCreal(x), cuCimag(x));
}
__device__ cuDoubleComplex cuFloat2Double(cuFloatComplex x)
{
return make_cuDoubleComplex(cuCrealf(x), cuCimagf(x));
}
// identical copy in num/fft.c
__device__ double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
__device__ cuDoubleComplex fftmod_phase2(long n, int j, bool inv, double phase)
{
phase += fftmod_phase(n, j);
double rem = phase - floor(phase);
double sgn = inv ? -1. : 1.;
#if 1
if (rem == 0.)
return make_cuDoubleComplex(1., 0.);
if (rem == 0.5)
return make_cuDoubleComplex(-1., 0.);
if (rem == 0.25)
return make_cuDoubleComplex(0., sgn);
if (rem == 0.75)
return make_cuDoubleComplex(0., -sgn);
#endif
return zexpD(make_cuDoubleComplex(0., M_PI * 2. * sgn * rem));
}
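/*
 * Note for illustration (not part of the original source): fftmod_phase2
 * evaluates exp(i * 2 * pi * sgn * rem) with rem in [0, 1). The special cases
 * above return the exact unit values 1, -1, sgn*i and -sgn*i for
 * rem = 0, 1/2, 1/4 and 3/4, avoiding rounding error from the complex
 * exponential for these common centered-FFT shifts.
 */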
__global__ void kern_zfftmod(long N, cuFloatComplex* dst, const cuFloatComplex* src, unsigned int n, _Bool inv, double phase)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
for (int j = 0; j < n; j++)
dst[i * n + j] = cuDouble2Float(cuCmul(fftmod_phase2(n, j, inv, phase),
cuFloat2Double(src[i * n + j])));
}
extern "C" void cuda_zfftmod(long N, _Complex float* dst, const _Complex float* src, unsigned int n, _Bool inv, double phase)
{
kern_zfftmod<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src, n, inv, phase);
}
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__global__ void kern_zmax(long N, cuFloatComplex* dst, const cuFloatComplex* src1, const cuFloatComplex* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
dst[i].x = MAX(src1[i].x, src2[i].x);
dst[i].y = 0.0;
}
}
extern "C" void cuda_zmax(long N, _Complex float* dst, const _Complex float* src1, const _Complex float* src2)
{
kern_zmax<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, (const cuFloatComplex*)src1, (const cuFloatComplex*)src2);
}
__global__ void kern_smax(long N, float val, float* dst, const float* src1)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MAX(src1[i], val);
}
extern "C" void cuda_smax(long N, float val, float* dst, const float* src1)
{
kern_smax<<<gridsize(N), blocksize(N)>>>(N, val, dst, src1);
}
__global__ void kern_max(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MAX(src1[i], src2[i]);
}
extern "C" void cuda_max(long N, float* dst, const float* src1, const float* src2)
{
kern_max<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_min(long N, float* dst, const float* src1, const float* src2)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = MIN(src1[i], src2[i]);
}
extern "C" void cuda_min(long N, float* dst, const float* src1, const float* src2)
{
kern_min<<<gridsize(N), blocksize(N)>>>(N, dst, src1, src2);
}
__global__ void kern_zsmax(long N, float val, cuFloatComplex* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride) {
dst[i].x = MAX(src[i].x, val);
dst[i].y = 0.0;
}
}
extern "C" void cuda_zsmax(long N, float alpha, _Complex float* dst, const _Complex float* src)
{
kern_zsmax<<<gridsize(N), blocksize(N)>>>(N, alpha, (cuFloatComplex*)dst, (const cuFloatComplex*)src);
}
__global__ void kern_reduce_zsum(long N, cuFloatComplex* dst)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
cuFloatComplex sum = make_cuFloatComplex(0., 0.);
for (long i = start; i < N; i += stride)
sum = cuCaddf(sum, dst[i]);
if (start < N)
dst[start] = sum;
}
extern "C" void cuda_zsum(long N, _Complex float* dst)
{
int B = blocksize(N);
while (N > 1) {
kern_reduce_zsum<<<1, B>>>(N, (cuFloatComplex*)dst);
N = MIN(B, N);
B /= 32;
}
}
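/*
 * Note for illustration (not part of the original source): each pass of the
 * loop above launches a single block of B threads; thread t accumulates the
 * strided elements dst[t], dst[t + B], ... into dst[t], so afterwards the
 * first MIN(B, N) entries hold partial sums. B is then reduced by a factor of
 * 32 and the pass repeats on the remaining partial sums until N reaches 1 and
 * the total is left in dst[0].
 */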
__global__ void kern_pdf_gauss(long N, float mu, float sig, float* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (long i = start; i < N; i += stride)
dst[i] = expf(- (src[i] - mu) * (src[i] - mu) / (2 * sig * sig)) / (sqrtf(2 * M_PI) * sig);
}
extern "C" void cuda_pdf_gauss(long N, float mu, float sig, float* dst, const float* src)
{
kern_pdf_gauss<<<gridsize(N), blocksize(N)>>>(N, mu, sig, dst, src);
}
__global__ void kern_real(int N, float* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCrealf(src[i]);
}
extern "C" void cuda_real(long N, float* dst, const _Complex float* src)
{
kern_real<<<gridsize(N), blocksize(N)>>>(N, dst, (cuFloatComplex*)src);
}
__global__ void kern_imag(int N, float* dst, const cuFloatComplex* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = cuCimagf(src[i]);
}
extern "C" void cuda_imag(long N, float* dst, const _Complex float* src)
{
kern_imag<<<gridsize(N), blocksize(N)>>>(N, dst, (cuFloatComplex*)src);
}
__global__ void kern_zcmpl_real(int N, cuFloatComplex* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(src[i], 0);
}
extern "C" void cuda_zcmpl_real(long N, _Complex float* dst, const float* src)
{
kern_zcmpl_real<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, src);
}
__global__ void kern_zcmpl_imag(int N, cuFloatComplex* dst, const float* src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(0., src[i]);
}
extern "C" void cuda_zcmpl_imag(long N, _Complex float* dst, const float* src)
{
kern_zcmpl_imag<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, src);
}
__global__ void kern_zcmpl(int N, cuFloatComplex* dst, const float* real_src, const float* imag_src)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = make_cuFloatComplex(real_src[i], imag_src[i]);
}
extern "C" void cuda_zcmpl(long N, _Complex float* dst, const float* real_src, const float* imag_src)
{
kern_zcmpl<<<gridsize(N), blocksize(N)>>>(N, (cuFloatComplex*)dst, real_src, imag_src);
}
__global__ void kern_zfill(int N, cuFloatComplex val, cuFloatComplex* dst)
{
int start = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = start; i < N; i += stride)
dst[i] = val;
}
extern "C" void cuda_zfill(long N, _Complex float val, _Complex float* dst)
{
kern_zfill<<<gridsize(N), blocksize(N)>>>(N, make_cuFloatComplex(__real(val), __imag(val)), (cuFloatComplex*)dst);
}
|
d9b021d26f513ae759e6c9e82805bbca7ff0a326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
@file rx_cu_common.cu
@brief Common CUDA device functions
@author Makoto Fujisawa
@date 2009-08, 2011-06
*/
// FILE --rx_cu_common.cu--
#ifndef _RX_CU_COMMON_CU_
#define _RX_CU_COMMON_CU_
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
#include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include <math_constants.h>
#include "rx_cuda_utils.h"
#include "rx_cu_common.cuh"
// Simulation parameters (constant memory)
__constant__ rxSimParams params;
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
__device__ __host__
inline uint calUintPow(uint x, uint y)
{
uint x_y = 1;
for(uint i=0; i < y;i++) x_y *= x;
return x_y;
}
/*!
* a/b
* @param[in] a,b a/b
* @return
*/
__device__ __host__
inline uint DivCeil(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*!
* [a,b]
* @param[in] x
* @param[in] a,b
* @return
*/
__device__
inline float CuClamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__
inline int CuClamp(int x, int a, int b)
{
return max(a, min(b, x));
}
/*!
* for float3
* @param[in] v
*/
__device__
inline int CuIsZero(float3 v)
{
if(fabsf(v.x) < 1.0e-10 && fabsf(v.y) < 1.0e-10 && fabsf(v.z) < 1.0e-10){
return 1;
}
else{
return 0;
}
}
/*!
*
* @param[in] m 3x3
* @param[in] v 3D
* @return
*/
__device__
inline float3 CuMulMV(matrix3x3 m, float3 v)
{
return make_float3(dot(m.e[0], v), dot(m.e[1], v), dot(m.e[2], v));
}
//
__device__ __host__
inline void computeGridSize(uint n, uint thread_per_block, uint &numBlocks, uint &numThreads)
{
numThreads = min(thread_per_block, n);
numBlocks = DivCeil(n, numThreads);
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
/*!
*
* @param[in] p
* @return
*/
__device__
inline int3 calcGridPos(float3 p)
{
int3 gridPos;
gridPos.x = floor((p.x-params.WorldOrigin.x)/params.CellWidth.x);
gridPos.y = floor((p.y-params.WorldOrigin.y)/params.CellWidth.y);
gridPos.z = floor((p.z-params.WorldOrigin.z)/params.CellWidth.z);
gridPos.x = min(max(gridPos.x, 0), params.GridSize.x-1);
gridPos.y = min(max(gridPos.y, 0), params.GridSize.y-1);
gridPos.z = min(max(gridPos.z, 0), params.GridSize.z-1);
return gridPos;
}
/*!
* 1
* @param[in] gridPos
* @return
*/
__device__
inline uint calcGridHash(int3 gridPos)
{
return __umul24(__umul24(gridPos.z, params.GridSize.y), params.GridSize.x)+__umul24(gridPos.y, params.GridSize.x)+gridPos.x;
}
/*!
*
* @param[in] p
* @param[in] origin
* @param[in] cell_width 1
* @param[in] grid_size
* @return
*/
__device__
inline int3 calcGridPosB(float3 p, float3 origin, float3 cell_width, uint3 grid_size)
{
int3 gridPos;
gridPos.x = floor((p.x-origin.x)/cell_width.x);
gridPos.y = floor((p.y-origin.y)/cell_width.y);
gridPos.z = floor((p.z-origin.z)/cell_width.z);
gridPos.x = min(max(gridPos.x, 0), grid_size.x-1);
gridPos.y = min(max(gridPos.y, 0), grid_size.y-1);
gridPos.z = min(max(gridPos.z, 0), grid_size.z-1);
return gridPos;
}
/*!
* 1
* @param[in] gridPos
* @return
*/
__device__
inline uint calcGridHashB(int3 gridPos, uint3 grid_size)
{
return __umul24(__umul24(gridPos.z, grid_size.y), grid_size.x)+__umul24(gridPos.y, grid_size.x)+gridPos.x;
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
#ifdef RX_USE_ATOMIC_FUNC
/*!
* floatatomicAdd
*/
__device__
inline void atomicFloatAdd(float *address, float val)
{
int i_val = __float_as_int(val);
int tmp0 = 0;
int tmp1;
while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0)
{
tmp0 = tmp1;
i_val = __float_as_int(val + __int_as_float(tmp1));
}
}
/*!
* doubleatomicAdd
*/
__device__
inline double atomicDoubleAdd(double *address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val+__longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
/*!
* floatatomicMin
*/
__device__
inline float atomicFloatMin(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMin(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
/*!
* floatatomicMax
*/
__device__
inline float atomicFloatMax(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMax(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
#endif // #ifdef RX_USE_ATOMIC_FUNC
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
/*!
* 1D3D()
* @param[in] i 1D
* @param[in] gridSize
* @return 3D
*/
__device__
inline uint3 calcGridPosU(uint i, uint3 ngrid)
{
uint3 gridPos;
uint w = i%(ngrid.x*ngrid.y);
gridPos.x = w%ngrid.x;
gridPos.y = w/ngrid.x;
gridPos.z = i/(ngrid.x*ngrid.y);
return gridPos;
}
/*!
* 3D1D()
* @param[in] p 3D
* @param[in] gridSize
* @return 1D
*/
__device__
inline uint calcGridPos3(uint3 p, uint3 ngrid)
{
p.x = min(p.x, ngrid.x-1);
p.y = min(p.y, ngrid.y-1);
p.z = min(p.z, ngrid.z-1);
return (p.z*ngrid.x*ngrid.y)+(p.y*ngrid.x)+p.x;
}
//-----------------------------------------------------------------------------
// CWT
//-----------------------------------------------------------------------------
/*!
*
* @param[in] t
* @return
*/
__device__
inline float MexicanHat(float t)
{
t = t*t;
return MEXICAN_HAT_C*(1.0-t)*exp(-t/2.0);
}
__device__
inline float MexicanHatIm(float t)
{
return 0.0f;
}
/*!
* ()
* @param[in] w
* @return
*/
__device__
inline float MexicanHatWave(float w)
{
w = w*w;
return MEXICAN_HAT_C*M_SQRT2PI*w*exp(-w/2.0);
}
inline float MexicanHatWaveIm(float w)
{
return 0.0f;
}
/*!
* (2D)
* @param[in] x,y
* @return
*/
__device__
inline float MexicanHat2D(float x, float y)
{
x = x*x;
y = y*y;
return MEXICAN_HAT_C*(x+y-2)*exp(-(x+y)/2.0);
}
__device__
inline float MexicanHat2DIm(float x, float y)
{
return 0.0f;
}
/*!
* (3D)
* @param[in] x,y
* @return
*/
__device__ __host__
inline float MexicanHat3D(float x, float y, float z)
{
x = x*x;
y = y*y;
z = z*z;
return MEXICAN_HAT_C*(x+y+z-3.0f)*exp(-(x+y+z)/2.0f);
}
__device__ __host__
inline float MexicanHat3DIm(float x, float y)
{
return 0.0f;
}
__device__
inline int Mod(int x, int n)
{
int m = (int)fmodf((float)x, (float)n);
return ((m < 0) ? m+n : m);
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
/*!
* Mersenne Twister (CUDA)
* @param[out] d_Random
* @param[in] NPerRng
*/
__global__
static void RandomGPU(float *d_Random, int NPerRng)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
// (C)
__device__ static unsigned int randx = 1;
__device__
inline void Srand(unsigned int s)
{
randx = s;
}
__device__
inline unsigned int Rand()
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__device__
inline unsigned int Rand2(unsigned int x)
{
x = x*1103515245+12345;
return x&2147483647;
}
#define RAND2_MAX (2147483647)
// XORShift
__device__ static unsigned long xors_x = 123456789;
__device__ static unsigned long xors_y = 362436069;
__device__ static unsigned long xors_z = 521288629;
__device__ static unsigned long xors_w = 88675123;
/*!
* G. Marsaglia, "Xorshift RNGs", Journal of Statistical Software, Vol. 8(14), pp.1-6, 2003.
* - http://www.jstatsoft.org/v08/i14/
* @param[in]
* @return
*/
__device__
inline unsigned long Xorshift128()
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
return ( xors_w = (xors_w^(xors_w>>19))^(t^(t>>8)) );
}
__device__
inline long Xorshift128(long l, long h)
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
xors_w = (xors_w^(xors_w>>19))^(t^(t>>8));
return l+(xors_w%(h-l));
}
__device__
inline float XorFrand(float l, float h)
{
return l+(h-l)*(Xorshift128(0, 1000000)/1000000.0f);
}
__device__
inline void Random(float2 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
}
__device__
inline void Random(float3 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
x.z = XorFrand(a, b);
}
//
__device__
inline float GaussianNoise(void)
{
float x1, x2;
float ret;
float r2;
do {
x1 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0; /* [-1, 1) */
x2 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0;
r2 = x1*x1 + x2*x2;
} while ((r2 == 0) || (r2 > 1.0));
ret = x1 * sqrtf((-2.0 * logf(r2))/r2);
ret *= 0.25; // Possibility of ( N(0, 1) < 4.0 ) = 100%
if (ret < -1.0) ret = -1.0; /* Account for loss of precision. */
if (ret > 1.0) ret = 1.0;
return ret;
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
/*!
* (2D, A)
* @param[in] A,B
* @param[in] C
* @param[in] r
* @param[out] P
* @return
*/
__device__
static int CuLineCircleIntersection(float2 A, float2 B, float2 C, float r, float2 P[2], float t[2])
{
float rr = r*r;
float2 AC = C-A;
float2 BC = C-B;
float2 v = B-A;
float l = length(v);
v /= l;
float td = dot(v, AC);
float2 D = A+td*v;
float dd = dot(D-C, D-C);
if(dd < rr){
float dt = sqrtf(rr-dd);
float da = rr-dot(AC, AC);
float db = rr-dot(BC, BC);
int inter = 0;
float t1 = td-dt;
float t2 = td+dt;
if(t1 >= 0 && t1 <= l){
P[inter] = A+t1*v;
t[inter] = t1;
inter++;
}
if(t2 >= 0 && t2 <= l){
P[inter] = A+t2*v;
t[inter] = t2;
inter++;
}
return inter;
}
else{
return 0;
}
}
/*!
* AABB
* @param[in] spos
* @param[in] r
* @param[in] sgn
* @param[in] box_min,box_max AABB
* @param[out] cp AABB
* @param[out] d AABB
* @param[out] n
*/
__device__
inline int collisionSphereAABB(float3 spos, float r, int sgn, float3 box_min, float3 box_max, float3 &cp, float &d, float3 &n)
{
float3 dist_min; // box_min
float3 dist_max; // box_max
float d0 = 0.0f;
float3 n0 = make_float3(0.0f, 0.0f, 0.0f);
int bout = 0;
int count = 0;
//
if((dist_min.x = (spos.x-r)-box_min.x) < 0.0){ bout |= 0x0001; count++; d0 = dist_min.x; n0 = make_float3( 1.0, 0.0, 0.0);}
if((dist_min.y = (spos.y-r)-box_min.y) < 0.0){ bout |= 0x0002; count++; d0 = dist_min.y; n0 = make_float3( 0.0, 1.0, 0.0);}
if((dist_min.z = (spos.z-r)-box_min.z) < 0.0){ bout |= 0x0004; count++; d0 = dist_min.z; n0 = make_float3( 0.0, 0.0, 1.0);}
if((dist_max.x = box_max.x-(spos.x+r)) < 0.0){ bout |= 0x0008; count++; d0 = dist_max.x; n0 = make_float3(-1.0, 0.0, 0.0);}
if((dist_max.y = box_max.y-(spos.y+r)) < 0.0){ bout |= 0x0010; count++; d0 = dist_max.y; n0 = make_float3( 0.0, -1.0, 0.0);}
if((dist_max.z = box_max.z-(spos.z+r)) < 0.0){ bout |= 0x0020; count++; d0 = dist_max.z; n0 = make_float3( 0.0, 0.0, -1.0);}
// ()
if(bout == 0){
float min_d = 1e10;
if(dist_min.x < min_d){ min_d = dist_min.x; n = make_float3( 1.0, 0.0, 0.0); }
if(dist_min.y < min_d){ min_d = dist_min.y; n = make_float3( 0.0, 1.0, 0.0); }
if(dist_min.z < min_d){ min_d = dist_min.z; n = make_float3( 0.0, 0.0, 1.0); }
if(dist_max.x < min_d){ min_d = dist_max.x; n = make_float3(-1.0, 0.0, 0.0); }
if(dist_max.y < min_d){ min_d = dist_max.y; n = make_float3( 0.0, -1.0, 0.0); }
if(dist_max.z < min_d){ min_d = dist_max.z; n = make_float3( 0.0, 0.0, -1.0); }
d = (float)sgn*min_d;
n *= (float)sgn;
cp = spos+n*fabs(d);
return 1;
}
//
// sgn = 1:-1:
if(count == 1){
//
d = (float)sgn*d0;
n = (float)sgn*n0;
cp = spos+n*fabs(d);
}
else{
// /
float3 x = make_float3(0.0f, 0.0f, 0.0f);
if(bout & 0x0001) x.x = dist_min.x;
if(bout & 0x0002) x.y = dist_min.y;
if(bout & 0x0004) x.z = dist_min.z;
if(bout & 0x0008) x.x = -dist_max.x;
if(bout & 0x0010) x.y = -dist_max.y;
if(bout & 0x0020) x.z = -dist_max.z;
d = length(x);
n = normalize(x);
d *= -(float)sgn;
n *= -(float)sgn;
cp = spos+n*fabs(d);
float3 disp = make_float3(0.00001);
//Random(disp, 0, 0.00001);
disp = disp*n;
cp += disp;
}
return 0;
}
/*!
* AABB
* @param[in] p
* @param[in] box_cen AABB
* @param[in] box_ext AABB1/2
* @param[out] cp AABB
* @param[out] d AABB
* @param[out] n
*/
__device__
inline int collisionPointAABB(float3 p, float3 box_cen, float3 box_ext, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
float3 tmp = fabs(cp)-box_ext;
float res = ((tmp.x > tmp.y && tmp.x > tmp.z) ? tmp.x : (tmp.y > tmp.z ? tmp.y : tmp.z));
float sgn = (res > 0.0) ? -1.0 : 1.0;
int coli = 0;
n = make_float3(0.0f);
if(cp.x > box_ext.x){
cp.x = box_ext.x;
n.x -= 1.0;
coli++;
}
else if(cp.x < -box_ext.x){
cp.x = -box_ext.x;
n.x += 1.0;
coli++;
}
if(cp.y > box_ext.y){
cp.y = box_ext.y;
n.y -= 1.0;
coli++;
}
else if(cp.y < -box_ext.y){
cp.y = -box_ext.y;
n.y += 1.0;
coli++;
}
if(cp.z > box_ext.z){
cp.z = box_ext.z;
n.z -= 1.0;
coli++;
}
else if(cp.z < -box_ext.z){
cp.z = -box_ext.z;
n.z += 1.0;
coli++;
}
n = normalize(n);
//if(coli > 1){
// float3 disp;
// Random(disp, 0, 0.00001);
// disp = disp*n;
// cp += disp;
//}
cp += box_cen;
d = sgn*length(cp-p);
return 0;
}
/*!
* BOX
* @param[in] p
* @param[in] box_cen BOX
* @param[in] box_ext BOX1/2
* @param[in] box_rot BOX(3x3)
* @param[in] box_inv_rot BOX(3x3)
* @param[out] cp BOX
* @param[out] d BOX
* @param[out] n
*/
__device__
inline int collisionPointBox(float3 p, float3 box_cen, float3 box_ext, matrix3x3 box_rot, matrix3x3 box_inv_rot, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
cp = CuMulMV(box_rot, cp);
float3 tmp = fabs(cp)-box_ext;
int coli = 0;
n = make_float3(0.0f);
if(tmp.x < 0.0 && tmp.y < 0.0 && tmp.z < 0.0){
tmp = fabs(tmp);
if(tmp.x <= tmp.y && tmp.x <= tmp.z){ // x
if(cp.x > 0){
cp.x = box_ext.x;
n.x += 1.0;
}
else{
cp.x = -box_ext.x;
n.x -= 1.0;
}
}
else if(tmp.y <= tmp.x && tmp.y <= tmp.z){ // y
if(cp.y > 0){
cp.y = box_ext.y;
n.y += 1.0;
}
else{
cp.y = -box_ext.y;
n.y -= 1.0;
}
}
else{ // z
if(cp.z > 0){
cp.z = box_ext.z;
n.z += 1.0;
}
else{
cp.z = -box_ext.z;
n.z -= 1.0;
}
}
coli++;
}
cp = CuMulMV(box_inv_rot, cp);
n = CuMulMV(box_inv_rot, n);
n = normalize(n);
cp += box_cen;
float sgn = (coli) ? -1.0 : 1.0;
d = sgn*(length(cp-p));
return 0;
}
/*!
*
* @param[in] p
* @param[in] sphere_cen
* @param[in] sphere_rad
* @param[out] cp
* @param[out] d
* @param[out] n
*/
__device__
inline int collisionPointSphere(float3 p, float3 sphere_cen, float sphere_rad, float3 &cp, float &d, float3 &n)
{
n = make_float3(0.0f);
float3 l = p-sphere_cen;
float ll = length(l);
d = ll-sphere_rad;
if(d < 0.0){
n = normalize(p-sphere_cen);
cp = sphere_cen+n*sphere_rad;
}
return 0;
}
/*!
*
* @param[in] v
* @param[in] px
* @param[in] pn
* @return
*/
__device__
inline float distPointPlane(float3 v, float3 px, float3 pn)
{
return dot((v-px), pn)/length(pn);
}
/*!
*
* @param[in] v0,v1,v2
* @param[in] n
* @param[in] p
* @return
*/
__device__
inline int distPointTriangle(float3 v0, float3 v1, float3 v2, float3 n, float3 p, float &dist, float3 &p0)
{
//
float l = distPointPlane(p, v0, n);
//
float3 np = p-l*n;
//
float3 n1 = cross((v0-p), (v1-p));
float3 n2 = cross((v1-p), (v2-p));
float3 n3 = cross((v2-p), (v0-p));
if(dot(n1, n2) > 0 && dot(n2, n3) > 0){
//
dist = l;
p0 = np;
return 1;
}
else{
//
return 0;
}
}
/*!
* /
* @param[in] P0,P1 /or
* @param[in] V0,V1,V2
* @param[out] I
* @retval 1 I
* @retval 0
* @retval 2
* @retval -1 "degenerate"(0)
*/
inline __device__
int intersectSegmentTriangle(float3 P0, float3 P1,
float3 V0, float3 V1, float3 V2,
float3 &I, float3 &n, float rp = 0.01)
{
//
float3 u = V1-V0;
float3 v = V2-V0;
n = normalize(cross(u, v));
if(CuIsZero(n)){
return -1; // "degenerate"(0)
}
//
float3 dir = P1-P0;
float a = dot(n, P0-V0);
float b = dot(n, dir);
if(fabs(b) < 1e-10){ //
if(a == 0){
return 2; //
}
else{
return 0; //
}
}
//
// 2
float r = -a/b;
if(r < 0.0 || fabs(a) > fabs(b) || b > 0){
return 0;
}
//if(r < 0.0){
// return 0;
//}
//else{
// if(fabs(a) > fabs(b)){
// return 0;
// }
// else{
// if(b > 0){
// return 0;
// }
// }
//}
//
I = P0+r*dir;
//
float uu, uv, vv, wu, wv, D;
uu = dot(u, u);
uv = dot(u, v);
vv = dot(v, v);
float3 w = I-V0;
wu = dot(w, u);
wv = dot(w, v);
D = uv*uv-uu*vv;
float s, t;
s = (uv*wv-vv*wu)/D;
if(s < 0.0 || s > 1.0){
return 0;
}
t = (uv*wu-uu*wv)/D;
if(t < 0.0 || (s+t) > 1.0){
return 0;
}
return 1;
}
#endif // #ifndef _RX_CU_COMMON_CU_
| d9b021d26f513ae759e6c9e82805bbca7ff0a326.cu | /*!
@file rx_cu_common.cu
@brief Common CUDA device functions
@author Makoto Fujisawa
@date 2009-08, 2011-06
*/
// FILE --rx_cu_common.cu--
#ifndef _RX_CU_COMMON_CU_
#define _RX_CU_COMMON_CU_
//-----------------------------------------------------------------------------
// Include files
//-----------------------------------------------------------------------------
#include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include <math_constants.h>
#include "rx_cuda_utils.h"
#include "rx_cu_common.cuh"
// Simulation parameters (constant memory)
__constant__ rxSimParams params;
//-----------------------------------------------------------------------------
// Functions
//-----------------------------------------------------------------------------
__device__ __host__
inline uint calUintPow(uint x, uint y)
{
uint x_y = 1;
for(uint i=0; i < y;i++) x_y *= x;
return x_y;
}
/*!
 * Compute a/b rounded up (ceiling of the division)
 * @param[in] a,b numerator and denominator of a/b
 * @return the quotient rounded up
*/
__device__ __host__
inline uint DivCeil(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*!
 * Clamp to the range [a,b]
 * @param[in] x value to clamp
 * @param[in] a,b clamp bounds
 * @return clamped value
*/
__device__
inline float CuClamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__
inline int CuClamp(int x, int a, int b)
{
return max(a, min(b, x));
}
/*!
 * Zero test for float3
 * @param[in] v value to test
*/
__device__
inline int CuIsZero(float3 v)
{
if(fabsf(v.x) < 1.0e-10 && fabsf(v.y) < 1.0e-10 && fabsf(v.z) < 1.0e-10){
return 1;
}
else{
return 0;
}
}
/*!
 * Matrix-vector product
 * @param[in] m 3x3 matrix
 * @param[in] v 3D vector
 * @return resulting vector
*/
__device__
inline float3 CuMulMV(matrix3x3 m, float3 v)
{
return make_float3(dot(m.e[0], v), dot(m.e[1], v), dot(m.e[2], v));
}
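// Added note: m.e[0..2] are used as the rows of the 3x3 matrix in CuMulMV above,
// so the result is the product m * v with m stored row by row.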
// Compute the number of blocks per grid and threads per block
__device__ __host__
inline void computeGridSize(uint n, uint thread_per_block, uint &numBlocks, uint &numThreads)
{
numThreads = min(thread_per_block, n);
numBlocks = DivCeil(n, numThreads);
}
//-----------------------------------------------------------------------------
// Grid
//-----------------------------------------------------------------------------
/*!
 * Compute the grid cell position
 * @param[in] p coordinates
 * @return grid cell coordinates
*/
__device__
inline int3 calcGridPos(float3 p)
{
int3 gridPos;
gridPos.x = floor((p.x-params.WorldOrigin.x)/params.CellWidth.x);
gridPos.y = floor((p.y-params.WorldOrigin.y)/params.CellWidth.y);
gridPos.z = floor((p.z-params.WorldOrigin.z)/params.CellWidth.z);
gridPos.x = min(max(gridPos.x, 0), params.GridSize.x-1);
gridPos.y = min(max(gridPos.y, 0), params.GridSize.y-1);
gridPos.z = min(max(gridPos.z, 0), params.GridSize.z-1);
return gridPos;
}
/*!
 * Compute the position in the 1D array from the grid cell coordinates
 * @param[in] gridPos grid cell coordinates
 * @return address (cell hash)
*/
__device__
inline uint calcGridHash(int3 gridPos)
{
return __umul24(__umul24(gridPos.z, params.GridSize.y), params.GridSize.x)+__umul24(gridPos.y, params.GridSize.x)+gridPos.x;
}
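/*
 * Example for illustration (hypothetical sizes, not part of the original
 * source): the hash linearizes the cell index as
 * (z * GridSize.y + y) * GridSize.x + x, so with GridSize = (4, 4, 4) the
 * cell (x, y, z) = (1, 2, 3) maps to 3*4*4 + 2*4 + 1 = 57.
 */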
/*!
 * Compute the grid cell position
 * @param[in] p coordinates
 * @param[in] origin minimum coordinates of the grid
 * @param[in] cell_width width of one grid cell
 * @param[in] grid_size number of grid cells
 * @return grid cell coordinates
*/
__device__
inline int3 calcGridPosB(float3 p, float3 origin, float3 cell_width, uint3 grid_size)
{
int3 gridPos;
gridPos.x = floor((p.x-origin.x)/cell_width.x);
gridPos.y = floor((p.y-origin.y)/cell_width.y);
gridPos.z = floor((p.z-origin.z)/cell_width.z);
gridPos.x = min(max(gridPos.x, 0), grid_size.x-1);
gridPos.y = min(max(gridPos.y, 0), grid_size.y-1);
gridPos.z = min(max(gridPos.z, 0), grid_size.z-1);
return gridPos;
}
/*!
 * Compute the position in the 1D array from the grid cell coordinates
 * @param[in] gridPos grid cell coordinates
 * @return address (cell hash)
*/
__device__
inline uint calcGridHashB(int3 gridPos, uint3 grid_size)
{
return __umul24(__umul24(gridPos.z, grid_size.y), grid_size.x)+__umul24(gridPos.y, grid_size.x)+gridPos.x;
}
//-----------------------------------------------------------------------------
// Atomic functions
//-----------------------------------------------------------------------------
#ifdef RX_USE_ATOMIC_FUNC
/*!
 * atomicAdd for float
*/
__device__
inline void atomicFloatAdd(float *address, float val)
{
int i_val = __float_as_int(val);
int tmp0 = 0;
int tmp1;
while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0)
{
tmp0 = tmp1;
i_val = __float_as_int(val + __int_as_float(tmp1));
}
}
/*!
 * atomicAdd for double
*/
__device__
inline double atomicDoubleAdd(double *address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val+__longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
/*!
 * atomicMin for float
*/
__device__
inline float atomicFloatMin(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMin(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
/*!
 * atomicMax for float
*/
__device__
inline float atomicFloatMax(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMax(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
#endif // #ifdef RX_USE_ATOMIC_FUNC
//-----------------------------------------------------------------------------
// Grid
//-----------------------------------------------------------------------------
/*!
 * Convert a 1D index to a 3D index (for an arbitrary grid size)
 * @param[in] i 1D index
 * @param[in] gridSize number of grid cells
 * @return 3D index
*/
__device__
inline uint3 calcGridPosU(uint i, uint3 ngrid)
{
uint3 gridPos;
uint w = i%(ngrid.x*ngrid.y);
gridPos.x = w%ngrid.x;
gridPos.y = w/ngrid.x;
gridPos.z = i/(ngrid.x*ngrid.y);
return gridPos;
}
/*!
 * Convert a 3D index to a 1D index (for an arbitrary grid size)
 * @param[in] p 3D index
 * @param[in] gridSize number of grid cells
 * @return 1D index
*/
__device__
inline uint calcGridPos3(uint3 p, uint3 ngrid)
{
p.x = min(p.x, ngrid.x-1);
p.y = min(p.y, ngrid.y-1);
p.z = min(p.z, ngrid.z-1);
return (p.z*ngrid.x*ngrid.y)+(p.y*ngrid.x)+p.x;
}
//-----------------------------------------------------------------------------
// CWT (continuous wavelet transform) device functions
//-----------------------------------------------------------------------------
/*!
 * Mexican hat wavelet
 * @param[in] t coordinate
 * @return value of the mother wavelet
*/
__device__
inline float MexicanHat(float t)
{
t = t*t;
return MEXICAN_HAT_C*(1.0-t)*exp(-t/2.0);
}
__device__
inline float MexicanHatIm(float t)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (wavenumber space)
 * @param[in] w wavenumber
 * @return value of the mother wavelet
*/
__device__
inline float MexicanHatWave(float w)
{
w = w*w;
return MEXICAN_HAT_C*M_SQRT2PI*w*exp(-w/2.0);
}
inline float MexicanHatWaveIm(float w)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (2D)
 * @param[in] x,y coordinates
 * @return value of the mother wavelet
*/
__device__
inline float MexicanHat2D(float x, float y)
{
x = x*x;
y = y*y;
return MEXICAN_HAT_C*(x+y-2)*exp(-(x+y)/2.0);
}
__device__
inline float MexicanHat2DIm(float x, float y)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (3D)
 * @param[in] x,y,z coordinates
 * @return value of the mother wavelet
*/
__device__ __host__
inline float MexicanHat3D(float x, float y, float z)
{
x = x*x;
y = y*y;
z = z*z;
return MEXICAN_HAT_C*(x+y+z-3.0f)*exp(-(x+y+z)/2.0f);
}
__device__ __host__
inline float MexicanHat3DIm(float x, float y)
{
return 0.0f;
}
__device__
inline int Mod(int x, int n)
{
int m = (int)fmodf((float)x, (float)n);
return ((m < 0) ? m+n : m);
}
//-----------------------------------------------------------------------------
// Random number generation
//-----------------------------------------------------------------------------
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
/*!
 * Random number generation with Mersenne Twister (from the CUDA samples)
 * @param[out] d_Random generated random numbers
 * @param[in] NPerRng number of values to generate per generator
*/
__global__
static void RandomGPU(float *d_Random, int NPerRng)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
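/*
 * Note for illustration (not part of the original source): sample iOut of
 * generator iRng is written to d_Random[iRng + iOut * MT_RNG_COUNT], i.e. the
 * NPerRng samples of one generator are strided by MT_RNG_COUNT rather than
 * stored contiguously.
 */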
// Linear congruential random number generator (same scheme as in the C standard library)
__device__ static unsigned int randx = 1;
__device__
inline void Srand(unsigned int s)
{
randx = s;
}
__device__
inline unsigned int Rand()
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__device__
inline unsigned int Rand2(unsigned int x)
{
x = x*1103515245+12345;
return x&2147483647;
}
#define RAND2_MAX (2147483647)
// XorShift random number generator
__device__ static unsigned long xors_x = 123456789;
__device__ static unsigned long xors_y = 362436069;
__device__ static unsigned long xors_z = 521288629;
__device__ static unsigned long xors_w = 88675123;
/*!
* G. Marsaglia, "Xorshift RNGs", Journal of Statistical Software, Vol. 8(14), pp.1-6, 2003.
* - http://www.jstatsoft.org/v08/i14/
* @param[in]
* @return
*/
__device__
inline unsigned long Xorshift128()
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
return ( xors_w = (xors_w^(xors_w>>19))^(t^(t>>8)) );
}
__device__
inline long Xorshift128(long l, long h)
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
xors_w = (xors_w^(xors_w>>19))^(t^(t>>8));
return l+(xors_w%(h-l));
}
__device__
inline float XorFrand(float l, float h)
{
return l+(h-l)*(Xorshift128(0, 1000000)/1000000.0f);
}
__device__
inline void Random(float2 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
}
__device__
inline void Random(float3 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
x.z = XorFrand(a, b);
}
// Gaussian noise
__device__
inline float GaussianNoise(void)
{
float x1, x2;
float ret;
float r2;
do {
x1 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0; /* [-1, 1) */
x2 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0;
r2 = x1*x1 + x2*x2;
} while ((r2 == 0) || (r2 > 1.0));
ret = x1 * sqrtf((-2.0 * logf(r2))/r2);
ret *= 0.25; // Possibility of ( N(0, 1) < 4.0 ) = 100%
if (ret < -1.0) ret = -1.0; /* Account for loss of precision. */
if (ret > 1.0) ret = 1.0;
return ret;
}
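/*
 * Note for illustration (not part of the original source): the rejection loop
 * above is the Marsaglia polar form of the Box-Muller transform; the sample
 * is then scaled by 0.25 and clamped, so the returned value always lies in
 * [-1, 1].
 */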
//-----------------------------------------------------------------------------
// Intersection tests
//-----------------------------------------------------------------------------
/*!
 * Line segment / circle intersection test (2D); intersections are returned in order of increasing distance from A
 * @param[in] A,B endpoints of the segment
 * @param[in] C center of the circle
 * @param[in] r radius of the circle
 * @param[out] P intersection points
 * @return number of intersections
*/
__device__
static int CuLineCircleIntersection(float2 A, float2 B, float2 C, float r, float2 P[2], float t[2])
{
float rr = r*r;
float2 AC = C-A;
float2 BC = C-B;
float2 v = B-A;
float l = length(v);
v /= l;
float td = dot(v, AC);
float2 D = A+td*v;
float dd = dot(D-C, D-C);
if(dd < rr){
float dt = sqrtf(rr-dd);
float da = rr-dot(AC, AC);
float db = rr-dot(BC, BC);
int inter = 0;
float t1 = td-dt;
float t2 = td+dt;
if(t1 >= 0 && t1 <= l){
P[inter] = A+t1*v;
t[inter] = t1;
inter++;
}
if(t2 >= 0 && t2 <= l){
P[inter] = A+t2*v;
t[inter] = t2;
inter++;
}
return inter;
}
else{
return 0;
}
}
/*!
 * Distance between a sphere and an AABB
 * @param[in] spos sphere center
 * @param[in] r sphere radius
 * @param[in] sgn sign convention (1: bounding box, -1: solid object)
 * @param[in] box_min,box_max minimum and maximum corners of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the sphere and the AABB
 * @param[out] n unit normal vector at the contact point
*/
__device__
inline int collisionSphereAABB(float3 spos, float r, int sgn, float3 box_min, float3 box_max, float3 &cp, float &d, float3 &n)
{
	float3 dist_min;	// distance to box_min
	float3 dist_max;	// distance to box_max
	float d0 = 0.0f;
	float3 n0 = make_float3(0.0f, 0.0f, 0.0f);
	int bout = 0;
	int count = 0;
	// check each axis for penetration of the min and max boundaries
if((dist_min.x = (spos.x-r)-box_min.x) < 0.0){ bout |= 0x0001; count++; d0 = dist_min.x; n0 = make_float3( 1.0, 0.0, 0.0);}
if((dist_min.y = (spos.y-r)-box_min.y) < 0.0){ bout |= 0x0002; count++; d0 = dist_min.y; n0 = make_float3( 0.0, 1.0, 0.0);}
if((dist_min.z = (spos.z-r)-box_min.z) < 0.0){ bout |= 0x0004; count++; d0 = dist_min.z; n0 = make_float3( 0.0, 0.0, 1.0);}
if((dist_max.x = box_max.x-(spos.x+r)) < 0.0){ bout |= 0x0008; count++; d0 = dist_max.x; n0 = make_float3(-1.0, 0.0, 0.0);}
if((dist_max.y = box_max.y-(spos.y+r)) < 0.0){ bout |= 0x0010; count++; d0 = dist_max.y; n0 = make_float3( 0.0, -1.0, 0.0);}
if((dist_max.z = box_max.z-(spos.z+r)) < 0.0){ bout |= 0x0020; count++; d0 = dist_max.z; n0 = make_float3( 0.0, 0.0, -1.0);}
	// inside the box (within the bounds on every axis)
if(bout == 0){
float min_d = 1e10;
if(dist_min.x < min_d){ min_d = dist_min.x; n = make_float3( 1.0, 0.0, 0.0); }
if(dist_min.y < min_d){ min_d = dist_min.y; n = make_float3( 0.0, 1.0, 0.0); }
if(dist_min.z < min_d){ min_d = dist_min.z; n = make_float3( 0.0, 0.0, 1.0); }
if(dist_max.x < min_d){ min_d = dist_max.x; n = make_float3(-1.0, 0.0, 0.0); }
if(dist_max.y < min_d){ min_d = dist_max.y; n = make_float3( 0.0, -1.0, 0.0); }
if(dist_max.z < min_d){ min_d = dist_max.z; n = make_float3( 0.0, 0.0, -1.0); }
d = (float)sgn*min_d;
n *= (float)sgn;
cp = spos+n*fabs(d);
return 1;
}
	// outside the box
	// sgn = 1: box, -1: object
	if(count == 1){
		// near a face
d = (float)sgn*d0;
n = (float)sgn*n0;
cp = spos+n*fabs(d);
}
else{
		// near an edge/corner
float3 x = make_float3(0.0f, 0.0f, 0.0f);
if(bout & 0x0001) x.x = dist_min.x;
if(bout & 0x0002) x.y = dist_min.y;
if(bout & 0x0004) x.z = dist_min.z;
if(bout & 0x0008) x.x = -dist_max.x;
if(bout & 0x0010) x.y = -dist_max.y;
if(bout & 0x0020) x.z = -dist_max.z;
d = length(x);
n = normalize(x);
d *= -(float)sgn;
n *= -(float)sgn;
cp = spos+n*fabs(d);
float3 disp = make_float3(0.00001);
//Random(disp, 0, 0.00001);
disp = disp*n;
cp += disp;
}
return 0;
}
/*!
 * Distance between a point and an AABB
 * @param[in] p point coordinates
 * @param[in] box_cen center of the AABB
 * @param[in] box_ext half edge lengths of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the point and the AABB
 * @param[out] n unit normal vector at the contact point
*/
__device__
inline int collisionPointAABB(float3 p, float3 box_cen, float3 box_ext, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
float3 tmp = fabs(cp)-box_ext;
float res = ((tmp.x > tmp.y && tmp.x > tmp.z) ? tmp.x : (tmp.y > tmp.z ? tmp.y : tmp.z));
float sgn = (res > 0.0) ? -1.0 : 1.0;
int coli = 0;
n = make_float3(0.0f);
if(cp.x > box_ext.x){
cp.x = box_ext.x;
n.x -= 1.0;
coli++;
}
else if(cp.x < -box_ext.x){
cp.x = -box_ext.x;
n.x += 1.0;
coli++;
}
if(cp.y > box_ext.y){
cp.y = box_ext.y;
n.y -= 1.0;
coli++;
}
else if(cp.y < -box_ext.y){
cp.y = -box_ext.y;
n.y += 1.0;
coli++;
}
if(cp.z > box_ext.z){
cp.z = box_ext.z;
n.z -= 1.0;
coli++;
}
else if(cp.z < -box_ext.z){
cp.z = -box_ext.z;
n.z += 1.0;
coli++;
}
n = normalize(n);
//if(coli > 1){
// float3 disp;
// Random(disp, 0, 0.00001);
// disp = disp*n;
// cp += disp;
//}
cp += box_cen;
d = sgn*length(cp-p);
return 0;
}
/*!
 * Distance between a point and an oriented box
 * @param[in] p point coordinates
 * @param[in] box_cen center of the box
 * @param[in] box_ext half edge lengths of the box
 * @param[in] box_rot orientation matrix of the box (3x3 rotation matrix)
 * @param[in] box_inv_rot inverse of the box orientation matrix (3x3)
 * @param[out] cp closest point on the box surface
 * @param[out] d distance between the point and the box
 * @param[out] n unit normal vector at the contact point
*/
__device__
inline int collisionPointBox(float3 p, float3 box_cen, float3 box_ext, matrix3x3 box_rot, matrix3x3 box_inv_rot, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
cp = CuMulMV(box_rot, cp);
float3 tmp = fabs(cp)-box_ext;
int coli = 0;
n = make_float3(0.0f);
if(tmp.x < 0.0 && tmp.y < 0.0 && tmp.z < 0.0){
tmp = fabs(tmp);
		if(tmp.x <= tmp.y && tmp.x <= tmp.z){	// closest to an x face
if(cp.x > 0){
cp.x = box_ext.x;
n.x += 1.0;
}
else{
cp.x = -box_ext.x;
n.x -= 1.0;
}
}
		else if(tmp.y <= tmp.x && tmp.y <= tmp.z){	// closest to a y face
if(cp.y > 0){
cp.y = box_ext.y;
n.y += 1.0;
}
else{
cp.y = -box_ext.y;
n.y -= 1.0;
}
}
		else{	// closest to a z face
if(cp.z > 0){
cp.z = box_ext.z;
n.z += 1.0;
}
else{
cp.z = -box_ext.z;
n.z -= 1.0;
}
}
coli++;
}
cp = CuMulMV(box_inv_rot, cp);
n = CuMulMV(box_inv_rot, n);
n = normalize(n);
cp += box_cen;
float sgn = (coli) ? -1.0 : 1.0;
d = sgn*(length(cp-p));
return 0;
}
/*!
 * Distance between a point and a sphere
 * @param[in] p point coordinates
 * @param[in] sphere_cen center of the sphere
 * @param[in] sphere_rad radius of the sphere
 * @param[out] cp intersection of the sphere with the segment joining the point and the sphere center
 * @param[out] d distance between the point and the sphere surface
 * @param[out] n unit vector from the sphere center towards the point
*/
__device__
inline int collisionPointSphere(float3 p, float3 sphere_cen, float sphere_rad, float3 &cp, float &d, float3 &n)
{
n = make_float3(0.0f);
float3 l = p-sphere_cen;
float ll = length(l);
d = ll-sphere_rad;
if(d < 0.0){
n = normalize(p-sphere_cen);
cp = sphere_cen+n*sphere_rad;
}
return 0;
}
/*!
 * Distance between a point and a plane
 * @param[in] v point coordinates
 * @param[in] px a point on the plane
 * @param[in] pn plane normal
 * @return signed distance
*/
__device__
inline float distPointPlane(float3 v, float3 px, float3 pn)
{
return dot((v-px), pn)/length(pn);
}
/*!
 * Distance between a point and a triangle, and the closest point
 * @param[in] v0,v1,v2 triangle vertices
 * @param[in] n triangle normal
 * @param[in] p the point
 * @return 1 if the closest point lies inside the triangle (dist and p0 are set), 0 otherwise
*/
__device__
inline int distPointTriangle(float3 v0, float3 v1, float3 v2, float3 n, float3 p, float &dist, float3 &p0)
{
	// distance between the point and the plane containing the triangle
	float l = distPointPlane(p, v0, n);
	// closest point on that plane
	float3 np = p-l*n;
	// test whether the closest point lies inside the triangle
float3 n1 = cross((v0-p), (v1-p));
float3 n2 = cross((v1-p), (v2-p));
float3 n3 = cross((v2-p), (v0-p));
if(dot(n1, n2) > 0 && dot(n2, n3) > 0){
		// inside the triangle
dist = l;
p0 = np;
return 1;
}
else{
		// outside the triangle
return 0;
}
}
/*!
 * Ray/segment vs triangle intersection
 * @param[in] P0,P1 segment endpoints (or points on the ray)
 * @param[in] V0,V1,V2 triangle vertex coordinates
 * @param[out] I intersection point
 * @retval 1 intersects at point I
 * @retval 0 no intersection
 * @retval 2 segment lies in the triangle plane
 * @retval -1 the triangle is "degenerate" (zero area, i.e. collapsed to a segment or a point)
*/
inline __device__
int intersectSegmentTriangle(float3 P0, float3 P1,
float3 V0, float3 V1, float3 V2,
float3 &I, float3 &n, float rp = 0.01)
{
	// triangle edge vectors and normal
float3 u = V1-V0;
float3 v = V2-V0;
n = normalize(cross(u, v));
if(CuIsZero(n)){
		return -1;	// the triangle is "degenerate" (zero area)
	}
	// segment direction
float3 dir = P1-P0;
float a = dot(n, P0-V0);
float b = dot(n, dir);
	if(fabs(b) < 1e-10){	// segment parallel to the triangle plane
		if(a == 0){
			return 2;	// segment lies in the plane
		}
		else{
			return 0;	// no intersection
}
}
	// intersection computation
	// check whether the two endpoints lie on opposite sides of the triangle plane
float r = -a/b;
if(r < 0.0 || fabs(a) > fabs(b) || b > 0){
return 0;
}
//if(r < 0.0){
// return 0;
//}
//else{
// if(fabs(a) > fabs(b)){
// return 0;
// }
// else{
// if(b > 0){
// return 0;
// }
// }
//}
	// intersection point of the segment and the plane
	I = P0+r*dir;
	// check whether the intersection point lies inside the triangle
float uu, uv, vv, wu, wv, D;
uu = dot(u, u);
uv = dot(u, v);
vv = dot(v, v);
float3 w = I-V0;
wu = dot(w, u);
wv = dot(w, v);
D = uv*uv-uu*vv;
float s, t;
s = (uv*wv-vv*wu)/D;
if(s < 0.0 || s > 1.0){
return 0;
}
t = (uv*wu-uu*wv)/D;
if(t < 0.0 || (s+t) > 1.0){
return 0;
}
return 1;
}
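/*
 * Minimal usage sketch (hypothetical variables p_old, p_new, V0, V1, V2; not
 * part of the original source):
 *
 *   float3 I, n;
 *   if(intersectSegmentTriangle(p_old, p_new, V0, V1, V2, I, n) == 1){
 *       // I is the hit point and n the unit triangle normal, e.g. for
 *       // reflecting a particle velocity about n in a collision response.
 *   }
 */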
#endif // #ifndef _RX_CU_COMMON_CU_
|
2b0bd1f708debd53b105cd3240543aa11df33dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <helpers/TAD.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseTadKernel(const void* vinput, const Nd4jLong *inputShape, void* voutput, const Nd4jLong *outputShape, const Nd4jLong *inputTadShape, const Nd4jLong *inputTadOffsets, const Nd4jLong *outputTadShape, const Nd4jLong *outputTadOffsets, uint64_t limit, uint64_t numOfElemsToReverse, uint64_t numTads) {
auto input = reinterpret_cast<const T*>(vinput);
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
        // an odd numOfElemsToReverse means an additional step is needed to move the middle element
auto div = numOfElemsToReverse / 2;
auto odd = numOfElemsToReverse % 2 != 0;
auto rlimit = odd ? limit / 2 + 1 : limit / 2;
// all threads operate in the same input/output space
for (uint64_t e = tid; e < rlimit; e += step) {
// finding out the TAD we're going to process
auto tadId = e / div;
if (tadId >= numTads)
continue;
// now finding out element within tad
auto idx = e % div;
//printf("TID: %i; numTads: %lld; tadLength: %lld; tadId: %i, idx: %lld\n", tid, numTads, numOfElemsToReverse, tadId, idx);
auto tadInput = input + inputTadOffsets[tadId];
auto tadOutput = output + outputTadOffsets[tadId];
// we're calculating offsets within input TAD
auto fOffset = shape::getIndexOffset(idx, inputTadShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, inputTadShape);
// now we're storing input values
auto v1 = tadInput[fOffset];
auto v2 = tadInput[lOffset];
// now we're calculating offsets within output TAD
auto zfOffset = shape::getIndexOffset(idx, outputTadShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, outputTadShape);
// and saving values to output arrays
tadOutput[zfOffset] = v2;
tadOutput[zlOffset] = v1;
}
// moving odd element in blocks
if (odd && threadIdx.x == 0) {
for (uint64_t e = blockIdx.x; e < numTads; e += gridDim.x) {
auto tadInput = input + inputTadOffsets[e];
auto tadOutput = output + outputTadOffsets[e];
auto xOffset = shape::getIndexOffset(numOfElemsToReverse / 2, inputTadShape);
auto zOffset = shape::getIndexOffset(numOfElemsToReverse / 2, outputTadShape);
tadOutput[zOffset] = tadInput[xOffset];
}
}
}
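// Note for illustration (hypothetical sizes, not part of the original source): with
// numOfElemsToReverse = 5 (div = 2, odd = true) and numTads = 3, the flat work index e maps to
// (tadId, idx) as e = 0,1 -> TAD 0, e = 2,3 -> TAD 1, e = 4,5 -> TAD 2, and within a TAD the
// pair (idx, 4 - idx) is swapped. The middle element (index 2) of every TAD is copied separately
// by the odd-handling block above.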
template <typename T>
static __global__ void reverseArrayKernel(const void* input, const Nd4jLong *inputShape, void* output, const Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ const T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
            inputOrder = shape::order(inputShape);
            outputOrder = shape::order(outputShape);
            // assign the shared orders before they are used in the stride comparison
            linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder) ? shape::elementWiseStride(inputShape) : 0;
inputArr = reinterpret_cast<const T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (uint64_t e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseTad(sd::LaunchContext * context, const NDArray* input, NDArray* output, const Nd4jLong *inputTadShape, const Nd4jLong *inputTadOffsets, const Nd4jLong *outputTadShape, const Nd4jLong *outputTadOffsets, uint64_t tadLength) {
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( reverseTadKernel<T>), dim3(256), dim3(512), 8192, *stream, input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTadShape, inputTadOffsets, outputTadShape, outputTadOffsets, input->lengthOf(), tadLength, input->lengthOf() / tadLength);
}
template<typename T>
static void reverseArray(sd::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(sd::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
if(input->isVector() || shape::isLikeVector(input->shapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet.size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet.at(i)->assign(inSubArrsSet.at(i));
}
else {
auto inInnerSet = inSubArrsSet.at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet.at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet.size(); ++j)
reverseArray<T>(context, inInnerSet.at(j), outInnerSet.at(j), numOfElemsToReverse);
}
}
}
}
void reverseSequence(sd::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->specialBuffer() != input->specialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(sd::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse the axis only if that's a new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
NDArray::prepareSpecialUse({output}, {input});
if (packX.numberOfTads() == 1) {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, input, output, 0), LIBND4J_TYPES);
} else {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseTad, (context, input, output, packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), (uint64_t) (input->lengthOf() / packX.numberOfTads())), LIBND4J_TYPES);
}
NDArray::registerSpecialUse({output}, {input});
}
}
}
}
| 2b0bd1f708debd53b105cd3240543aa11df33dd7.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <helpers/TAD.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseTadKernel(const void* vinput, const Nd4jLong *inputShape, void* voutput, const Nd4jLong *outputShape, const Nd4jLong *inputTadShape, const Nd4jLong *inputTadOffsets, const Nd4jLong *outputTadShape, const Nd4jLong *outputTadOffsets, uint64_t limit, uint64_t numOfElemsToReverse, uint64_t numTads) {
auto input = reinterpret_cast<const T*>(vinput);
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
// this means that we'll have an additional cycle to move the middle element
auto div = numOfElemsToReverse / 2;
auto odd = numOfElemsToReverse % 2 != 0;
auto rlimit = odd ? limit / 2 + 1 : limit / 2;
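// e.g. numOfElemsToReverse = 5: div = 2, so each TAD gets iterations idx = 0 and idx = 1,
// swapping elements (0, 4) and (1, 3); the middle element (index 2) is copied by the odd-handling block below.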
// all threads operate in the same input/output space
for (uint64_t e = tid; e < rlimit; e += step) {
// finding out the TAD we're going to process
auto tadId = e / div;
if (tadId >= numTads)
continue;
// now finding out element within tad
auto idx = e % div;
//printf("TID: %i; numTads: %lld; tadLength: %lld; tadId: %i, idx: %lld\n", tid, numTads, numOfElemsToReverse, tadId, idx);
auto tadInput = input + inputTadOffsets[tadId];
auto tadOutput = output + outputTadOffsets[tadId];
// we're calculating offsets within input TAD
auto fOffset = shape::getIndexOffset(idx, inputTadShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, inputTadShape);
// now we're storing input values
auto v1 = tadInput[fOffset];
auto v2 = tadInput[lOffset];
// now we're calculating offsets within output TAD
auto zfOffset = shape::getIndexOffset(idx, outputTadShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, outputTadShape);
// and saving values to output arrays
tadOutput[zfOffset] = v2;
tadOutput[zlOffset] = v1;
}
// moving odd element in blocks
if (odd && threadIdx.x == 0) {
for (uint64_t e = blockIdx.x; e < numTads; e += gridDim.x) {
auto tadInput = input + inputTadOffsets[e];
auto tadOutput = output + outputTadOffsets[e];
auto xOffset = shape::getIndexOffset(numOfElemsToReverse / 2, inputTadShape);
auto zOffset = shape::getIndexOffset(numOfElemsToReverse / 2, outputTadShape);
tadOutput[zOffset] = tadInput[xOffset];
}
}
}
template <typename T>
static __global__ void reverseArrayKernel(const void* input, const Nd4jLong *inputShape, void* output, const Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ const T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
inputOrder = shape::order(inputShape);
outputOrder = shape::order(outputShape);
linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder)? shape::elementWiseStride(inputShape):0;
inputArr = reinterpret_cast<const T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (uint64_t e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseTad(sd::LaunchContext * context, const NDArray* input, NDArray* output, const Nd4jLong *inputTadShape, const Nd4jLong *inputTadOffsets, const Nd4jLong *outputTadShape, const Nd4jLong *outputTadOffsets, uint64_t tadLength) {
auto stream = context->getCudaStream();
reverseTadKernel<T><<<256, 512, 8192, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTadShape, inputTadOffsets, outputTadShape, outputTadOffsets, input->lengthOf(), tadLength, input->lengthOf() / tadLength);
}
template<typename T>
static void reverseArray(sd::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(sd::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
if(input->isVector() || shape::isLikeVector(input->shapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet.size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet.at(i)->assign(inSubArrsSet.at(i));
}
else {
auto inInnerSet = inSubArrsSet.at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet.at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet.size(); ++j)
reverseArray<T>(context, inInnerSet.at(j), outInnerSet.at(j), numOfElemsToReverse);
}
}
}
}
void reverseSequence(sd::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->specialBuffer() != input->specialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(sd::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse the axis only if that's a new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
NDArray::prepareSpecialUse({output}, {input});
if (packX.numberOfTads() == 1) {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, input, output, 0), LIBND4J_TYPES);
} else {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseTad, (context, input, output, packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), (uint64_t) (input->lengthOf() / packX.numberOfTads())), LIBND4J_TYPES);
}
NDArray::registerSpecialUse({output}, {input});
}
}
}
}
|
c455dcff35fa84df472a085de9dbecf21b9c3c0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setupCuRandState.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *state = NULL;
hipMalloc(&state, XSIZE*YSIZE);
unsigned long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setupCuRandState), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setupCuRandState), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setupCuRandState), dim3(gridBlock),dim3(threadBlock), 0, 0, state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c455dcff35fa84df472a085de9dbecf21b9c3c0d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setupCuRandState.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *state = NULL;
cudaMalloc(&state, XSIZE*YSIZE);
unsigned long seed = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setupCuRandState<<<gridBlock,threadBlock>>>(state,seed);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setupCuRandState<<<gridBlock,threadBlock>>>(state,seed);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setupCuRandState<<<gridBlock,threadBlock>>>(state,seed);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
92f4391e4e8bdb2eca22716753f0a22818a3ec9f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
// TODO: do not modify code, just comment on suboptimal accesses
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
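// Read access: input[i + n * j] is coalesced, since i = threadIdx.x gives a warp 32 consecutive floats.
// Write access: output[j + n * i] is non-coalesced, since consecutive threads of a warp store addresses
// n floats apart, so each warp's 32 stores touch 32 different cache lines (for n >= 32).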
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// TODO: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
// __shared__ float data[???];
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// TODO: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( naiveTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( shmemTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
hipLaunchKernelGGL(( optimalTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
| 92f4391e4e8bdb2eca22716753f0a22818a3ec9f.cu | #include <cassert>
#include <cuda_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
// TODO: do not modify code, just comment on suboptimal accesses
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
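// Read access: input[i + n * j] is coalesced, since i = threadIdx.x gives a warp 32 consecutive floats.
// Write access: output[j + n * i] is non-coalesced, since consecutive threads of a warp store addresses
// n floats apart, so each warp's 32 stores touch 32 different cache lines (for n >= 32).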
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
// TODO: Modify transpose kernel to use shared memory. All global memory
// reads and writes should be coalesced. Minimize the number of shared
// memory bank conflicts (0 bank conflicts should be possible using
// padding). Again, comment on all sub-optimal accesses.
// __shared__ float data[???];
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
// TODO: This should be based off of your shmemTransposeKernel.
// Use any optimization tricks discussed so far to improve performance.
// Consider ILP and loop unrolling.
const int i = threadIdx.x + 64 * blockIdx.x;
int j = 4 * threadIdx.y + 64 * blockIdx.y;
const int end_j = j + 4;
for (; j < end_j; j++)
output[j + n * i] = input[i + n * j];
}
void cudaTranspose(
const float *d_input,
float *d_output,
int n,
TransposeImplementation type)
{
if (type == NAIVE) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
naiveTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == SHMEM) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
shmemTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
else if (type == OPTIMAL) {
dim3 blockSize(64, 16);
dim3 gridSize(n / 64, n / 64);
optimalTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
}
// Unknown type
else
assert(false);
}
|
aa4fc5e1a89e36946a0c5f0b6a31211c63c83d4c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Match8small.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_pts1 = NULL;
hipMalloc(&d_pts1, XSIZE*YSIZE);
float *d_pts2 = NULL;
hipMalloc(&d_pts2, XSIZE*YSIZE);
float *d_score = NULL;
hipMalloc(&d_score, XSIZE*YSIZE);
int *d_index = NULL;
hipMalloc(&d_index, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Match8small), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pts1,d_pts2,d_score,d_index);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Match8small), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pts1,d_pts2,d_score,d_index);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Match8small), dim3(gridBlock),dim3(threadBlock), 0, 0, d_pts1,d_pts2,d_score,d_index);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | aa4fc5e1a89e36946a0c5f0b6a31211c63c83d4c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Match8small.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_pts1 = NULL;
cudaMalloc(&d_pts1, XSIZE*YSIZE);
float *d_pts2 = NULL;
cudaMalloc(&d_pts2, XSIZE*YSIZE);
float *d_score = NULL;
cudaMalloc(&d_score, XSIZE*YSIZE);
int *d_index = NULL;
cudaMalloc(&d_index, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Match8small<<<gridBlock,threadBlock>>>(d_pts1,d_pts2,d_score,d_index);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Match8small<<<gridBlock,threadBlock>>>(d_pts1,d_pts2,d_score,d_index);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Match8small<<<gridBlock,threadBlock>>>(d_pts1,d_pts2,d_score,d_index);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
518401afa65156618f44bdf3fd816330115ff0ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/TensorTransformations.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty_like.h>
#include <ATen/ops/roll_native.h>
#endif
#include <cstddef>
#include <vector>
namespace at::native {
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t roll_dim,
int64_t start,
int64_t size,
int64_t stride,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
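// e.g. size = 5, shift = 3 -> start = 2: output positions [0, 1, 2, 3, 4] along this dim read input positions [2, 3, 4, 0, 1].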
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
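// (e.g. -2 % 5 evaluates to -2 in C++ but to 3 in Python)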
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
at::ScalarType::ComplexHalf,
in_tensor.scalar_type(), "roll_cuda",
[&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
} // namespace at::native
| 518401afa65156618f44bdf3fd816330115ff0ae.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/TensorTransformations.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty_like.h>
#include <ATen/ops/roll_native.h>
#endif
#include <cstddef>
#include <vector>
namespace at::native {
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t roll_dim,
int64_t start,
int64_t size,
int64_t stride,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
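// e.g. size = 5, shift = 3 -> start = 2: output positions [0, 1, 2, 3, 4] along this dim read input positions [2, 3, 4, 0, 1].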
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
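// (e.g. -2 % 5 evaluates to -2 in C++ but to 3 in Python)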
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
at::ScalarType::ComplexHalf,
in_tensor.scalar_type(), "roll_cuda",
[&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
} // namespace at::native
|
86f762ea37fdfd56600240f739803ce3f6869640.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <hostParameters.cuh>
#include <deviceParameters.cuh>
#include <check.cuh>
#include <commonFunc.cuh>
extern __device__ float reluDelA_bn(const int wba_x, const int wba_y, const int outputIdx, const int miniBatchIdx, const int cnnLayer);
extern __global__ void kernelBackCnnBatchNormalization(const int cnnLayer);
void backCnnBatchNormalization(const int cnnLayer){
// puts("backCnnBatchNormalization start.");
// struct timeval t1, t2, t3;
// gettimeofday(&t1, NULL);
// set up the kernel dimensions
dim3 grid(getCnnOutputNums(cnnLayer), getMiniBatchNums()); // one block per neuron output of each mini-batch sample
dim3 block(getCnnWba_xNums(cnnLayer), getCnnWba_yNums(cnnLayer), 1);
// check the dimensions
checkGridSize(grid);
checkThreadSize(block);
// gettimeofday(&t2, NULL);
// puts("kernelBackCnnBatchNormalization start.");
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernelBackCnnBatchNormalization), dim3(grid), dim3(block), 0, 0, cnnLayer);
// puts("kernelBackCnnBatchNormalization end.");
// gettimeofday(&t3, NULL);
// puts("backCnnBatchNormalization end.");
}
__global__ void
kernelBackCnnBatchNormalization(const int cnnLayer){
int wba_x = threadIdx.x;
int wba_y = threadIdx.y;
int outputIdx = blockIdx.x;
int miniBatchIdx = blockIdx.y;
dCnnBnBack[getDCnnWbaIdx(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer)]\
= getDCnnABack(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer)\
* reluDelA_bn(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer);
/*
if(miniBatchIdx == 1){
printf("cnnABack(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, getDCnnABack(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer));
printf("cnnBn(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, getDCnnBn(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer));
printf("cnnBnBack(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, dCnnBnBack[getDCnnWbaIdx(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer)]);
}
*/
}
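// returns the ReLU derivative evaluated at the batch-normalized activation: 1 if it is positive, 0 otherwise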
__device__ float
reluDelA_bn(const int wba_x, const int wba_y, const int outputIdx, const int miniBatchIdx, const int cnnLayer){
float val;
val = (0 < getDCnnBn(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer));
return(val);
}
| 86f762ea37fdfd56600240f739803ce3f6869640.cu | #include <stdio.h>
#include <math.h>
#include <hostParameters.cuh>
#include <deviceParameters.cuh>
#include <check.cuh>
#include <commonFunc.cuh>
extern __device__ float reluDelA_bn(const int wba_x, const int wba_y, const int outputIdx, const int miniBatchIdx, const int cnnLayer);
extern __global__ void kernelBackCnnBatchNormalization(const int cnnLayer);
void backCnnBatchNormalization(const int cnnLayer){
// puts("backCnnBatchNormalization start.");
// struct timeval t1, t2, t3;
// gettimeofday(&t1, NULL);
// set up the kernel dimensions
dim3 grid(getCnnOutputNums(cnnLayer), getMiniBatchNums()); // one block per neuron output of each mini-batch sample
dim3 block(getCnnWba_xNums(cnnLayer), getCnnWba_yNums(cnnLayer), 1);
// check the dimensions
checkGridSize(grid);
checkThreadSize(block);
// gettimeofday(&t2, NULL);
// puts("kernelBackCnnBatchNormalization start.");
cudaDeviceSynchronize();
kernelBackCnnBatchNormalization<<<grid, block>>>(cnnLayer);
// puts("kernelBackCnnBatchNormalization end.");
// gettimeofday(&t3, NULL);
// puts("backCnnBatchNormalization end.");
}
__global__ void
kernelBackCnnBatchNormalization(const int cnnLayer){
int wba_x = threadIdx.x;
int wba_y = threadIdx.y;
int outputIdx = blockIdx.x;
int miniBatchIdx = blockIdx.y;
dCnnBnBack[getDCnnWbaIdx(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer)]\
= getDCnnABack(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer)\
* reluDelA_bn(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer);
/*
if(miniBatchIdx == 1){
printf("cnnABack(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, getDCnnABack(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer));
printf("cnnBn(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, getDCnnBn(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer));
printf("cnnBnBack(%d,%d,%d,%d,%d):%f\n", wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer, dCnnBnBack[getDCnnWbaIdx(wba_x,wba_y,outputIdx, miniBatchIdx, cnnLayer)]);
}
*/
}
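// returns the ReLU derivative evaluated at the batch-normalized activation: 1 if it is positive, 0 otherwise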
__device__ float
reluDelA_bn(const int wba_x, const int wba_y, const int outputIdx, const int miniBatchIdx, const int cnnLayer){
float val;
val = (0 < getDCnnBn(wba_x, wba_y, outputIdx, miniBatchIdx, cnnLayer));
return(val);
}
|
d4c9d35a38500867640984f499fcbf8c7af3fddd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
__global__ void cuPi(float *sum, int nbin, float step)
{
// Write your pi calculation kernel here
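// One possible (not prescribed) approach: midpoint-rule quadrature of pi = integral over [0, 1] of 4 / (1 + x*x) dx,
// accumulating 4.0f / (1.0f + x * x) times the bin width for each bin midpoint x handled by this thread.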
return;
}
int main(void)
{
float REAL_PI = 3.141592653589793238462643383;
int NBINS = 10; // modify this to achieve better performance
int STEP = 5; // modify this to achieve better performance
float* pi;
hipMallocManaged(&pi, sizeof(float));
// Calculate Pi
hipLaunchKernelGGL(( cuPi), dim3(1), dim3(1), 0, 0, pi, NBINS, STEP);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
printf("Pi calculated as %f\n", *pi);
float acc = REAL_PI - *pi;
printf("Accuracy of Pi calculation %f\n", acc);
// Free memory
hipFree(pi);
return 0;
} | d4c9d35a38500867640984f499fcbf8c7af3fddd.cu | #include <iostream>
#include <stdio.h>
#include <math.h>
__global__ void cuPi(float *sum, int nbin, float step)
{
// Write your pi calculation kernel here
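// One possible (not prescribed) approach: midpoint-rule quadrature of pi = integral over [0, 1] of 4 / (1 + x*x) dx,
// accumulating 4.0f / (1.0f + x * x) times the bin width for each bin midpoint x handled by this thread.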
return;
}
int main(void)
{
float REAL_PI = 3.141592653589793238462643383;
int NBINS = 10; // modify this to achieve better performance
int STEP = 5; // modify this to achieve better performance
float* pi;
cudaMallocManaged(&pi, sizeof(float));
// Calculate Pi
cuPi<<<1, 1>>>(pi, NBINS, STEP);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
printf("Pi calculated as %f\n", *pi);
float acc = REAL_PI - *pi;
printf("Accuracy of Pi calculation %f\n", acc);
// Free memory
cudaFree(pi);
return 0;
} |
d37f6a70526c4c19bfce35373d8398677ca1708c.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/runtime/flamegpu_host_api.h"
#include "flamegpu/runtime/flamegpu_host_agent_api.h"
#include "flamegpu/gpu/CUDAAgentModel.h"
FLAMEGPU_HOST_API::FLAMEGPU_HOST_API(CUDAAgentModel &_agentModel)
: random(),
agentModel(_agentModel),
d_cub_temp(nullptr),
d_cub_temp_size(0),
d_output_space(nullptr),
d_output_space_size(0) {
}
FLAMEGPU_HOST_API::~FLAMEGPU_HOST_API() {
if (d_cub_temp) {
gpuErrchk(hipFree(d_cub_temp));
d_cub_temp_size = 0;
}
if (d_output_space_size) {
gpuErrchk(hipFree(d_output_space));
d_output_space_size = 0;
}
}
// FLAMEGPU_HOST_AGENT_API FLAMEGPU_HOST_API::agent(const std::string &agent_name) {
// return FLAMEGPU_HOST_AGENT_API(*this, agentModel.getCUDAAgent(agent_name));
// }
FLAMEGPU_HOST_AGENT_API FLAMEGPU_HOST_API::agent(const std::string &agent_name, const std::string &stateName) {
return FLAMEGPU_HOST_AGENT_API(*this, agentModel.getCUDAAgent(agent_name), stateName);
}
bool FLAMEGPU_HOST_API::tempStorageRequiresResize(const CUB_Config &cc, const unsigned int &items) {
auto lao = cub_largestAllocatedOp.find(cc);
if (lao != cub_largestAllocatedOp.end()) {
if (lao->second < items)
return false;
}
return true;
}
void FLAMEGPU_HOST_API::resizeTempStorage(const CUB_Config &cc, const unsigned int &items, const size_t &newSize) {
if (newSize > d_cub_temp_size) {
if (d_cub_temp) {
gpuErrchk(hipFree(d_cub_temp));
}
gpuErrchk(hipMalloc(&d_cub_temp, newSize));
d_cub_temp_size = newSize;
}
assert(tempStorageRequiresResize(cc, items));
cub_largestAllocatedOp[cc] = items;
}
| d37f6a70526c4c19bfce35373d8398677ca1708c.cu | #include "flamegpu/runtime/flamegpu_host_api.h"
#include "flamegpu/runtime/flamegpu_host_agent_api.h"
#include "flamegpu/gpu/CUDAAgentModel.h"
FLAMEGPU_HOST_API::FLAMEGPU_HOST_API(CUDAAgentModel &_agentModel)
: random(),
agentModel(_agentModel),
d_cub_temp(nullptr),
d_cub_temp_size(0),
d_output_space(nullptr),
d_output_space_size(0) {
}
FLAMEGPU_HOST_API::~FLAMEGPU_HOST_API() {
if (d_cub_temp) {
gpuErrchk(cudaFree(d_cub_temp));
d_cub_temp_size = 0;
}
if (d_output_space_size) {
gpuErrchk(cudaFree(d_output_space));
d_output_space_size = 0;
}
}
// FLAMEGPU_HOST_AGENT_API FLAMEGPU_HOST_API::agent(const std::string &agent_name) {
// return FLAMEGPU_HOST_AGENT_API(*this, agentModel.getCUDAAgent(agent_name));
// }
FLAMEGPU_HOST_AGENT_API FLAMEGPU_HOST_API::agent(const std::string &agent_name, const std::string &stateName) {
return FLAMEGPU_HOST_AGENT_API(*this, agentModel.getCUDAAgent(agent_name), stateName);
}
bool FLAMEGPU_HOST_API::tempStorageRequiresResize(const CUB_Config &cc, const unsigned int &items) {
auto lao = cub_largestAllocatedOp.find(cc);
if (lao != cub_largestAllocatedOp.end()) {
if (lao->second < items)
return false;
}
return true;
}
void FLAMEGPU_HOST_API::resizeTempStorage(const CUB_Config &cc, const unsigned int &items, const size_t &newSize) {
if (newSize > d_cub_temp_size) {
if (d_cub_temp) {
gpuErrchk(cudaFree(d_cub_temp));
}
gpuErrchk(cudaMalloc(&d_cub_temp, newSize));
d_cub_temp_size = newSize;
}
assert(tempStorageRequiresResize(cc, items));
cub_largestAllocatedOp[cc] = items;
}
|
c132161b60a5e847995ede64a81de2fa80328414.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/div_rtn.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/im2col.cuh>
#include <ATen/native/im2col_shape_check.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_backward_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/im2col_native.h>
#endif
namespace at {
namespace native {
namespace {
void col2im_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
TensorArg input_arg{input_, "input", 1};
TensorArg output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
col2im_shape_check(
input_,
Tensor(),
output_height,
output_width,
kernel_height,
kernel_width,
dilation_height,
dilation_width,
pad_height,
pad_width,
stride_height,
stride_width);
Tensor input = input_.contiguous();
bool batched_input = true;
if (input.dim() == 2) {
// Force batch
batched_input = false;
input = input.view({1, input.size(0), input.size(1)});
}
int64_t batch_size = input.size(0);
int64_t n_input_plane = input.size(1);
int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height);
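// the column-form input packs kernel_height * kernel_width rows per output channel, so divide to recover the channel count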
int64_t input_batch_stride = input.stride(0);
output.resize_({batch_size, n_output_plane, output_height, output_width});
output.zero_();
int64_t output_batch_stride = output.stride(0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
input.scalar_type(), "col2im_out_cuda", [&] {
int64_t height_col = (output_height + 2 * pad_height -
(dilation_height * (kernel_height - 1) + 1)) /
stride_height +
1;
int64_t width_col = (output_width + 2 * pad_width -
(dilation_width * (kernel_width - 1) + 1)) /
stride_width +
1;
col2im_batched(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data_ptr<scalar_t>(),
input_batch_stride,
batch_size,
n_output_plane,
output_height,
output_width,
height_col,
width_col,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output.data_ptr<scalar_t>(),
output_batch_stride);
if (!batched_input) {
output.resize_({n_output_plane, output_height, output_width});
}
});
}
void col2im_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
// im2col_out_cuda checks size of kernel_size, dilation, padding and stride
at::native::im2col_out_cuda(
grad_output, kernel_size, dilation, padding, stride, grad_input);
}
} // namespace
Tensor& col2im_out_cuda(const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& output) {
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor col2im_cuda(
const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor& col2im_backward_out_cuda(const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& grad_input) {
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
Tensor col2im_backward_cuda(
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
} // namespace native
} // namespace at
| c132161b60a5e847995ede64a81de2fa80328414.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/div_rtn.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/native/im2col_shape_check.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/col2im_native.h>
#include <ATen/ops/col2im_backward_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/im2col_native.h>
#endif
namespace at {
namespace native {
namespace {
void col2im_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
TensorArg input_arg{input_, "input", 1};
TensorArg output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
int64_t output_height = output_size[0];
int64_t output_width = output_size[1];
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
col2im_shape_check(
input_,
Tensor(),
output_height,
output_width,
kernel_height,
kernel_width,
dilation_height,
dilation_width,
pad_height,
pad_width,
stride_height,
stride_width);
Tensor input = input_.contiguous();
bool batched_input = true;
if (input.dim() == 2) {
// Force batch
batched_input = false;
input = input.view({1, input.size(0), input.size(1)});
}
int64_t batch_size = input.size(0);
int64_t n_input_plane = input.size(1);
int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height);
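// the column-form input packs kernel_height * kernel_width rows per output channel, so divide to recover the channel count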
int64_t input_batch_stride = input.stride(0);
output.resize_({batch_size, n_output_plane, output_height, output_width});
output.zero_();
int64_t output_batch_stride = output.stride(0);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
input.scalar_type(), "col2im_out_cuda", [&] {
int64_t height_col = (output_height + 2 * pad_height -
(dilation_height * (kernel_height - 1) + 1)) /
stride_height +
1;
int64_t width_col = (output_width + 2 * pad_width -
(dilation_width * (kernel_width - 1) + 1)) /
stride_width +
1;
col2im_batched(
at::cuda::getCurrentCUDAStream(),
input.data_ptr<scalar_t>(),
input_batch_stride,
batch_size,
n_output_plane,
output_height,
output_width,
height_col,
width_col,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output.data_ptr<scalar_t>(),
output_batch_stride);
if (!batched_input) {
output.resize_({n_output_plane, output_height, output_width});
}
});
}
void col2im_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
// im2col_out_cuda checks size of kernel_size, dilation, padding and stride
at::native::im2col_out_cuda(
grad_output, kernel_size, dilation, padding, stride, grad_input);
}
} // namespace
Tensor& col2im_out_cuda(const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& output) {
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor col2im_cuda(
const Tensor& input,
IntArrayRef output_size,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_out_cuda_template(
output, input, output_size, kernel_size, dilation, padding, stride);
return output;
}
Tensor& col2im_backward_out_cuda(const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride,
Tensor& grad_input) {
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
Tensor col2im_backward_cuda(
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef dilation,
IntArrayRef padding,
IntArrayRef stride) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
col2im_backward_out_cuda_template(
grad_input, grad_output, kernel_size, dilation, padding, stride);
return grad_input;
}
} // namespace native
} // namespace at
|
367afdba77372068c0760910c048761a8a033823.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <optix.h>
#include "system_data.h"
#include "per_ray_data.h"
#include "shader_common.h"
#include "random_number_generators.h"
extern "C" __constant__ SystemData sysData;
__forceinline__ __device__ float3 integrator(PerRayData& prd)
{
// This renderer supports nested volumes. Four levels is plenty enough for most cases.
// The absorption coefficient and IOR of the volume the ray is currently inside.
float4 absorptionStack[MATERIAL_STACK_SIZE]; // .xyz == absorptionCoefficient (sigma_a), .w == index of refraction
int stackIdx = MATERIAL_STACK_EMPTY; // Start with empty nested materials stack.
// Russian Roulette path termination after a specified number of bounces needs the current depth.
int depth = 0; // Path segment index. Primary ray is 0.
float3 radiance = make_float3(0.0f); // Start with black.
float3 throughput = make_float3(1.0f); // The throughput for the next radiance, starts with 1.0f.
// Assumes that the primary ray starts in vacuum.
prd.absorption_ior = make_float4(0.0f, 0.0f, 0.0f, 1.0f); // No absorption, IOR == 1.0f,
prd.sigma_t = make_float3(0.0f); // No extinction.
prd.flags = 0;
while (depth < sysData.pathLengths.y)
{
prd.wo = -prd.wi; // Direction to observer.
prd.ior = make_float2(1.0f); // Reset the volume IORs.
prd.distance = RT_DEFAULT_MAX; // Shoot the next ray with maximum length.
prd.flags &= FLAG_CLEAR_MASK; // Clear all non-persistent flags. In this demo only the last diffuse surface interaction stays.
// Special case for volume handling.
if (MATERIAL_STACK_FIRST <= stackIdx) // Inside a volume?
{
prd.flags |= FLAG_VOLUME; // Indicate that we're inside a volume. => At least absorption calculation needs to happen.
prd.sigma_t = make_float3(absorptionStack[stackIdx]); // There is only volume absorption in this demo, no volume scattering.
prd.ior.x = absorptionStack[stackIdx].w; // The IOR of the volume we're inside. Needed for eta calculations in transparent materials.
if (MATERIAL_STACK_FIRST <= stackIdx - 1)
{
prd.ior.y = absorptionStack[stackIdx - 1].w; // The IOR of the surrounding volume.
}
}
// Put payload pointer into two unsigned integers. Actually const, but that's not what optixTrace() expects.
uint2 payload = splitPointer(&prd);
// Note that the primary rays (or volume scattering miss cases) wouldn't normally offset the ray t_min by sysSceneEpsilon. Keep it simple here.
optixTrace(sysData.topObject,
prd.pos, prd.wi, // origin, direction
sysData.sceneEpsilon, prd.distance, 0.0f, // tmin, tmax, time
OptixVisibilityMask(0xFF), OPTIX_RAY_FLAG_NONE,
RAYTYPE_RADIANCE, NUM_RAYTYPES, RAYTYPE_RADIANCE,
payload.x, payload.y);
// This renderer supports nested volumes.
if (prd.flags & FLAG_VOLUME) // We're inside a volume?
{
// We're inside a volume. Calculate the extinction along the current path segment in any case.
// The transmittance along the current path segment inside a volume needs to attenuate the ray throughput with the extinction
// before it modulates the radiance of the hitpoint.
throughput *= expf(-prd.distance * prd.sigma_t);
}
radiance += throughput * prd.radiance;
// Path termination by miss shader or sample() routines.
// If terminate is true, f_over_pdf and pdf might be undefined.
if ((prd.flags & FLAG_TERMINATE) || prd.pdf <= 0.0f || isNull(prd.f_over_pdf))
{
break;
}
// PERF f_over_pdf already contains the proper throughput adjustment for diffuse materials: f * (fabsf(dot(prd.wi, state.normal)) / prd.pdf);
throughput *= prd.f_over_pdf;
// Unbiased Russian Roulette path termination.
if (sysData.pathLengths.x <= depth) // Start termination after a minimum number of bounces.
{
const float probability = fmaxf(throughput); // DEBUG Other options: // intensity(throughput); // fminf(0.5f, intensity(throughput));
if (probability < rng(prd.seed)) // Paths with lower probability to continue are terminated earlier.
{
break;
}
throughput /= probability; // Path isn't terminated. Adjust the throughput so that the average is right again.
}
// Adjust the material volume stack if the geometry is not thin-walled but a border between two volumes and
// the outgoing ray direction was a transmission.
if ((prd.flags & (FLAG_THINWALLED | FLAG_TRANSMISSION)) == FLAG_TRANSMISSION)
{
// Transmission.
if (prd.flags & FLAG_FRONTFACE) // Entered a new volume?
{
// Push the entered material's volume properties onto the volume stack.
//rtAssert((stackIdx < MATERIAL_STACK_LAST), 1); // Overflow?
stackIdx = min(stackIdx + 1, MATERIAL_STACK_LAST);
absorptionStack[stackIdx] = prd.absorption_ior;
}
else // Exited the current volume?
{
// Pop the top of stack material volume.
// This assert fires and is intended because I tuned the frontface checks so that there are more exits than enters at silhouettes.
//rtAssert((MATERIAL_STACK_EMPTY < stackIdx), 0); // Underflow?
stackIdx = max(stackIdx - 1, MATERIAL_STACK_EMPTY);
}
}
++depth; // Next path segment.
}
return radiance;
}
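// Illustrative note (not part of the original NVIDIA sample): the Russian Roulette step
// inside integrator() stays unbiased because surviving paths are reweighted. With
// survival probability p = fmaxf(throughput), the expected throughput after the test is
//   E[throughput'] = p * (throughput / p) + (1 - p) * 0 = throughput,
// so terminating low-contribution paths early does not change the mean of the estimate.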
__forceinline__ __device__ unsigned int distribute(const uint2 launchIndex)
{
// First calculate block coordinates of this launch index.
// That is the launch index divided by the tile dimensions. (No operator>>() on vectors?)
const unsigned int xBlock = launchIndex.x >> sysData.tileShift.x;
const unsigned int yBlock = launchIndex.y >> sysData.tileShift.y;
// Each device needs to start at a different column and each row should start with a different device.
const unsigned int xTile = xBlock * sysData.deviceCount + ((sysData.deviceIndex + yBlock) % sysData.deviceCount);
// The horizontal pixel coordinate is: tile coordinate * tile width + launch index % tile width.
return xTile * sysData.tileSize.x + (launchIndex.x & (sysData.tileSize.x - 1)); // tileSize needs to be power-of-two for this modulo operation.
}
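// Illustrative example with hypothetical numbers (not from the original sample): with
// tileSize = (8, 8), i.e. tileShift = (3, 3), and deviceCount = 2, launch index (19, 5)
// gives xBlock = 19 >> 3 = 2 and yBlock = 5 >> 3 = 0. Device 0 maps this to
// xTile = 2 * 2 + (0 + 0) % 2 = 4 and pixel column 4 * 8 + (19 & 7) = 35, while device 1
// maps it to column 43; on the next tile row (yBlock = 1) the two devices swap columns,
// giving the checkerboard distribution of 8-pixel-wide tiles across GPUs.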
extern "C" __global__ void __raygen__path_tracer()
{
#if USE_TIME_VIEW
clock_t clockBegin = clock();
#endif
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
unsigned int launchColumn = theLaunchIndex.x;
if (sysData.distribution && 1 < sysData.deviceCount) // Multi-GPU distribution required?
{
launchColumn = distribute(theLaunchIndex); // Calculate mapping from launch index to pixel index.
if (sysData.resolution.x <= launchColumn) // Check if the launchColumn is outside the resolution.
{
return;
}
}
PerRayData prd;
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions()); // For multi-GPU tiling this is (resolution + deviceCount - 1) / deviceCount.
// Initialize the random number generator seed from the linear pixel index and the iteration index.
const unsigned int seedIndex = theLaunchDim.x * theLaunchIndex.y + launchColumn * sysData.deviceCount + sysData.deviceIndex;
prd.seed = tea<4>(seedIndex, sysData.iterationIndex); // PERF This template really generates a lot of instructions.
// Decoupling the pixel coordinates from the screen size will allow for partial rendering algorithms.
// Resolution is the actual full rendering resolution and for the single GPU strategy, theLaunchDim == resolution.
const float2 screen = make_float2(sysData.resolution); // == theLaunchDim for rendering strategy RS_SINGLE_GPU.
const float2 pixel = make_float2(launchColumn, theLaunchIndex.y);
const float2 sample = rng2(prd.seed); // Random per pixel jitter.
// Lens shaders
optixDirectCall<void, const float2, const float2, const float2, float3&, float3&>(sysData.lensShader, screen, pixel, sample, prd.pos, prd.wi);
float3 radiance = integrator(prd);
#if USE_DEBUG_EXCEPTIONS
// DEBUG Highlight numerical errors.
if (isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z))
{
radiance = make_float3(1000000.0f, 0.0f, 0.0f); // super red
}
else if (isinf(radiance.x) || isinf(radiance.y) || isinf(radiance.z))
{
radiance = make_float3(0.0f, 1000000.0f, 0.0f); // super green
}
else if (radiance.x < 0.0f || radiance.y < 0.0f || radiance.z < 0.0f)
{
radiance = make_float3(0.0f, 0.0f, 1000000.0f); // super blue
}
#else
// NaN values will never go away. Filter them out before they can arrive in the output buffer.
// This only has an effect if the debug coloring above is off!
if (!(isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z)))
#endif
{
// The outputBuffer is a hipDeviceptr_t to allow different formats.
// DAR FIXME Implement half4 support.
float4* buffer = reinterpret_cast<float4*>(sysData.outputBuffer);
// Note that the launch dimension is independent of resolution in some rendering strategies.
const unsigned int index = theLaunchIndex.y * sysData.resolution.x + launchColumn;
#if USE_TIME_VIEW
clock_t clockEnd = clock();
const float alpha = (clockEnd - clockBegin) * sysData.clockScale;
float4 result = make_float4(radiance, alpha);
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
result = lerp(dst, result, 1.0f / float(sysData.iterationIndex + 1)); // Accumulate the alpha as well.
}
// iterationIndex 0 will fill the buffer.
// If this isn't done separately, the result of the lerp() above is undefined, e.g. dst could be NaN.
buffer[index] = result;
#else
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
radiance = lerp(make_float3(dst), radiance, 1.0f / float(sysData.iterationIndex + 1)); // Only accumulate the radiance, alpha stays 1.0f.
}
// iterationIndex 0 will fill the buffer.
// If this isn't done separately, the result of the lerp() above is undefined, e.g. dst could be NaN.
buffer[index] = make_float4(radiance, 1.0f);
#endif
}
}
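// Illustrative note (not part of the original sample): the lerp() accumulation above is
// a running average. Writing a_i for the buffer content after i samples and x for the
// new radiance,
//   a_{i+1} = lerp(a_i, x, 1 / (i + 1)) = a_i + (x - a_i) / (i + 1),
// which equals the arithmetic mean of all i + 1 samples, so the image refines over
// iterations without storing a per-pixel sample count.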
extern "C" __global__ void __raygen__path_tracer_local_copy()
{
#if USE_TIME_VIEW
clock_t clockBegin = clock();
#endif
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
unsigned int launchColumn = theLaunchIndex.x;
if (sysData.distribution && 1 < sysData.deviceCount) // Multi-GPU distribution required?
{
launchColumn = distribute(theLaunchIndex); // Calculate mapping from launch index to pixel index.
if (sysData.resolution.x <= launchColumn) // Check if the launchColumn is outside the resolution.
{
return;
}
}
PerRayData prd;
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions()); // For multi-GPU tiling this is (resolution + deviceCount - 1) / deviceCount.
// Initialize the random number generator seed from some unique pixel index and the iteration index.
const unsigned int seedIndex = theLaunchDim.x * theLaunchIndex.y + launchColumn * sysData.deviceCount + sysData.deviceIndex;
prd.seed = tea<4>(seedIndex, sysData.iterationIndex); // PERF This template really generates a lot of instructions.
// Decoupling the pixel coordinates from the screen size will allow for partial rendering algorithms.
// Resolution is the actual full rendering resolution and for the single GPU strategy, theLaunchDim == resolution.
const float2 screen = make_float2(sysData.resolution); // == theLaunchDim for rendering strategy RS_SINGLE_GPU.
const float2 pixel = make_float2(launchColumn, theLaunchIndex.y);
const float2 sample = rng2(prd.seed); // Random per pixel jitter.
// Lens shaders
optixDirectCall<void, const float2, const float2, const float2, float3&, float3&>(sysData.lensShader, screen, pixel, sample, prd.pos, prd.wi);
float3 radiance = integrator(prd);
#if USE_DEBUG_EXCEPTIONS
// DEBUG Highlight numerical errors.
if (isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z))
{
radiance = make_float3(1000000.0f, 0.0f, 0.0f); // super red
}
else if (isinf(radiance.x) || isinf(radiance.y) || isinf(radiance.z))
{
radiance = make_float3(0.0f, 1000000.0f, 0.0f); // super green
}
else if (radiance.x < 0.0f || radiance.y < 0.0f || radiance.z < 0.0f)
{
radiance = make_float3(0.0f, 0.0f, 1000000.0f); // super blue
}
#else
// NaN values will never go away. Filter them out before they can arrive in the output buffer.
// This only has an effect if the debug coloring above is off!
if (!(isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z)))
#endif
{
// The texelBuffer is a hipDeviceptr_t to allow different formats.
float4* buffer = reinterpret_cast<float4*>(sysData.texelBuffer); // This is a per device launch sized buffer in this renderer strategy.
    // This renderer writes the results into individual launch-sized local buffers and composites them in a separate native CUDA kernel.
const unsigned int index = theLaunchIndex.y * theLaunchDim.x + theLaunchIndex.x;
#if USE_TIME_VIEW
clock_t clockEnd = clock();
const float alpha = (clockEnd - clockBegin) * sysData.clockScale;
float4 result = make_float4(radiance, alpha);
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
result = lerp(dst, result, 1.0f / float(sysData.iterationIndex + 1)); // Accumulate the alpha as well.
}
buffer[index] = result;
#else
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
radiance = lerp(make_float3(dst), radiance, 1.0f / float(sysData.iterationIndex + 1)); // Only accumulate the radiance, alpha stays 1.0f.
}
buffer[index] = make_float4(radiance, 1.0f);
#endif
}
}
| 367afdba77372068c0760910c048761a8a033823.cu | /*
* Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <optix.h>
#include "system_data.h"
#include "per_ray_data.h"
#include "shader_common.h"
#include "random_number_generators.h"
extern "C" __constant__ SystemData sysData;
__forceinline__ __device__ float3 integrator(PerRayData& prd)
{
// This renderer supports nested volumes. Four levels is plenty enough for most cases.
// The absorption coefficient and IOR of the volume the ray is currently inside.
float4 absorptionStack[MATERIAL_STACK_SIZE]; // .xyz == absorptionCoefficient (sigma_a), .w == index of refraction
int stackIdx = MATERIAL_STACK_EMPTY; // Start with empty nested materials stack.
// Russian Roulette path termination after a specified number of bounces needs the current depth.
int depth = 0; // Path segment index. Primary ray is 0.
float3 radiance = make_float3(0.0f); // Start with black.
float3 throughput = make_float3(1.0f); // The throughput for the next radiance, starts with 1.0f.
// Assumes that the primary ray starts in vacuum.
prd.absorption_ior = make_float4(0.0f, 0.0f, 0.0f, 1.0f); // No absorption, IOR == 1.0f,
prd.sigma_t = make_float3(0.0f); // No extinction.
prd.flags = 0;
while (depth < sysData.pathLengths.y)
{
prd.wo = -prd.wi; // Direction to observer.
prd.ior = make_float2(1.0f); // Reset the volume IORs.
prd.distance = RT_DEFAULT_MAX; // Shoot the next ray with maximum length.
prd.flags &= FLAG_CLEAR_MASK; // Clear all non-persistent flags. In this demo only the last diffuse surface interaction stays.
// Special case for volume handling.
if (MATERIAL_STACK_FIRST <= stackIdx) // Inside a volume?
{
prd.flags |= FLAG_VOLUME; // Indicate that we're inside a volume. => At least absorption calculation needs to happen.
prd.sigma_t = make_float3(absorptionStack[stackIdx]); // There is only volume absorption in this demo, no volume scattering.
prd.ior.x = absorptionStack[stackIdx].w; // The IOR of the volume we're inside. Needed for eta calculations in transparent materials.
if (MATERIAL_STACK_FIRST <= stackIdx - 1)
{
prd.ior.y = absorptionStack[stackIdx - 1].w; // The IOR of the surrounding volume.
}
}
// Put payload pointer into two unsigned integers. Actually const, but that's not what optixTrace() expects.
uint2 payload = splitPointer(&prd);
// Note that the primary rays (or volume scattering miss cases) wouldn't normally offset the ray t_min by sysSceneEpsilon. Keep it simple here.
optixTrace(sysData.topObject,
prd.pos, prd.wi, // origin, direction
sysData.sceneEpsilon, prd.distance, 0.0f, // tmin, tmax, time
OptixVisibilityMask(0xFF), OPTIX_RAY_FLAG_NONE,
RAYTYPE_RADIANCE, NUM_RAYTYPES, RAYTYPE_RADIANCE,
payload.x, payload.y);
// This renderer supports nested volumes.
if (prd.flags & FLAG_VOLUME) // We're inside a volume?
{
// We're inside a volume. Calculate the extinction along the current path segment in any case.
// The transmittance along the current path segment inside a volume needs to attenuate the ray throughput with the extinction
// before it modulates the radiance of the hitpoint.
throughput *= expf(-prd.distance * prd.sigma_t);
}
radiance += throughput * prd.radiance;
// Path termination by miss shader or sample() routines.
// If terminate is true, f_over_pdf and pdf might be undefined.
if ((prd.flags & FLAG_TERMINATE) || prd.pdf <= 0.0f || isNull(prd.f_over_pdf))
{
break;
}
// PERF f_over_pdf already contains the proper throughput adjustment for diffuse materials: f * (fabsf(dot(prd.wi, state.normal)) / prd.pdf);
throughput *= prd.f_over_pdf;
// Unbiased Russian Roulette path termination.
if (sysData.pathLengths.x <= depth) // Start termination after a minimum number of bounces.
{
const float probability = fmaxf(throughput); // DEBUG Other options: // intensity(throughput); // fminf(0.5f, intensity(throughput));
if (probability < rng(prd.seed)) // Paths with lower probability to continue are terminated earlier.
{
break;
}
throughput /= probability; // Path isn't terminated. Adjust the throughput so that the average is right again.
}
// Adjust the material volume stack if the geometry is not thin-walled but a border between two volumes and
// the outgoing ray direction was a transmission.
if ((prd.flags & (FLAG_THINWALLED | FLAG_TRANSMISSION)) == FLAG_TRANSMISSION)
{
// Transmission.
if (prd.flags & FLAG_FRONTFACE) // Entered a new volume?
{
// Push the entered material's volume properties onto the volume stack.
//rtAssert((stackIdx < MATERIAL_STACK_LAST), 1); // Overflow?
stackIdx = min(stackIdx + 1, MATERIAL_STACK_LAST);
absorptionStack[stackIdx] = prd.absorption_ior;
}
else // Exited the current volume?
{
// Pop the top of stack material volume.
// This assert fires and is intended because I tuned the frontface checks so that there are more exits than enters at silhouettes.
//rtAssert((MATERIAL_STACK_EMPTY < stackIdx), 0); // Underflow?
stackIdx = max(stackIdx - 1, MATERIAL_STACK_EMPTY);
}
}
++depth; // Next path segment.
}
return radiance;
}
__forceinline__ __device__ unsigned int distribute(const uint2 launchIndex)
{
// First calculate block coordinates of this launch index.
// That is the launch index divided by the tile dimensions. (No operator>>() on vectors?)
const unsigned int xBlock = launchIndex.x >> sysData.tileShift.x;
const unsigned int yBlock = launchIndex.y >> sysData.tileShift.y;
// Each device needs to start at a different column and each row should start with a different device.
const unsigned int xTile = xBlock * sysData.deviceCount + ((sysData.deviceIndex + yBlock) % sysData.deviceCount);
// The horizontal pixel coordinate is: tile coordinate * tile width + launch index % tile width.
return xTile * sysData.tileSize.x + (launchIndex.x & (sysData.tileSize.x - 1)); // tileSize needs to be power-of-two for this modulo operation.
}
extern "C" __global__ void __raygen__path_tracer()
{
#if USE_TIME_VIEW
clock_t clockBegin = clock();
#endif
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
unsigned int launchColumn = theLaunchIndex.x;
if (sysData.distribution && 1 < sysData.deviceCount) // Multi-GPU distribution required?
{
launchColumn = distribute(theLaunchIndex); // Calculate mapping from launch index to pixel index.
if (sysData.resolution.x <= launchColumn) // Check if the launchColumn is outside the resolution.
{
return;
}
}
PerRayData prd;
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions()); // For multi-GPU tiling this is (resolution + deviceCount - 1) / deviceCount.
// Initialize the random number generator seed from the linear pixel index and the iteration index.
const unsigned int seedIndex = theLaunchDim.x * theLaunchIndex.y + launchColumn * sysData.deviceCount + sysData.deviceIndex;
prd.seed = tea<4>(seedIndex, sysData.iterationIndex); // PERF This template really generates a lot of instructions.
// Decoupling the pixel coordinates from the screen size will allow for partial rendering algorithms.
// Resolution is the actual full rendering resolution and for the single GPU strategy, theLaunchDim == resolution.
const float2 screen = make_float2(sysData.resolution); // == theLaunchDim for rendering strategy RS_SINGLE_GPU.
const float2 pixel = make_float2(launchColumn, theLaunchIndex.y);
const float2 sample = rng2(prd.seed); // Random per pixel jitter.
// Lens shaders
optixDirectCall<void, const float2, const float2, const float2, float3&, float3&>(sysData.lensShader, screen, pixel, sample, prd.pos, prd.wi);
float3 radiance = integrator(prd);
#if USE_DEBUG_EXCEPTIONS
// DEBUG Highlight numerical errors.
if (isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z))
{
radiance = make_float3(1000000.0f, 0.0f, 0.0f); // super red
}
else if (isinf(radiance.x) || isinf(radiance.y) || isinf(radiance.z))
{
radiance = make_float3(0.0f, 1000000.0f, 0.0f); // super green
}
else if (radiance.x < 0.0f || radiance.y < 0.0f || radiance.z < 0.0f)
{
radiance = make_float3(0.0f, 0.0f, 1000000.0f); // super blue
}
#else
// NaN values will never go away. Filter them out before they can arrive in the output buffer.
// This only has an effect if the debug coloring above is off!
if (!(isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z)))
#endif
{
// The outputBuffer is a CUdeviceptr to allow different formats.
// DAR FIXME Implement half4 support.
float4* buffer = reinterpret_cast<float4*>(sysData.outputBuffer);
// Note that the launch dimension is independent of resolution in some rendering strategies.
const unsigned int index = theLaunchIndex.y * sysData.resolution.x + launchColumn;
#if USE_TIME_VIEW
clock_t clockEnd = clock();
const float alpha = (clockEnd - clockBegin) * sysData.clockScale;
float4 result = make_float4(radiance, alpha);
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
result = lerp(dst, result, 1.0f / float(sysData.iterationIndex + 1)); // Accumulate the alpha as well.
}
// iterationIndex 0 will fill the buffer.
// If this isn't done separately, the result of the lerp() above is undefined, e.g. dst could be NaN.
buffer[index] = result;
#else
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
radiance = lerp(make_float3(dst), radiance, 1.0f / float(sysData.iterationIndex + 1)); // Only accumulate the radiance, alpha stays 1.0f.
}
// iterationIndex 0 will fill the buffer.
// If this isn't done separately, the result of the lerp() above is undefined, e.g. dst could be NaN.
buffer[index] = make_float4(radiance, 1.0f);
#endif
}
}
extern "C" __global__ void __raygen__path_tracer_local_copy()
{
#if USE_TIME_VIEW
clock_t clockBegin = clock();
#endif
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
unsigned int launchColumn = theLaunchIndex.x;
if (sysData.distribution && 1 < sysData.deviceCount) // Multi-GPU distribution required?
{
launchColumn = distribute(theLaunchIndex); // Calculate mapping from launch index to pixel index.
if (sysData.resolution.x <= launchColumn) // Check if the launchColumn is outside the resolution.
{
return;
}
}
PerRayData prd;
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions()); // For multi-GPU tiling this is (resolution + deviceCount - 1) / deviceCount.
// Initialize the random number generator seed from some unique pixel index and the iteration index.
const unsigned int seedIndex = theLaunchDim.x * theLaunchIndex.y + launchColumn * sysData.deviceCount + sysData.deviceIndex;
prd.seed = tea<4>(seedIndex, sysData.iterationIndex); // PERF This template really generates a lot of instructions.
// Decoupling the pixel coordinates from the screen size will allow for partial rendering algorithms.
// Resolution is the actual full rendering resolution and for the single GPU strategy, theLaunchDim == resolution.
const float2 screen = make_float2(sysData.resolution); // == theLaunchDim for rendering strategy RS_SINGLE_GPU.
const float2 pixel = make_float2(launchColumn, theLaunchIndex.y);
const float2 sample = rng2(prd.seed); // Random per pixel jitter.
// Lens shaders
optixDirectCall<void, const float2, const float2, const float2, float3&, float3&>(sysData.lensShader, screen, pixel, sample, prd.pos, prd.wi);
float3 radiance = integrator(prd);
#if USE_DEBUG_EXCEPTIONS
// DEBUG Highlight numerical errors.
if (isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z))
{
radiance = make_float3(1000000.0f, 0.0f, 0.0f); // super red
}
else if (isinf(radiance.x) || isinf(radiance.y) || isinf(radiance.z))
{
radiance = make_float3(0.0f, 1000000.0f, 0.0f); // super green
}
else if (radiance.x < 0.0f || radiance.y < 0.0f || radiance.z < 0.0f)
{
radiance = make_float3(0.0f, 0.0f, 1000000.0f); // super blue
}
#else
// NaN values will never go away. Filter them out before they can arrive in the output buffer.
// This only has an effect if the debug coloring above is off!
if (!(isnan(radiance.x) || isnan(radiance.y) || isnan(radiance.z)))
#endif
{
// The texelBuffer is a CUdeviceptr to allow different formats.
float4* buffer = reinterpret_cast<float4*>(sysData.texelBuffer); // This is a per device launch sized buffer in this renderer strategy.
    // This renderer writes the results into individual launch-sized local buffers and composites them in a separate native CUDA kernel.
const unsigned int index = theLaunchIndex.y * theLaunchDim.x + theLaunchIndex.x;
#if USE_TIME_VIEW
clock_t clockEnd = clock();
const float alpha = (clockEnd - clockBegin) * sysData.clockScale;
float4 result = make_float4(radiance, alpha);
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
result = lerp(dst, result, 1.0f / float(sysData.iterationIndex + 1)); // Accumulate the alpha as well.
}
buffer[index] = result;
#else
if (0 < sysData.iterationIndex)
{
const float4 dst = buffer[index]; // RGBA32F
radiance = lerp(make_float3(dst), radiance, 1.0f / float(sysData.iterationIndex + 1)); // Only accumulate the radiance, alpha stays 1.0f.
}
buffer[index] = make_float4(radiance, 1.0f);
#endif
}
}
|
83019df81b5193341a1d6b02edc6dde13ef90b77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// constants for approximating the normal cdf
static const float kA = 1.41421356237309504; // sqrt(2)
static const float kAT = 0.5;
static const float kBT = 0.7978845608028654; // sqrt(2.0/M_PI)
static const float kCT = 0.035677408136300125; // 0.044715 * sqrt(2.0/M_PI)
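// Illustrative note (not part of the upstream Paddle source): the constants above encode
// the two GELU variants used by the kernels below,
//   exact: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), evaluated as
//          erff(in * 0.5f * kA) with kA = sqrt(2);
//   tanh : gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
//          evaluated as in * (kAT + kAT * tanh(in * (kCT * in * in + kBT))).
// A host-side reference for checking either variant could look like this hypothetical
// helper (assuming <cmath> is available):
//   float gelu_ref(float x) { return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f))); }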
bool GeluPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims GeluPlugin::getOutputDimensions(int index,
const nvinfer1::Dims* in_dims,
int nb_inputs) TRT_NOEXCEPT {
assert(nb_inputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const& input_dims = in_dims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T, unsigned TPB>
__global__ void gelu_kernel(const T a, int n, const T* input, T* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
const T cdf = 0.5f * (1.0f + erff(in * 0.5f * a));
output[idx] = in * cdf;
}
}
template <typename T>
__device__ T do_tanh(T a);
template <>
__device__ float do_tanh<float>(float a) {
return tanf(a);
}
template <>
__device__ half do_tanh<half>(half a) {
const float tmp = tanhf(__half2float(a));
return __float2half(tmp);
}
// the kernel below is not aligned with the fluid fp32 forward ones; use it for
// fp16.
template <typename T, unsigned TPB>
__global__ void no_exact_gelu_kernel(
const T a, const T b, const T c, int n, const T* input, T* output) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
const T tmp = in * (c * in * in + b);
const T cdf = a + a * do_tanh<T>(tmp);
output[idx] = in * cdf;
}
#endif
}
int GeluPlugin::enqueue(int batch_size,
const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs,
void*,
hipStream_t stream) {
#else
void* const* outputs,
void*,
hipStream_t stream) TRT_NOEXCEPT {
#endif
const auto& input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
auto type = getDataType();
if (type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp32";
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
hipLaunchKernelGGL(( gelu_kernel<float, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream, kA, num, input, output);
} else if (type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp16";
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
hipLaunchKernelGGL(( no_exact_gelu_kernel<half, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream,
kAT, kBT, kCT, num, input, output);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Gelu TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
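// Illustrative note (not part of the upstream source): grid_size above is the usual
// ceiling division (num + block_size - 1) / block_size. For example, num = 1000 and
// block_size = 256 give grid_size = 4, i.e. 1024 launched threads; the extra 24 threads
// fail the `idx < n` guard inside the kernels and do nothing.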
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
nvinfer1::DimsExprs GeluPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs* inputs,
int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool GeluPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType GeluPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Gelu Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int GeluPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp32";
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
hipLaunchKernelGGL(( gelu_kernel<float, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream, kA, num, input, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp16";
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
hipLaunchKernelGGL(( no_exact_gelu_kernel<half, block_size>)
, dim3(grid_size), dim3(block_size), 0, stream,
kAT, kBT, kCT, num, input, output);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Gelu TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 83019df81b5193341a1d6b02edc6dde13ef90b77.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstring>
#include <vector>
#include "paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// constants for approximating the normal cdf
static const float kA = 1.41421356237309504; // sqrt(2)
static const float kAT = 0.5;
static const float kBT = 0.7978845608028654; // sqrt(2.0/M_PI)
static const float kCT = 0.035677408136300125; // 0.044715 * sqrt(2.0/M_PI)
bool GeluPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims GeluPlugin::getOutputDimensions(int index,
const nvinfer1::Dims* in_dims,
int nb_inputs) TRT_NOEXCEPT {
assert(nb_inputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const& input_dims = in_dims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T, unsigned TPB>
__global__ void gelu_kernel(const T a, int n, const T* input, T* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
const T cdf = 0.5f * (1.0f + erff(in * 0.5f * a));
output[idx] = in * cdf;
}
}
template <typename T>
__device__ T do_tanh(T a);
template <>
__device__ float do_tanh<float>(float a) {
return tanf(a);
}
template <>
__device__ half do_tanh<half>(half a) {
const float tmp = tanhf(__half2float(a));
return __float2half(tmp);
}
// the kernel below is not aligned with the fluid fp32 forward ones; use it for
// fp16.
template <typename T, unsigned TPB>
__global__ void no_exact_gelu_kernel(
const T a, const T b, const T c, int n, const T* input, T* output) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < n) {
const T in = input[idx];
const T tmp = in * (c * in * in + b);
const T cdf = a + a * do_tanh<T>(tmp);
output[idx] = in * cdf;
}
#endif
}
int GeluPlugin::enqueue(int batch_size,
const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs,
void*,
cudaStream_t stream) {
#else
void* const* outputs,
void*,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
const auto& input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
auto type = getDataType();
if (type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp32";
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
gelu_kernel<float, block_size>
<<<grid_size, block_size, 0, stream>>>(kA, num, input, output);
} else if (type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp16";
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
no_exact_gelu_kernel<half, block_size>
<<<grid_size, block_size, 0, stream>>>(
kAT, kBT, kCT, num, input, output);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Gelu TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
nvinfer1::DimsExprs GeluPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs* inputs,
int nb_inputs,
nvinfer1::IExprBuilder& expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool GeluPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc* in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc& in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc& prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType GeluPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType* input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Gelu Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int GeluPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
const nvinfer1::PluginTensorDesc* output_desc,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
const int block_size = 256;
const int grid_size = (num + block_size - 1) / block_size;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp32";
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
gelu_kernel<float, block_size>
<<<grid_size, block_size, 0, stream>>>(kA, num, input, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Gelu-->fp16";
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
no_exact_gelu_kernel<half, block_size>
<<<grid_size, block_size, 0, stream>>>(
kAT, kBT, kCT, num, input, output);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Gelu TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
56c0a58df81144f540bd28a6c6ae9a33169f16fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* 2018.11.10 // very slow average 200s
 * every thread checks one Queen, not a pair[]
 * 2018.12.4
 * use dynamic parallelism to speed up large N.
 * in the Sub Kernel, set x of the header Queen to -1 to avoid calculating the global index into subArr[].
*/
#include "Kernel.h"
//#define DEBUG 0
//every block has x Queen, i.e. x raws.
#define BLOCK_SIZE 512
// number of pairs each thread in the sub kernel loops over, i.e. pairs checked by that thread;
// threshold
#define SUB_TASK (128*1024)
// thread numbers in every sub block
#define SUB_BLOCK_SIZE 32
//----------------------------Kernel----------------------------------------
__global__ void Ker_Warm(){
// empty body, just warmup GPU;
if(threadIdx.x == 0 )
printf("GPU is OK!\n");
}
__global__ void Ker_Sub(
int super_tid,
/*int X,*/ // use -1 instead.
int Y,
int *SubArr, // Arr[super.tid+1, N-1]
int SubLen , // N-1 -( super.tid+1) + 1
unsigned int *d_result )
{
const int sub_tid = blockDim.x * blockIdx.x + threadIdx.x; // [0.. 127]
//int my_job_begin = SUB_TASK * sub_tid ;
//int my_job_end = SUB_TASK * (sub_tid+1);
int X=-1; // convert sub[] index to global[] index;
for(int sx = SUB_TASK * sub_tid; sx< SUB_TASK * (sub_tid + 1) && sx < SubLen ; sx++){
int sy = SubArr[sx]; // Arr[super_tid + 1 + sx]
# ifdef DEBUG
printf("Super [%5d] Sub[%5d]-------->>>compare %5d to %5d\n ", super_tid, sub_tid, X, sx );
#endif
		if(Y == sy || X+Y == sx + sy || Y -X == sy - sx) { // not a permutation; the values are random numbers.
atomicAdd ((unsigned int *)&d_result[0],1);
}//if
}//for
}
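// Illustrative derivation (not part of the original comments): Ker_Sub is passed
// &Arr[super_tid + 1], so a local index sx corresponds to global column
// gx = super_tid + 1 + sx, while the header Queen sits in global column super_tid.
// The global diagonal tests
//   super_tid + Y == gx + sy   and   Y - super_tid == sy - gx
// both reduce to
//   (-1) + Y == sx + sy        and   Y - (-1) == sy - sx,
// which is exactly the check performed above with X fixed to -1, so the sub kernel never
// has to reconstruct global indices.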
// check one Queen, i.e. one round: compare arr[tid] to arr[tid+1 .. N-1].
__global__ void Ker_Check_Combination (
int *Arr, // arr[0,N-1]
int N, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid >= N -1) // the last queen arr[N-1] needn't be checked.
return;
//int curX=tid; // curX is global index in Arr[], in subArr, use -1
int curY=Arr[tid];
int len_of_rest_Queens = N -1-tid;
# ifdef DEBUG
printf("Super[%5d]-------------------->>> len_of_rest_Queens %5d, sub_ker_threads %5d \n",
tid , len_of_rest_Queens , sub_ker_threads);
#endif
if( len_of_rest_Queens > SUB_TASK ) {
//call sub kernel.
//printf("Sub Kernel called in thread:%5d\n", tid);
int sub_ker_threads = (len_of_rest_Queens + SUB_TASK -1) / SUB_TASK ;
hipLaunchKernelGGL(( Ker_Sub), dim3(( sub_ker_threads + SUB_BLOCK_SIZE -1 ) / SUB_BLOCK_SIZE) , dim3(SUB_BLOCK_SIZE) , 0, 0, tid , /*curX, -1 instead */ curY, &Arr[tid+1] , len_of_rest_Queens, d_result);
}
else {
// for small tasks, check by itself.
int curX=tid;
//check every queen after cur;
for(int iX=tid+1 ; iX <= N-1 ; iX++){
int iY=Arr[iX];
			if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a permutation; the values are random numbers.
//printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid);
atomicAdd ((unsigned int *)&d_result[0],1);
// break; // get all conflicts
}
}
}
}// end of Kernel
//----------------------------CPU Interface----------------------------------------
void setDevice(int i)
{
checkCudaErrors( hipSetDevice( i ) );
}
void warmGPU()
{
hipError_t cuda_err;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float cuda_time=0;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1), 0, 0, );
cuda_err= hipSuccess;
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
}
void show_config(int combination_size)
{
printf("%20s\t%20s\t%20s\n","super block size","sub block size","sub task size");
printf("%20d\t%20d\t%20d\n",BLOCK_SIZE, SUB_BLOCK_SIZE, SUB_TASK);
printf("Ker_Check_Combination<<< %5d,%5d >>> \n", (combination_size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE );
}
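// Illustrative host-side cross-check (not part of the original file; the name
// conflicts_reference is hypothetical): it counts the same pairwise conflicts as the
// kernels above (equal Y value or shared diagonal), so it can be used to validate
// get_conflicts() for small N.
static unsigned int conflicts_reference(const int *arr, int n)
{
	unsigned int conflicts = 0;
	for (int i = 0; i < n - 1; i++)
		for (int j = i + 1; j < n; j++)
			if (arr[i] == arr[j] ||            // same Y value
			    i + arr[i] == j + arr[j] ||    // same anti-diagonal (x + y)
			    arr[i] - i == arr[j] - j)      // same main diagonal (y - x)
				conflicts++;
	return conflicts;
}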
unsigned int get_conflicts(int * combination, int combination_size)
{
int *h_combination = 0; //store a number in [1~N]
int *d_combination = 0;
unsigned int * h_result= 0;
unsigned int * d_result= 0;
// timer
//std::chrono::time_point<std::chrono::system_clock> c11_start, c11_end;
//hipEvent_t start, stop;
//hipEventCreate(&start);
//hipEventCreate(&stop);
//float cuda_time=0;
//int cpu_time=0;
//cuda status var;
hipError_t cuda_err;
// Allocate CPU memory and initialize data.
// init h_combination & timer.
//c11_start =std::chrono::system_clock::now();
//h_combination =(int *)malloc(combination_size * sizeof(int)); // need not allocate & free memory
h_result =(unsigned int *)malloc( 1 * sizeof(unsigned int));
if(h_result==NULL )
{
printf("malloc h_result error \ni");
exit(1);
}
h_combination= combination;
cuda_err = hipSuccess;
//hipEventRecord(start, 0);
// allocate GPU mem
checkCudaErrors(hipMalloc((void **)&d_combination, combination_size * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_result , 1 * sizeof(unsigned int)));
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "alloc d_combination error! (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
//else
// fprintf(stderr, "alloc d_combination successed ! ( code= %s)!\n", hipGetErrorString(cuda_err));
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","GPU mem allocate time=",cuda_time);
//************************************************************************************************************
//combination H->D
//hipEventRecord(start, 0);
checkCudaErrors(hipMemcpy(d_combination, h_combination, combination_size * sizeof(int), hipMemcpyHostToDevice));
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","combination[] tranfer time =", cuda_time);
// Execute & timer
//hipEventRecord(start, 0);
hipLaunchKernelGGL(( Ker_Check_Combination), dim3(( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE) , dim3(BLOCK_SIZE) , 0, 0,
d_combination ,combination_size , d_result);
cuda_err= hipSuccess;
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
# ifdef DEBUG
else
fprintf(stderr, "launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err));
#endif
//checkCudaErrors(hipDeviceSynchronize());
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","CUDA Kernel run time=",cuda_time);
// D->H and timer
h_result[0]=0;
//hipEventRecord(start, 0);
checkCudaErrors(hipMemcpy(h_result, d_result, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost));
cuda_err = hipSuccess;
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "D->H error! (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
# ifdef DEBUG
else
fprintf(stderr, "D->H successed ! ( code= %s)!\n", hipGetErrorString(cuda_err));
#endif
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","CUDA D->H time = ", cuda_time);
//printf("testORI Kernel OK! result= %d \n",h_result[0]);
unsigned int conflicts = h_result[0];
//************************************************************************************************************
//free memory
//hipEventDestroy(start);
//hipEventDestroy(stop);
	// free(h_combination); // it is a pointer to a static array a[][]; no need to free it;
free(h_result);
checkCudaErrors( hipFree(d_combination) );
checkCudaErrors( hipFree(d_result) );
checkCudaErrors( hipDeviceSynchronize() );
//checkCudaErrors( hipDeviceReset() );
return conflicts;
}
| 56c0a58df81144f540bd28a6c6ae9a33169f16fd.cu | /* 2018.11.10 // very slow average 200s
 * every thread checks one Queen, not a pair[]
 * 2018.12.4
 * use dynamic parallelism to speed up large N.
 * in the Sub Kernel, set x of the header Queen to -1 to avoid calculating the global index into subArr[].
*/
#include "Kernel.h"
//#define DEBUG 0
//every block has x Queen, i.e. x raws.
#define BLOCK_SIZE 512
// number of pairs each thread in the sub kernel loops over, i.e. pairs checked by that thread;
// threshold
#define SUB_TASK (128*1024)
// thread numbers in every sub block
#define SUB_BLOCK_SIZE 32
//----------------------------Kernel----------------------------------------
__global__ void Ker_Warm(){
// empty body, just warmup GPU;
if(threadIdx.x == 0 )
printf("GPU is OK!\n");
}
__global__ void Ker_Sub(
int super_tid,
/*int X,*/ // use -1 instead.
int Y,
int *SubArr, // Arr[super.tid+1, N-1]
int SubLen , // N-1 -( super.tid+1) + 1
unsigned int *d_result )
{
const int sub_tid = blockDim.x * blockIdx.x + threadIdx.x; // [0.. 127]
//int my_job_begin = SUB_TASK * sub_tid ;
//int my_job_end = SUB_TASK * (sub_tid+1);
int X=-1; // convert sub[] index to global[] index;
for(int sx = SUB_TASK * sub_tid; sx< SUB_TASK * (sub_tid + 1) && sx < SubLen ; sx++){
int sy = SubArr[sx]; // Arr[super_tid + 1 + sx]
# ifdef DEBUG
printf("Super [%5d] Sub[%5d]-------->>>compare %5d to %5d\n ", super_tid, sub_tid, X, sx );
#endif
		if(Y == sy || X+Y == sx + sy || Y -X == sy - sx) { // not a permutation; the values are random numbers.
atomicAdd ((unsigned int *)&d_result[0],1);
}//if
}//for
}
// check one Queen, i.e. one round: compare arr[tid] to arr[tid+1 .. N-1].
__global__ void Ker_Check_Combination (
int *Arr, // arr[0,N-1]
int N, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid >= N -1) // the last queen arr[N-1] needn't be checked.
return;
//int curX=tid; // curX is global index in Arr[], in subArr, use -1
int curY=Arr[tid];
int len_of_rest_Queens = N -1-tid;
# ifdef DEBUG
printf("Super[%5d]-------------------->>> len_of_rest_Queens %5d, sub_ker_threads %5d \n",
tid , len_of_rest_Queens , sub_ker_threads);
#endif
if( len_of_rest_Queens > SUB_TASK ) {
//call sub kernel.
//printf("Sub Kernel called in thread:%5d\n", tid);
int sub_ker_threads = (len_of_rest_Queens + SUB_TASK -1) / SUB_TASK ;
Ker_Sub<<< ( sub_ker_threads + SUB_BLOCK_SIZE -1 ) / SUB_BLOCK_SIZE , SUB_BLOCK_SIZE >>> ( tid , /*curX, -1 instead */ curY, &Arr[tid+1] , len_of_rest_Queens, d_result);
}
else {
// for small tasks, check by itself.
int curX=tid;
//check every queen after cur;
for(int iX=tid+1 ; iX <= N-1 ; iX++){
int iY=Arr[iX];
			if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a permutation; the values are random numbers.
//printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid);
atomicAdd ((unsigned int *)&d_result[0],1);
// break; // get all conflicts
}
}
}
}// end of Kernel
//----------------------------CPU Interface----------------------------------------
void setDevice(int i)
{
checkCudaErrors( cudaSetDevice( i ) );
}
void warmGPU()
{
cudaError_t cuda_err;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float cuda_time=0;
cudaEventRecord(start, 0);
Ker_Warm <<<1,1>>> ();
cuda_err= cudaSuccess;
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
}
void show_config(int combination_size)
{
printf("%20s\t%20s\t%20s\n","super block size","sub block size","sub task size");
printf("%20d\t%20d\t%20d\n",BLOCK_SIZE, SUB_BLOCK_SIZE, SUB_TASK);
printf("Ker_Check_Combination<<< %5d,%5d >>> \n", (combination_size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE );
}
unsigned int get_conflicts(int * combination, int combination_size)
{
int *h_combination = 0; //store a number in [1~N]
int *d_combination = 0;
unsigned int * h_result= 0;
unsigned int * d_result= 0;
// timer
//std::chrono::time_point<std::chrono::system_clock> c11_start, c11_end;
//cudaEvent_t start, stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//float cuda_time=0;
//int cpu_time=0;
//cuda status var;
cudaError_t cuda_err;
// Allocate CPU memory and initialize data.
// init h_combination & timer.
//c11_start =std::chrono::system_clock::now();
//h_combination =(int *)malloc(combination_size * sizeof(int)); // need not allocate & free memory
h_result =(unsigned int *)malloc( 1 * sizeof(unsigned int));
if(h_result==NULL )
{
printf("malloc h_result error \ni");
exit(1);
}
h_combination= combination;
cuda_err = cudaSuccess;
//cudaEventRecord(start, 0);
// allocate GPU mem
checkCudaErrors(cudaMalloc((void **)&d_combination, combination_size * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_result , 1 * sizeof(unsigned int)));
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "alloc d_combination error! (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
//else
// fprintf(stderr, "alloc d_combination successed ! ( code= %s)!\n", cudaGetErrorString(cuda_err));
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","GPU mem allocate time=",cuda_time);
//************************************************************************************************************
//combination H->D
//cudaEventRecord(start, 0);
checkCudaErrors(cudaMemcpy(d_combination, h_combination, combination_size * sizeof(int), cudaMemcpyHostToDevice));
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","combination[] tranfer time =", cuda_time);
// Execute & timer
//cudaEventRecord(start, 0);
checkCudaErrors(cudaMemset(d_result, 0, sizeof(unsigned int))); // zero the device-side conflict counter before the kernel atomically accumulates into it
Ker_Check_Combination<<< ( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE >>>
(d_combination ,combination_size , d_result);
cuda_err= cudaSuccess;
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
# ifdef DEBUG
else
fprintf(stderr, "launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err));
#endif
//checkCudaErrors(cudaDeviceSynchronize());
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","CUDA Kernel run time=",cuda_time);
// D->H and timer
h_result[0]=0;
//cudaEventRecord(start, 0);
checkCudaErrors(cudaMemcpy(h_result, d_result, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
cuda_err = cudaSuccess;
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "D->H error! (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
# ifdef DEBUG
else
fprintf(stderr, "D->H successed ! ( code= %s)!\n", cudaGetErrorString(cuda_err));
#endif
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&cuda_time, start, stop);
//if(DEBUG) printf("%-40s %f ms \n","CUDA D->H time = ", cuda_time);
//printf("testORI Kernel OK! result= %d \n",h_result[0]);
unsigned int conflicts = h_result[0];
//************************************************************************************************************
//free memory
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
// free(h_combination); // h_combination points to the caller's array, so it must not be freed here;
free(h_result);
checkCudaErrors( cudaFree(d_combination) );
checkCudaErrors( cudaFree(d_result) );
checkCudaErrors( cudaDeviceSynchronize() );
//checkCudaErrors( cudaDeviceReset() );
return conflicts;
}
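// --- Illustrative usage sketch (an assumed example, not part of the original source) ---
// A minimal host program driving the interface above.  The guard macro NQUEENS_DEMO_MAIN
// and the board values are assumptions for illustration; the board holds 1-based column
// numbers, as the "[1~N]" comment in get_conflicts() suggests, and this particular
// placement is a classic conflict-free 8-queens solution, so a count of 0 is expected.
#ifdef NQUEENS_DEMO_MAIN
int main()
{
    int board[8] = {1, 5, 8, 6, 3, 7, 2, 4};
    setDevice(0);
    warmGPU();
    show_config(8);
    unsigned int conflicts = get_conflicts(board, 8);
    printf("conflicts = %u\n", conflicts);
    return 0;
}
#endif // NQUEENS_DEMO_MAIN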
|
2786b8e8004c59430f4592c3dd668940d474191a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/mv_grad_kernel.h"
namespace phi {
template <typename T>
__global__ void MVGradDxCUDAKernel(
const int m, const int n, const T *dout, const T *vec, T *dx) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < m * n; idx += blockDim.x * gridDim.x) {
int i = idx / n;
int j = idx % n;
dx[idx] = dout[i] * vec[j];
}
}
template <typename T, typename Context>
void MvGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &vec,
const DenseTensor &out_grad,
DenseTensor *x_grad,
DenseTensor *vec_grad) {
auto dout = out_grad;
auto dx = x_grad;
auto dvec = vec_grad;
auto dim_x = x.dims();
int m = dim_x[0];
int n = dim_x[1];
// get data ptr
const T *x_data = x.data<T>();
const T *vec_data = vec.data<T>();
const T *dout_data = dout.data<T>();
auto blas = phi::funcs::GetBlas<Context, T>(dev_ctx);
auto stream = dev_ctx.stream();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, m * n);
if (dx) {
T *dx_data = dev_ctx.template Alloc<T>(dx);
hipLaunchKernelGGL(( MVGradDxCUDAKernel<T>)
, dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream,
m, n, dout_data, vec_data, dx_data);
}
if (dvec) {
T *dvec_data = dev_ctx.template Alloc<T>(dvec);
blas.GEMV(true,
dim_x[0],
dim_x[1],
static_cast<T>(1),
x_data,
dout_data,
static_cast<T>(0),
dvec_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(mv_grad, GPU, ALL_LAYOUT, phi::MvGradKernel, float, double) {
}
| 2786b8e8004c59430f4592c3dd668940d474191a.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/mv_grad_kernel.h"
namespace phi {
template <typename T>
__global__ void MVGradDxCUDAKernel(
const int m, const int n, const T *dout, const T *vec, T *dx) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < m * n; idx += blockDim.x * gridDim.x) {
int i = idx / n;
int j = idx % n;
dx[idx] = dout[i] * vec[j];
}
}
template <typename T, typename Context>
void MvGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &vec,
const DenseTensor &out_grad,
DenseTensor *x_grad,
DenseTensor *vec_grad) {
auto dout = out_grad;
auto dx = x_grad;
auto dvec = vec_grad;
auto dim_x = x.dims();
int m = dim_x[0];
int n = dim_x[1];
// get data ptr
const T *x_data = x.data<T>();
const T *vec_data = vec.data<T>();
const T *dout_data = dout.data<T>();
auto blas = phi::funcs::GetBlas<Context, T>(dev_ctx);
auto stream = dev_ctx.stream();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, m * n);
if (dx) {
T *dx_data = dev_ctx.template Alloc<T>(dx);
MVGradDxCUDAKernel<T>
<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
m, n, dout_data, vec_data, dx_data);
}
if (dvec) {
T *dvec_data = dev_ctx.template Alloc<T>(dvec);
blas.GEMV(true,
dim_x[0],
dim_x[1],
static_cast<T>(1),
x_data,
dout_data,
static_cast<T>(0),
dvec_data);
}
}
} // namespace phi
PD_REGISTER_KERNEL(mv_grad, GPU, ALL_LAYOUT, phi::MvGradKernel, float, double) {
}
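// --- Illustrative reference (an assumed sketch, not part of the PaddlePaddle source) ---
// For out = x * vec with x of shape (m, n) and vec of length n, the code above computes
//   dx[i][j] = dout[i] * vec[j]          (outer product, MVGradDxCUDAKernel)
//   dvec[j]  = sum_i x[i][j] * dout[i]   (x^T * dout, done on the GPU via blas.GEMV)
// A single-threaded CPU sketch of the same math, for clarity only (it is never
// registered or called by the framework):
static inline void MvGradReferenceCPU(int m, int n, const float *x, const float *vec,
                                      const float *dout, float *dx, float *dvec) {
  for (int j = 0; j < n; ++j) dvec[j] = 0.f;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      dx[i * n + j] = dout[i] * vec[j];
      dvec[j] += x[i * n + j] * dout[i];
    }
  }
}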
|
4e9c2be8a53895b1738a92b92f1ce05d73e2887f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n,c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4e9c2be8a53895b1738a92b92f1ce05d73e2887f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matMult<<<gridBlock,threadBlock>>>(a,b,n,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matMult<<<gridBlock,threadBlock>>>(a,b,n,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matMult<<<gridBlock,threadBlock>>>(a,b,n,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
143af2c08e94aa2291a4743de457dddd73463cba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void ScatterNdOps_forward_kernel(double *out, const long long*ii, const double *update, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n){
out[ii[p]-1] = update[p];
}
}
__global__ void setzero_kernel(double *out, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n){
out[p] = 0.0;
}
}
void Gpu_ScatterNdOps_forward(double *out, const long long *ii,
const double *update, int n, int N){
hipLaunchKernelGGL(( setzero_kernel), dim3((N - 1)/64 + 1), dim3(64) , 0, 0, out, N);
hipLaunchKernelGGL(( ScatterNdOps_forward_kernel), dim3((n - 1)/64 + 1), dim3(64) , 0, 0, out, ii, update, n);
}
__global__ void ScatterNdOps_backward_kernel(double *grad_update,
const double *grad_out,
const double *out, const long long *ii,
const double *update, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n) {
grad_update[p] = grad_out[ii[p]-1];
}
}
void Gpu_ScatterNdOps_backward(
double *grad_update,
const double *grad_out,
const double *out, const long long *ii,
const double *update, int n){
hipLaunchKernelGGL(( setzero_kernel), dim3((n - 1)/64 + 1), dim3(64) , 0, 0, grad_update, n);
hipLaunchKernelGGL(( ScatterNdOps_backward_kernel), dim3((n - 1)/64 + 1), dim3(64) , 0, 0, grad_update, grad_out, out, ii, update, n);
}
void get_ScatterNdOps_num(long long *out, const long long *m){
hipMemcpy(out, m, sizeof(long long), hipMemcpyDeviceToHost);
} | 143af2c08e94aa2291a4743de457dddd73463cba.cu | #include "cuda.h"
#include <stdio.h>
__global__ void ScatterNdOps_forward_kernel(double *out, const long long*ii, const double *update, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n){
out[ii[p]-1] = update[p];
}
}
__global__ void setzero_kernel(double *out, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n){
out[p] = 0.0;
}
}
void Gpu_ScatterNdOps_forward(double *out, const long long *ii,
const double *update, int n, int N){
setzero_kernel<<< (N - 1)/64 + 1, 64 >>>(out, N);
ScatterNdOps_forward_kernel<<< (n - 1)/64 + 1, 64 >>>(out, ii, update, n);
}
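// --- Illustrative note (an assumed example, not part of the original source) ---
// The indices in ii are 1-based, hence the "ii[p]-1" above.  For instance, with
//   N = 6, n = 3, ii = {3, 1, 5}, update = {10.0, 20.0, 30.0}
// Gpu_ScatterNdOps_forward leaves out = {20.0, 0.0, 10.0, 0.0, 30.0, 0.0}, and the
// backward path below simply gathers grad_update[p] = grad_out[ii[p]-1].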
__global__ void ScatterNdOps_backward_kernel(double *grad_update,
const double *grad_out,
const double *out, const long long *ii,
const double *update, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p<n) {
grad_update[p] = grad_out[ii[p]-1];
}
}
void Gpu_ScatterNdOps_backward(
double *grad_update,
const double *grad_out,
const double *out, const long long *ii,
const double *update, int n){
setzero_kernel<<< (n - 1)/64 + 1, 64 >>>(grad_update, n);
ScatterNdOps_backward_kernel<<< (n - 1)/64 + 1, 64 >>>(grad_update, grad_out, out, ii, update, n);
}
void get_ScatterNdOps_num(long long *out, const long long *m){
cudaMemcpy(out, m, sizeof(long long), cudaMemcpyDeviceToHost);
} |
8cb32a5d0a85f53abfc871f43df7f47d78612385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlag2c.cu mixed zc -> ds, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( ((tmp) < neg_rmax) || ((tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < neg_rmax) || ((tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)( tmp );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( ((tmp) < neg_rmax) || ((tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < neg_rmax) || ((tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)( tmp );
}
}
}
}
/**
Purpose
-------
DLAG2S_STREAM converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
This is the same as DLAG2S, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlag2s_q(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X );
dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y );
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( dlag2s_kernel), dim3(grid), dim3(threads), 0, queue , m, n, A, lda, SA, ldsa, rmax );
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
/**
@see magmablas_dlag2s_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_int_t *info )
{
magmablas_dlag2s_q( m, n, A, lda, SA, ldsa, info, magma_stream );
}
| 8cb32a5d0a85f53abfc871f43df7f47d78612385.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlag2c.cu mixed zc -> ds, Sat Nov 15 19:53:59 2014
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to dlat2s and zlaset.
*/
__global__
void dlag2s_kernel(
int m, int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( ((tmp) < neg_rmax) || ((tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < neg_rmax) || ((tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)( tmp );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( ((tmp) < neg_rmax) || ((tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| ((tmp) < neg_rmax) || ((tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = (float)( tmp );
}
}
}
}
/**
Purpose
-------
DLAG2S_STREAM converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAG2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
This is the same as DLAG2S, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlag2s_q(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X );
dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
dlag2s_kernel<<< grid, threads, 0, queue >>>( m, n, A, lda, SA, ldsa, rmax );
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
/**
@see magmablas_dlag2s_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlag2s(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_int_t *info )
{
magmablas_dlag2s_q( m, n, A, lda, SA, ldsa, info, magma_stream );
}
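// --- Illustrative usage sketch (an assumed example, not part of the MAGMA source) ---
// Converting an m-by-n double-precision matrix already resident on the GPU to single
// precision with the routine above.  The buffer names, sizes and the helper name are
// assumptions for illustration only; error handling is omitted and nothing calls this.
static void example_dlag2s_usage()
{
    magma_int_t m = 1000, n = 500, ldda = 1000, info = 0;
    double *dA  = NULL;
    float  *dSA = NULL;
    cudaMalloc( (void**)&dA,  (size_t)ldda * n * sizeof(double) );
    cudaMalloc( (void**)&dSA, (size_t)ldda * n * sizeof(float)  );
    /* ... fill dA with data ... */
    magmablas_dlag2s( m, n, dA, ldda, dSA, ldda, &info );
    if ( info != 0 ) {
        /* some entry of dA exceeded the single-precision overflow threshold */
    }
    cudaFree( dA );
    cudaFree( dSA );
}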
|
25b5b7c18fe79ae8e855b9a779fece5bbd3eb596.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by miguel on 11/03/2021.
//
#include "MatUtil.cuh"
#define BLOCK_SIZE 16
__global__ void matrixMultiplyKernel(const double *a, const double *b, double *c, unsigned int m, unsigned int n, unsigned int k) {
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < m && col < k) {
double aux = .0;
for (int i = 0; i < n; i++) {
aux += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = aux;
__syncthreads();
}
}
__global__ void matrixTransposeKernel(const double *a, double *b, unsigned int rows, unsigned int cols) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < cols && idy < rows) {
unsigned int pos = idy * cols + idx;
unsigned int transposedPos = idx * rows + idy;
b[transposedPos] = a[pos];
}
}
void MatUtil::matrixMultiply(double *a, double *b, double *c, unsigned int m, unsigned int n, unsigned int k) {
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( matrixMultiplyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, a, b, c, m, n, k);
}
void MatUtil::matrixTranspose(double *a, double *b, unsigned int rows, unsigned int cols) {
unsigned int n = rows * cols;
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
hipLaunchKernelGGL(( matrixTransposeKernel), dim3(dim_grid), dim3(dim_block), 0, 0, a, b, rows, cols);
}
| 25b5b7c18fe79ae8e855b9a779fece5bbd3eb596.cu | //
// Created by miguel on 11/03/2021.
//
#include "MatUtil.cuh"
#define BLOCK_SIZE 16
__global__ void matrixMultiplyKernel(const double *a, const double *b, double *c, unsigned int m, unsigned int n, unsigned int k) {
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < m && col < k) {
double aux = .0;
for (int i = 0; i < n; i++) {
aux += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = aux;
__syncthreads();
}
}
__global__ void matrixTransposeKernel(const double *a, double *b, unsigned int rows, unsigned int cols) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < cols && idy < rows) {
unsigned int pos = idy * cols + idx;
unsigned int transposedPos = idx * rows + idy;
b[transposedPos] = a[pos];
}
}
void MatUtil::matrixMultiply(double *a, double *b, double *c, unsigned int m, unsigned int n, unsigned int k) {
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
matrixMultiplyKernel<<<dimGrid, dimBlock>>>(a, b, c, m, n, k);
}
void MatUtil::matrixTranspose(double *a, double *b, unsigned int rows, unsigned int cols) {
unsigned int n = rows * cols;
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
matrixTransposeKernel<<<dim_grid, dim_block>>>(a, b, rows, cols);
}
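// --- Illustrative usage sketch (an assumed example, not part of the original source) ---
// Driving the two wrappers above, assuming matrixMultiply/matrixTranspose are callable
// as MatUtil::... (i.e. static members or namespace-scope functions, as the definitions
// suggest).  The dimensions are made up; data transfer and error checks are omitted.
//
//   const unsigned int m = 64, n = 32, k = 16;
//   double *dA, *dB, *dC, *dAT;
//   cudaMalloc(&dA,  m * n * sizeof(double));
//   cudaMalloc(&dB,  n * k * sizeof(double));
//   cudaMalloc(&dC,  m * k * sizeof(double));
//   cudaMalloc(&dAT, m * n * sizeof(double));
//   // ... copy host data into dA and dB with cudaMemcpy ...
//   MatUtil::matrixMultiply(dA, dB, dC, m, n, k);   // dC = dA (m x n) * dB (n x k)
//   MatUtil::matrixTranspose(dA, dAT, m, n);        // dAT = dA^T, laid out n x m
//   cudaDeviceSynchronize();                        // both launches are asynchronous
//   cudaFree(dA); cudaFree(dB); cudaFree(dC); cudaFree(dAT);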
|
c1114d941c20c28f1932b5feed2127893096f745.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/local_response_normalization_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LRNFillScaleNCHW(const int nthreads, const T* in,
const int num, const int channels, const int height,
const int width, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// recover the pointers for the next loop.
in -= offset;
scale -= offset;
}
}
template <typename T>
__global__ void LRNFillScaleNHWC(const int nthreads, const T* in,
const int num, const int height, const int width,
const int channels, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pre_pad = (size - 1) / 2;
scale[index] = 0;
for (int i = 0; i < size; ++i) {
int raw_idx = c + i - pre_pad;
if (raw_idx >= 0 && raw_idx < channels) {
scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad];
}
}
scale[index] = bias + scale[index] * alpha_over_size;
}
}
// TODO(Yangqing): check if it would be faster to just put it into the previous
// kernel.
template <typename T>
__global__ void LRNComputeOutput(const int nthreads, const T* in,
const T* scale, const T negative_beta, T* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename T>
__global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const T negative_beta,
const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// recover pointer for next iteration.
bottom_data -= offset;
top_data -= offset;
scale -= offset;
top_diff -= offset;
bottom_diff -= offset;
}
}
// This local response normalization gradient does one sum per output location
// and does not use the running trick for 1-d convolution: thus it might not be
// the fastest implementation.
template <typename T>
__global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int height, const int width, const int channels,
const int size, const T negative_beta, const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local channel offset
int c = index % channels;
int pre_pad = size / 2;
T accum_ratio = 0;
for (int i = -pre_pad; i < size - pre_pad; ++i) {
if (c + i >= 0 && c + i < channels) {
accum_ratio += top_diff[index + i] * top_data[index + i] /
scale[index + i];
}
}
bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) -
cache_ratio * bottom_data[index] * accum_ratio;
}
}
} // namespace
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
n_threads = X.size();
hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.size();
hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
const float* Xdata = X.data<float>();
const float* Ydata = Y.data<float>();
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
hipLaunchKernelGGL(( LRNComputeDiffNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_,
2.f * alpha_ * beta_ / size_, dXdata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.size();
hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
hipLaunchKernelGGL(( LRNComputeDiffNHWC<float>)
, dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
X.data<float>(),
Y.data<float>(),
scale_data,
dY.data<float>(),
X.dim32(0),
X.dim32(1),
X.dim32(2),
X.dim32(3),
size_,
-beta_,
2.f * alpha_ * beta_ / size_,
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>);
} // namespace caffe2
| c1114d941c20c28f1932b5feed2127893096f745.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/local_response_normalization_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LRNFillScaleNCHW(const int nthreads, const T* in,
const int num, const int channels, const int height,
const int width, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
in += offset;
scale += offset;
int head = 0;
int pre_pad = (size - 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad) {
accum_scale += in[head * step] * in[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_scale += in[head * step] * in[head * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in[head * step] * in[head * step];
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_scale -= in[(head - size) * step] * in[(head - size) * step];
scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size;
++head;
}
// recover the pointers for the next loop.
in -= offset;
scale -= offset;
}
}
template <typename T>
__global__ void LRNFillScaleNHWC(const int nthreads, const T* in,
const int num, const int height, const int width,
const int channels, const int size, const T alpha_over_size,
const T bias, T* scale) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pre_pad = (size - 1) / 2;
scale[index] = 0;
for (int i = 0; i < size; ++i) {
int raw_idx = c + i - pre_pad;
if (raw_idx >= 0 && raw_idx < channels) {
scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad];
}
}
scale[index] = bias + scale[index] * alpha_over_size;
}
}
// TODO(Yangqing): check if it would be faster to just put it into the previous
// kernel.
template <typename T>
__global__ void LRNComputeOutput(const int nthreads, const T* in,
const T* scale, const T negative_beta, T* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename T>
__global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int channels, const int height,
const int width, const int size, const T negative_beta,
const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int n = index / width / height;
int offset = (n * channels * height + h) * width + w;
int step = height * width;
bottom_data += offset;
top_data += offset;
scale += offset;
top_diff += offset;
bottom_diff += offset;
int head = 0;
int pre_pad = size - (size + 1) / 2;
int post_pad = size - pre_pad - 1;
T accum_ratio = 0;
// accumulate values
while (head < post_pad) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
++head;
}
// until we reach size, nothing needs to be subtracted
while (head < size) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff[head * step] * top_data[head * step] /
scale[head * step];
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
accum_ratio -= top_diff[(head - size) * step] *
top_data[(head - size) * step] / scale[(head - size) * step];
bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
* pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
bottom_data[(head - post_pad) * step] * accum_ratio;
++head;
}
// recover pointer for next iteration.
bottom_data -= offset;
top_data -= offset;
scale -= offset;
top_diff -= offset;
bottom_diff -= offset;
}
}
// This local response normalization gradient does one sum per output location
// and does not use the running trick for 1-d convolution: thus it might not be
// the fastest implementation.
template <typename T>
__global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data,
const T* top_data, const T* scale, const T* top_diff,
const int num, const int height, const int width, const int channels,
const int size, const T negative_beta, const T cache_ratio,
T* bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local channel offset
int c = index % channels;
int pre_pad = size / 2;
T accum_ratio = 0;
for (int i = -pre_pad; i < size - pre_pad; ++i) {
if (c + i >= 0 && c + i < channels) {
accum_ratio += top_diff[index + i] * top_data[index + i] /
scale[index + i];
}
}
bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) -
cache_ratio * bottom_data[index] * accum_ratio;
}
}
} // namespace
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
n_threads = X.size();
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template<>
bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
auto* Y = Output(0, X.sizes(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
if (OutputSize() > 1) {
scale_ = Output(1);
} else {
if (!scale_) {
scale_ = &local_scale_tensor_;
}
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.size();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, scale_data, -beta_, Ydata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
const float* Xdata = X.data<float>();
const float* Ydata = Y.data<float>();
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = N * H * W;
LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, C, H, W, size_, alpha_ / size_, bias_, scale_data);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
LRNComputeDiffNCHW<float><<<CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, Ydata, scale_data, dYdata, N, C, H, W, size_, -beta_,
2.f * alpha_ * beta_ / size_, dXdata);
return true;
}
template <>
bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
DCHECK_EQ(X.ndim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int C = X.dim32(3);
const float* Xdata = X.data<float>();
// Loosely checking the size, assuming that the shapes will be the same as
// long as the sizes check out.
DCHECK_EQ(X.size(), Y.size());
DCHECK_EQ(X.size(), dY.size());
auto* dX = Output(0, X.sizes(), at::dtype<float>());
if (!scale_) {
scale_ = &local_scale_tensor_;
}
scale_->ResizeLike(X);
float* scale_data = scale_->template mutable_data<float>();
int n_threads = X.size();
LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
n_threads, Xdata, N, H, W, C, size_, alpha_ / size_, bias_, scale_data);
LRNComputeDiffNHWC<float>
<<<CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
X.data<float>(),
Y.data<float>(),
scale_data,
dY.data<float>(),
X.dim32(0),
X.dim32(1),
X.dim32(2),
X.dim32(3),
size_,
-beta_,
2.f * alpha_ * beta_ / size_,
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>);
} // namespace caffe2
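// --- Illustrative note (an assumed summary, not part of the Caffe2 source) ---
// Written out for a single input value x_c at channel c, with a window of `size`
// channels around c (pre_pad = (size - 1) / 2), the kernels above compute
//
//   scale_c = bias + (alpha / size) * sum_{j in window(c)} x_j^2
//   y_c     = x_c * scale_c^(-beta)
//
// and, for the backward pass (LRNComputeDiffNCHW / LRNComputeDiffNHWC),
//
//   dx_c = dy_c * scale_c^(-beta)
//          - (2 * alpha * beta / size) * x_c * sum_{j : c in window(j)} dy_j * y_j / scale_j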
|
124ec4b408b12cfc48e18f12404fe8a617d3a6b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
* ga_set_calculus_toolbox.cu // ga stands for geometry aware
* calculate gradient, hessian, curvature of the level set function
* with 4th order central scheme if there is no kink within the stencil
* otherwise, use 2nd order central scheme
* note that it seems very important to use a compact stencil
********************************************************************************/
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
__device__ inline
void set_kink(bool & local_kink, bool const * kink, int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int shift_ind;
int n = 2;
for(int i=-n; i<=n; i++){
for(int j=-n; j<=n; j++){
for(int k=-n; k<=n; k++){
shift_ind = sub2ind(row_idx + i, col_idx + j, pge_idx + k, rows, cols, pges);
local_kink = local_kink || kink[shift_ind];
if(local_kink){
return;
}
}
}
}
}
__global__
void ga_set_calculus_toolbox(double * Fx, double * Fy, double * Fz, double * FGradMag, double * Nx, double * Ny, double * Nz, double * Fxx, double * Fyy, double * Fzz, double * Fxy, double * Fyz, double * Fzx, double * FLaplacian, double * MeanCurvature, double * GaussianCurvature, double * Heaviside, double * DiracDelta, double const * lsf, bool const * kink, double const * HPrimal, int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int frnt_rght1 = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges);
int frnt_rght2 = sub2ind(row_idx+2, col_idx+2, pge_idx, rows, cols, pges);
int back_left1 = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges);
int back_left2 = sub2ind(row_idx-2, col_idx-2, pge_idx, rows, cols, pges);
int back_rght1 = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges);
int back_rght2 = sub2ind(row_idx-2, col_idx+2, pge_idx, rows, cols, pges);
int frnt_left1 = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges);
int frnt_left2 = sub2ind(row_idx+2, col_idx-2, pge_idx, rows, cols, pges);
int frnt_upup1 = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges);
int frnt_upup2 = sub2ind(row_idx+2, col_idx, pge_idx+2, rows, cols, pges);
int back_down1 = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges);
int back_down2 = sub2ind(row_idx-2, col_idx, pge_idx-2, rows, cols, pges);
int frnt_down1 = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges);
int frnt_down2 = sub2ind(row_idx+2, col_idx, pge_idx-2, rows, cols, pges);
int back_upup1 = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges);
int back_upup2 = sub2ind(row_idx-2, col_idx, pge_idx+2, rows, cols, pges);
int rght_upup1 = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges);
int rght_upup2 = sub2ind(row_idx, col_idx+2, pge_idx+2, rows, cols, pges);
int left_down1 = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges);
int left_down2 = sub2ind(row_idx, col_idx-2, pge_idx-2, rows, cols, pges);
int rght_down1 = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges);
int rght_down2 = sub2ind(row_idx, col_idx+2, pge_idx-2, rows, cols, pges);
int left_upup1 = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges);
int left_upup2 = sub2ind(row_idx, col_idx-2, pge_idx+2, rows, cols, pges);
double fx2 = (lsf[rght1] - lsf[left1]) / (2.0*dx);
double fy2 = (lsf[frnt1] - lsf[back1]) / (2.0*dy);
double fz2 = (lsf[upup1] - lsf[down1]) / (2.0*dz);
double fxx2 = (lsf[rght1] - 2.0*lsf[ind] + lsf[left1]) / (dx*dx);
double fyy2 = (lsf[frnt1] - 2.0*lsf[ind] + lsf[back1]) / (dy*dy);
double fzz2 = (lsf[upup1] - 2.0*lsf[ind] + lsf[down1]) / (dz*dz);
double fLaplacian2 = fxx2 + fyy2 + fzz2;
double fGradMag2 = max2(sqrt(fx2*fx2 + fy2*fy2 + fz2*fz2), 1e-14);
Fx[ind] = fx2;
Fy[ind] = fy2;
Fz[ind] = fz2;
Fxx[ind] = fxx2;
Fyy[ind] = fyy2;
Fzz[ind] = fzz2;
FLaplacian[ind] = fLaplacian2;
FGradMag[ind] = fGradMag2;
Nx[ind] = fx2 / fGradMag2;
Ny[ind] = fy2 / fGradMag2;
Nz[ind] = fz2 / fGradMag2;
double fxy2 = (lsf[frnt_rght1]+lsf[back_left1]-lsf[frnt_left1]-lsf[back_rght1]) / (4*ds*ds);
double fyz2 = (lsf[frnt_upup1]+lsf[back_down1]-lsf[frnt_down1]-lsf[back_upup1]) / (4*ds*ds);
double fzx2 = (lsf[rght_upup1]+lsf[left_down1]-lsf[rght_down1]-lsf[left_upup1]) / (4*ds*ds);
Fxy[ind] = fxy2;
Fyz[ind] = fyz2;
Fzx[ind] = fzx2;
// calculate mean curvature and use 4th order central scheme if possible
double fx4 = (-lsf[rght2] + 8.0*lsf[rght1] - 8.0*lsf[left1] + lsf[left2]) / (12.0*dx);
double fy4 = (-lsf[frnt2] + 8.0*lsf[frnt1] - 8.0*lsf[back1] + lsf[back2]) / (12.0*dy);
double fz4 = (-lsf[upup2] + 8.0*lsf[upup1] - 8.0*lsf[down1] + lsf[down2]) / (12.0*dz);
double fxx4 = (-lsf[rght2] + 16.0*lsf[rght1] - 30.0 * lsf[ind] + 16.0*lsf[left1] - lsf[left2]) / (12.0*dx*dx);
double fyy4 = (-lsf[frnt2] + 16.0*lsf[frnt1] - 30.0 * lsf[ind] + 16.0*lsf[back1] - lsf[back2]) / (12.0*dy*dy);
double fzz4 = (-lsf[upup2] + 16.0*lsf[upup1] - 30.0 * lsf[ind] + 16.0*lsf[down1] - lsf[down2]) / (12.0*dz*dz);
double fxy4 = (-lsf[frnt_rght2]-lsf[back_left2]+lsf[frnt_left2]+lsf[back_rght2]+16.0*lsf[frnt_rght1]+16.0*lsf[back_left1]-16.0*lsf[frnt_left1]-16.0*lsf[back_rght1]) / (48.0*dx*dy);
double fyz4 = (-lsf[frnt_upup2]-lsf[back_down2]+lsf[frnt_down2]+lsf[back_upup2]+16.0*lsf[frnt_upup1]+16.0*lsf[back_down1]-16.0*lsf[frnt_down1]-16.0*lsf[back_upup1]) / (48.0*dy*dz);
double fzx4 = (-lsf[rght_upup2]-lsf[left_down2]+lsf[rght_down2]+lsf[left_upup2]+16.0*lsf[rght_upup1]+16.0*lsf[left_down1]-16.0*lsf[rght_down1]-16.0*lsf[left_upup1]) / (48.0*dz*dx);
//bool local_kink_x = kink[rght2] || kink[rght1] || kink[ind] || kink[left1] || kink[left2];
//bool local_kink_y = kink[frnt2] || kink[frnt1] || kink[ind] || kink[back1] || kink[back2];
//bool local_kink_z = kink[upup2] || kink[upup1] || kink[ind] || kink[down1] || kink[down2];
//bool local_kink_xy = kink[frnt_rght2] || kink[back_left2] || kink[frnt_left2] || kink[back_rght2] || kink[frnt_rght1] || kink[back_left1] || kink[frnt_left1] || kink[back_rght1] || kink[ind];
//bool local_kink_yz = kink[frnt_upup2] || kink[back_down1] || kink[frnt_down2] || kink[back_upup2] || kink[frnt_upup1] || kink[back_down1] || kink[frnt_down1] || kink[back_upup1] || kink[ind];
//bool local_kink_zx = kink[rght_upup2] || kink[left_down2] || kink[rght_down2] || kink[left_upup2] || kink[rght_upup1] || kink[left_down1] || kink[rght_down1] || kink[left_upup1] || kink[ind];
//bool local_kink = local_kink_x || local_kink_y || local_kink_z || local_kink_xy || local_kink_yz || local_kink_zx;
bool local_kink = false;
set_kink(local_kink, kink, row_idx, col_idx, pge_idx, rows, cols, pges);
double fx = local_kink ? fx2 : fx4;
double fy = local_kink ? fy2 : fy4;
double fz = local_kink ? fz2 : fz4;
double fxx = local_kink ? fxx2 : fxx4;
double fyy = local_kink ? fyy2 : fyy4;
double fzz = local_kink ? fzz2 : fzz4;
double fLaplacian = fxx + fyy + fzz;
double fGradMag = max2(sqrt(fx*fx + fy*fy + fz*fz), 1e-14); // avoid singularity
double fxy = local_kink ? fxy2 : fxy4;
double fyz = local_kink ? fyz2 : fyz4;
double fzx = local_kink ? fzx2 : fzx4;
double col1 = fxx*fx + fxy*fy + fzx*fz;
double col2 = fxy*fx + fyy*fy + fyz*fz;
double col3 = fzx*fx + fyz*fy + fzz*fz;
MeanCurvature[ind] = - fLaplacian/fGradMag + (fx*col1+fy*col2+fz*col3)/pow(fGradMag,3);
// calculate Gaussian curvature
col1 = (fyy2*fzz2-fyz2*fyz2)*fx2 + (fzx2*fyz2-fxy2*fzz2)*fy2 + (fxy2*fyz2-fzx2*fyy2)*fz2;
col2 = (fyz2*fzx2-fxy2*fzz2)*fx2 + (fxx2*fzz2-fzx2*fzx2)*fy2 + (fzx2*fxy2-fxx2*fyz2)*fz2;
col3 = (fxy2*fyz2-fyy2*fzx2)*fx2 + (fzx2*fxy2-fxx2*fyz2)*fy2 + (fxx2*fyy2-fxy2*fxy2)*fz2;
GaussianCurvature[ind] = (fx2*col1+fy2*col2+fz2*col3) / pow(fGradMag2,4);
// calculate Heaviside function
double px = (HPrimal[rght1] - HPrimal[left1]) / (2*dx);
double py = (HPrimal[frnt1] - HPrimal[back1]) / (2*dy);
double pz = (HPrimal[upup1] - HPrimal[down1]) / (2*dz);
double dot_DHPrimal_DF = px*fx2 + py*fy2 + pz*fz2;
Heaviside[ind] = dot_DHPrimal_DF / pow(fGradMag2,2);
// calculate DiracDelta function
double pxx = (HPrimal[rght1] - 2*HPrimal[ind] + HPrimal[left1]) / (dx*dx);
double pyy = (HPrimal[frnt1] - 2*HPrimal[ind] + HPrimal[back1]) / (dy*dy);
double pzz = (HPrimal[upup1] - 2*HPrimal[ind] + HPrimal[down1]) / (dz*dz);
double pLaplacian = pxx + pyy + pzz;
DiracDelta[ind] = pLaplacian/pow(fGradMag2,2) - dot_DHPrimal_DF*fLaplacian2/pow(fGradMag2,4);
}
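// --- Illustrative note (an assumed summary, not part of the original source) ---
// The two stencils the kernel switches between, written out for the x-direction
// (y and z are analogous):
//
//   2nd order (used when a kink lies within the 5x5x5 neighbourhood):
//     f_x  ~ ( f[i+1] - f[i-1] ) / (2*dx)
//     f_xx ~ ( f[i+1] - 2*f[i] + f[i-1] ) / (dx*dx)
//
//   4th order (used on smooth stencils):
//     f_x  ~ ( -f[i+2] + 8*f[i+1] - 8*f[i-1] + f[i-2] ) / (12*dx)
//     f_xx ~ ( -f[i+2] + 16*f[i+1] - 30*f[i] + 16*f[i-1] - f[i-2] ) / (12*dx*dx)
//
// MeanCurvature above is -div( grad(f)/|grad(f)| ) expanded as
//     -Laplacian(f)/|grad(f)| + ( grad(f) . Hess(f) . grad(f) ) / |grad(f)|^3,
// which is exactly the expression assembled from fLaplacian, fGradMag and col1/col2/col3.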
| 124ec4b408b12cfc48e18f12404fe8a617d3a6b9.cu | /*********************************************************************************
* ga_set_calculus_toolbox.cu // ga stands for geometry aware
* calculate gradient, hessian, curvature of the level set function
* with 4th order central scheme if there is no kink within the stencil
* otherwise, use 2nd order central scheme
* note that it seems very important to use a compact stencil
********************************************************************************/
#include "shared_utilities.cuh"
#include "shared_utilities.cup"
__device__ inline
void set_kink(bool & local_kink, bool const * kink, int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int shift_ind;
int n = 2;
for(int i=-n; i<=n; i++){
for(int j=-n; j<=n; j++){
for(int k=-n; k<=n; k++){
shift_ind = sub2ind(row_idx + i, col_idx + j, pge_idx + k, rows, cols, pges);
local_kink = local_kink || kink[shift_ind];
if(local_kink){
return;
}
}
}
}
}
__global__
void ga_set_calculus_toolbox(double * Fx, double * Fy, double * Fz, double * FGradMag, double * Nx, double * Ny, double * Nz, double * Fxx, double * Fyy, double * Fzz, double * Fxy, double * Fyz, double * Fzx, double * FLaplacian, double * MeanCurvature, double * GaussianCurvature, double * Heaviside, double * DiracDelta, double const * lsf, bool const * kink, double const * HPrimal, int rows, int cols, int pges, double dx, double dy, double dz, double ds, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int frnt_rght1 = sub2ind(row_idx+1, col_idx+1, pge_idx, rows, cols, pges);
int frnt_rght2 = sub2ind(row_idx+2, col_idx+2, pge_idx, rows, cols, pges);
int back_left1 = sub2ind(row_idx-1, col_idx-1, pge_idx, rows, cols, pges);
int back_left2 = sub2ind(row_idx-2, col_idx-2, pge_idx, rows, cols, pges);
int back_rght1 = sub2ind(row_idx-1, col_idx+1, pge_idx, rows, cols, pges);
int back_rght2 = sub2ind(row_idx-2, col_idx+2, pge_idx, rows, cols, pges);
int frnt_left1 = sub2ind(row_idx+1, col_idx-1, pge_idx, rows, cols, pges);
int frnt_left2 = sub2ind(row_idx+2, col_idx-2, pge_idx, rows, cols, pges);
int frnt_upup1 = sub2ind(row_idx+1, col_idx, pge_idx+1, rows, cols, pges);
int frnt_upup2 = sub2ind(row_idx+2, col_idx, pge_idx+2, rows, cols, pges);
int back_down1 = sub2ind(row_idx-1, col_idx, pge_idx-1, rows, cols, pges);
int back_down2 = sub2ind(row_idx-2, col_idx, pge_idx-2, rows, cols, pges);
int frnt_down1 = sub2ind(row_idx+1, col_idx, pge_idx-1, rows, cols, pges);
int frnt_down2 = sub2ind(row_idx+2, col_idx, pge_idx-2, rows, cols, pges);
int back_upup1 = sub2ind(row_idx-1, col_idx, pge_idx+1, rows, cols, pges);
int back_upup2 = sub2ind(row_idx-2, col_idx, pge_idx+2, rows, cols, pges);
int rght_upup1 = sub2ind(row_idx, col_idx+1, pge_idx+1, rows, cols, pges);
int rght_upup2 = sub2ind(row_idx, col_idx+2, pge_idx+2, rows, cols, pges);
int left_down1 = sub2ind(row_idx, col_idx-1, pge_idx-1, rows, cols, pges);
int left_down2 = sub2ind(row_idx, col_idx-2, pge_idx-2, rows, cols, pges);
int rght_down1 = sub2ind(row_idx, col_idx+1, pge_idx-1, rows, cols, pges);
int rght_down2 = sub2ind(row_idx, col_idx+2, pge_idx-2, rows, cols, pges);
int left_upup1 = sub2ind(row_idx, col_idx-1, pge_idx+1, rows, cols, pges);
int left_upup2 = sub2ind(row_idx, col_idx-2, pge_idx+2, rows, cols, pges);
double fx2 = (lsf[rght1] - lsf[left1]) / (2.0*dx);
double fy2 = (lsf[frnt1] - lsf[back1]) / (2.0*dy);
double fz2 = (lsf[upup1] - lsf[down1]) / (2.0*dz);
double fxx2 = (lsf[rght1] - 2.0*lsf[ind] + lsf[left1]) / (dx*dx);
double fyy2 = (lsf[frnt1] - 2.0*lsf[ind] + lsf[back1]) / (dy*dy);
double fzz2 = (lsf[upup1] - 2.0*lsf[ind] + lsf[down1]) / (dz*dz);
double fLaplacian2 = fxx2 + fyy2 + fzz2;
double fGradMag2 = max2(sqrt(fx2*fx2 + fy2*fy2 + fz2*fz2), 1e-14);
Fx[ind] = fx2;
Fy[ind] = fy2;
Fz[ind] = fz2;
Fxx[ind] = fxx2;
Fyy[ind] = fyy2;
Fzz[ind] = fzz2;
FLaplacian[ind] = fLaplacian2;
FGradMag[ind] = fGradMag2;
Nx[ind] = fx2 / fGradMag2;
Ny[ind] = fy2 / fGradMag2;
Nz[ind] = fz2 / fGradMag2;
double fxy2 = (lsf[frnt_rght1]+lsf[back_left1]-lsf[frnt_left1]-lsf[back_rght1]) / (4*ds*ds);
double fyz2 = (lsf[frnt_upup1]+lsf[back_down1]-lsf[frnt_down1]-lsf[back_upup1]) / (4*ds*ds);
double fzx2 = (lsf[rght_upup1]+lsf[left_down1]-lsf[rght_down1]-lsf[left_upup1]) / (4*ds*ds);
Fxy[ind] = fxy2;
Fyz[ind] = fyz2;
Fzx[ind] = fzx2;
// calculate mean curvature and use 4th order central scheme if possible
double fx4 = (-lsf[rght2] + 8.0*lsf[rght1] - 8.0*lsf[left1] + lsf[left2]) / (12.0*dx);
double fy4 = (-lsf[frnt2] + 8.0*lsf[frnt1] - 8.0*lsf[back1] + lsf[back2]) / (12.0*dy);
double fz4 = (-lsf[upup2] + 8.0*lsf[upup1] - 8.0*lsf[down1] + lsf[down2]) / (12.0*dz);
double fxx4 = (-lsf[rght2] + 16.0*lsf[rght1] - 30.0 * lsf[ind] + 16.0*lsf[left1] - lsf[left2]) / (12.0*dx*dx);
double fyy4 = (-lsf[frnt2] + 16.0*lsf[frnt1] - 30.0 * lsf[ind] + 16.0*lsf[back1] - lsf[back2]) / (12.0*dy*dy);
double fzz4 = (-lsf[upup2] + 16.0*lsf[upup1] - 30.0 * lsf[ind] + 16.0*lsf[down1] - lsf[down2]) / (12.0*dz*dz);
double fxy4 = (-lsf[frnt_rght2]-lsf[back_left2]+lsf[frnt_left2]+lsf[back_rght2]+16.0*lsf[frnt_rght1]+16.0*lsf[back_left1]-16.0*lsf[frnt_left1]-16.0*lsf[back_rght1]) / (48.0*dx*dy);
double fyz4 = (-lsf[frnt_upup2]-lsf[back_down2]+lsf[frnt_down2]+lsf[back_upup2]+16.0*lsf[frnt_upup1]+16.0*lsf[back_down1]-16.0*lsf[frnt_down1]-16.0*lsf[back_upup1]) / (48.0*dy*dz);
double fzx4 = (-lsf[rght_upup2]-lsf[left_down2]+lsf[rght_down2]+lsf[left_upup2]+16.0*lsf[rght_upup1]+16.0*lsf[left_down1]-16.0*lsf[rght_down1]-16.0*lsf[left_upup1]) / (48.0*dz*dx);
//bool local_kink_x = kink[rght2] || kink[rght1] || kink[ind] || kink[left1] || kink[left2];
//bool local_kink_y = kink[frnt2] || kink[frnt1] || kink[ind] || kink[back1] || kink[back2];
//bool local_kink_z = kink[upup2] || kink[upup1] || kink[ind] || kink[down1] || kink[down2];
//bool local_kink_xy = kink[frnt_rght2] || kink[back_left2] || kink[frnt_left2] || kink[back_rght2] || kink[frnt_rght1] || kink[back_left1] || kink[frnt_left1] || kink[back_rght1] || kink[ind];
//bool local_kink_yz = kink[frnt_upup2] || kink[back_down2] || kink[frnt_down2] || kink[back_upup2] || kink[frnt_upup1] || kink[back_down1] || kink[frnt_down1] || kink[back_upup1] || kink[ind];
//bool local_kink_zx = kink[rght_upup2] || kink[left_down2] || kink[rght_down2] || kink[left_upup2] || kink[rght_upup1] || kink[left_down1] || kink[rght_down1] || kink[left_upup1] || kink[ind];
//bool local_kink = local_kink_x || local_kink_y || local_kink_z || local_kink_xy || local_kink_yz || local_kink_zx;
bool local_kink = false;
set_kink(local_kink, kink, row_idx, col_idx, pge_idx, rows, cols, pges);
double fx = local_kink ? fx2 : fx4;
double fy = local_kink ? fy2 : fy4;
double fz = local_kink ? fz2 : fz4;
double fxx = local_kink ? fxx2 : fxx4;
double fyy = local_kink ? fyy2 : fyy4;
double fzz = local_kink ? fzz2 : fzz4;
double fLaplacian = fxx + fyy + fzz;
double fGradMag = max2(sqrt(fx*fx + fy*fy + fz*fz), 1e-14); // avoid singularity
double fxy = local_kink ? fxy2 : fxy4;
double fyz = local_kink ? fyz2 : fyz4;
double fzx = local_kink ? fzx2 : fzx4;
double col1 = fxx*fx + fxy*fy + fzx*fz;
double col2 = fxy*fx + fyy*fy + fyz*fz;
double col3 = fzx*fx + fyz*fy + fzz*fz;
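// col1..col3 above form Hess(f) * grad(f); mean curvature = -div(grad f / |grad f|)
// = -Laplacian(f)/|grad f| + (grad f . Hess(f) . grad f)/|grad f|^3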
MeanCurvature[ind] = - fLaplacian/fGradMag + (fx*col1+fy*col2+fz*col3)/pow(fGradMag,3);
// calculate Gaussian curvature
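// K = (grad f . adj(Hess f) . grad f) / |grad f|^4, built from the 2nd-order derivatives only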
col1 = (fyy2*fzz2-fyz2*fyz2)*fx2 + (fzx2*fyz2-fxy2*fzz2)*fy2 + (fxy2*fyz2-fzx2*fyy2)*fz2;
col2 = (fyz2*fzx2-fxy2*fzz2)*fx2 + (fxx2*fzz2-fzx2*fzx2)*fy2 + (fzx2*fxy2-fxx2*fyz2)*fz2;
col3 = (fxy2*fyz2-fyy2*fzx2)*fx2 + (fzx2*fxy2-fxx2*fyz2)*fy2 + (fxx2*fyy2-fxy2*fxy2)*fz2;
GaussianCurvature[ind] = (fx2*col1+fy2*col2+fz2*col3) / pow(fGradMag2,4);
// calculate Heaviside function
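// recovered from the primal field: Heaviside = grad(HPrimal) . grad(f) / |grad f|^2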
double px = (HPrimal[rght1] - HPrimal[left1]) / (2*dx);
double py = (HPrimal[frnt1] - HPrimal[back1]) / (2*dy);
double pz = (HPrimal[upup1] - HPrimal[down1]) / (2*dz);
double dot_DHPrimal_DF = px*fx2 + py*fy2 + pz*fz2;
Heaviside[ind] = dot_DHPrimal_DF / pow(fGradMag2,2);
// calculate DiracDelta function
double pxx = (HPrimal[rght1] - 2*HPrimal[ind] + HPrimal[left1]) / (dx*dx);
double pyy = (HPrimal[frnt1] - 2*HPrimal[ind] + HPrimal[back1]) / (dy*dy);
double pzz = (HPrimal[upup1] - 2*HPrimal[ind] + HPrimal[down1]) / (dz*dz);
double pLaplacian = pxx + pyy + pzz;
DiracDelta[ind] = pLaplacian/pow(fGradMag2,2) - dot_DHPrimal_DF*fLaplacian2/pow(fGradMag2,4);
}
|
2e9eae3fe72348af175ae6e074128555b06480f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-5-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
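// Temporal blocking: kernel0_N fuses N = __side0Len time steps per launch; the overlapped
// (halo) region grows by __halo cells per fused step, so the interior tile shrinks accordingly
// (e.g. __side2Len = 22 plus 2*5 halo cells = 32 for the 5-step kernel).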
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
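// Reference path (scop == false): plain OpenMP triple loop applying the radius-1
// 7-point star stencil, ping-ponging between the two time planes of A.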
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 2e9eae3fe72348af175ae6e074128555b06480f2.cu | #include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-5-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
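// Temporal blocking: kernel0_N fuses N = __side0Len time steps per launch; the overlapped
// (halo) region grows by __halo cells per fused step, so the interior tile shrinks accordingly
// (e.g. __side2Len = 22 plus 2*5 halo cells = 32 for the 5-step kernel).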
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
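// Reference path (scop == false): plain OpenMP triple loop applying the radius-1
// 7-point star stencil, ping-ponging between the two time planes of A.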
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
ca6f45eb8a7e9e5f37b4b3066ea27d577bba7069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "Newton.h"
#include "Device.h"
#include "MathTools.h"
using cpu::IntervalI;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
Newton::Newton(int w, int h, int nMin, int nMax) :
variateurN(IntervalI(nMin, nMax), 1)
{
// Inputs
this->w = w;
this->h = h;
this->ptrDomaineMathInit = new DomaineMath(-1.3, -1.4, 1.4, 1.3);
// Tools
this->dg = dim3(8, 8, 1); // rough choice, to be tuned
this->db = dim3(16, 16, 1); // rough choice, to be tuned
//Outputs
this->title = "Newton_CUDA (Zoomable)";
// Check:
//print(dg, db);
Device::assertDim(dg, db);
}
Newton::~Newton()
{
delete ptrDomaineMathInit;
}
/*-------------------------*\
|* Methods *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*/
void Newton::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath)
{
    hipLaunchKernelGGL(newton, dg, db, 0, 0, ptrDevPixels, w, h, domaineMath, n);
}
/**
* Override
* Called periodically by the API
*/
void Newton::animationStep()
{
this->n = variateurN.varierAndGet(); // n varies in [nMin, nMax]
}
/*--------------*\
|* get *|
\*--------------*/
/**
* Override
*/
DomaineMath* Newton::getDomaineMathInit(void)
{
return ptrDomaineMathInit;
}
/**
* Override
*/
float Newton::getAnimationPara(void)
{
return n;
}
/**
* Override
*/
int Newton::getW(void)
{
return w;
}
/**
* Override
*/
int Newton::getH(void)
{
return h;
}
/**
* Override
*/
string Newton::getTitle(void)
{
return title;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| ca6f45eb8a7e9e5f37b4b3066ea27d577bba7069.cu | #include <assert.h>
#include "Newton.h"
#include "Device.h"
#include "MathTools.h"
using cpu::IntervalI;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
Newton::Newton(int w, int h, int nMin, int nMax) :
variateurN(IntervalI(nMin, nMax), 1)
{
// Inputs
this->w = w;
this->h = h;
this->ptrDomaineMathInit = new DomaineMath(-1.3, -1.4, 1.4, 1.3);
// Tools
this->dg = dim3(8, 8, 1); // rough choice, to be tuned
this->db = dim3(16, 16, 1); // rough choice, to be tuned
//Outputs
this->title = "Newton_CUDA (Zoomable)";
// Check:
//print(dg, db);
Device::assertDim(dg, db);
}
Newton::~Newton()
{
delete ptrDomaineMathInit;
}
/*-------------------------*\
|* Methods *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*/
void Newton::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath)
{
newton<<<dg,db>>>(ptrDevPixels,w,h,domaineMath, n);
}
/**
* Override
* Called periodically by the API
*/
void Newton::animationStep()
{
this->n = variateurN.varierAndGet(); // n varies in [nMin, nMax]
}
/*--------------*\
|* get *|
\*--------------*/
/**
* Override
*/
DomaineMath* Newton::getDomaineMathInit(void)
{
return ptrDomaineMathInit;
}
/**
* Override
*/
float Newton::getAnimationPara(void)
{
return n;
}
/**
* Override
*/
int Newton::getW(void)
{
return w;
}
/**
* Override
*/
int Newton::getH(void)
{
return h;
}
/**
* Override
*/
string Newton::getTitle(void)
{
return title;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
6db104d7b0c30e515047ec4c7903189553bbc0cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
// Standard CUDA API functions
#include <hip/hip_runtime_api.h>
__global__ void monochrome(uchar* input_data, uchar* output_data, int size)
{
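// input_data is interleaved 3-channel 8-bit data (cv::imread with IMREAD_COLOR yields BGR);
// each thread writes one output pixel as a fixed weighted sum of its three channels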
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) return;
output_data[index] = input_data[index * 3] * 0.3125f +
input_data[index * 3 + 1] * 0.5f +
input_data[index * 3 + 2] * 0.1875f;
}
int main()
{
std::string image_path = "../data/starry_night.png";
cv::Mat img = cv::imread(image_path, cv::IMREAD_COLOR);
if(img.empty())
{
std::cout << "Could not read the image: " << image_path << std::endl;
return 1;
}
cv::Mat output_img;
output_img.create(img.rows, img.cols, CV_8UC1);
/*
// on CPU
for (int i = 0; i < img.rows; i++)
{
for (int j = 0; j < img.cols; j++)
{
cv::Vec3b inp = img.at<cv::Vec3b>(i, j);
output_img.at<uchar>(i, j) = inp[0] * 0.1875f + inp[1] * 0.5f + inp[2] * 0.3125f;
}
}
*/
// /*
// on GPU
{
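// allocate device buffers sized from the cv::Mat byte counts, copy the input image to the
// device, launch one thread per pixel (only if the Mat is contiguous), then copy the result back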
uchar *input_data_d, *output_data_d;
// cannot use sizeof(img.data) here (it is only the size of a pointer)
int inp_bytes = img.total() * img.elemSize();
int out_bytes = output_img.total() * output_img.elemSize();
hipMalloc(&input_data_d, inp_bytes);
hipMalloc(&output_data_d, out_bytes);
if (img.isContinuous())
{
int pixel_count = img.rows * img.cols;
int block_size = 256;
int n_blocks = (pixel_count + block_size - 1) / block_size;
hipMemcpy(input_data_d, img.data, inp_bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( monochrome), dim3(n_blocks), dim3(block_size), 0, 0, input_data_d, output_data_d, pixel_count);
hipMemcpy(output_img.data, output_data_d, out_bytes, hipMemcpyDeviceToHost);
}
}
cv::imwrite("test.png", output_img);
// */
return 0;
} | 6db104d7b0c30e515047ec4c7903189553bbc0cc.cu | #include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
// Standard CUDA API functions
#include <cuda_runtime_api.h>
__global__ void monochrome(uchar* input_data, uchar* output_data, int size)
{
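// input_data is interleaved 3-channel 8-bit data (cv::imread with IMREAD_COLOR yields BGR);
// each thread writes one output pixel as a fixed weighted sum of its three channels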
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) return;
output_data[index] = input_data[index * 3] * 0.3125f +
input_data[index * 3 + 1] * 0.5f +
input_data[index * 3 + 2] * 0.1875f;
}
int main()
{
std::string image_path = "../data/starry_night.png";
cv::Mat img = cv::imread(image_path, cv::IMREAD_COLOR);
if(img.empty())
{
std::cout << "Could not read the image: " << image_path << std::endl;
return 1;
}
cv::Mat output_img;
output_img.create(img.rows, img.cols, CV_8UC1);
/*
// on CPU
for (int i = 0; i < img.rows; i++)
{
for (int j = 0; j < img.cols; j++)
{
cv::Vec3b inp = img.at<cv::Vec3b>(i, j);
output_img.at<uchar>(i, j) = inp[0] * 0.1875f + inp[1] * 0.5f + inp[2] * 0.3125f;
}
}
*/
// /*
// on GPU
{
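// allocate device buffers sized from the cv::Mat byte counts, copy the input image to the
// device, launch one thread per pixel (only if the Mat is contiguous), then copy the result back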
uchar *input_data_d, *output_data_d;
// cannot use sizeof(img.data) here (it is only the size of a pointer)
int inp_bytes = img.total() * img.elemSize();
int out_bytes = output_img.total() * output_img.elemSize();
cudaMalloc(&input_data_d, inp_bytes);
cudaMalloc(&output_data_d, out_bytes);
if (img.isContinuous())
{
int pixel_count = img.rows * img.cols;
int block_size = 256;
int n_blocks = (pixel_count + block_size - 1) / block_size;
cudaMemcpy(input_data_d, img.data, inp_bytes, cudaMemcpyHostToDevice);
monochrome<<<n_blocks, block_size>>>(input_data_d, output_data_d, pixel_count);
cudaMemcpy(output_img.data, output_data_d, out_bytes, cudaMemcpyDeviceToHost);
}
}
cv::imwrite("test.png", output_img);
// */
return 0;
} |
9b54a4490286c6785555d717cbf869bcb1f38057.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <memory>
#include <chrono>
#include <random>
__global__ void add(float* vec_a, float* vec_b, float* vec_c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
vec_c[i] = vec_a[i] + vec_b[i];
i += blockDim.x * gridDim.x;
}
}
int main(int args, char *argv[])
{
int n;
n = atoi(argv[1]);
float *vec_a, *vec_b, *vec_c;
std::unique_ptr<float[]> host_a(new float[n]);
std::unique_ptr<float[]> host_b(new float[n]);
std::unique_ptr<float[]> host_c(new float[n]);
hipMalloc((void**)&vec_a, n * sizeof(float));
hipMalloc((void**)&vec_b, n * sizeof(float));
hipMalloc((void**)&vec_c, n * sizeof(float));
for (int i = 0; i < n; i++)
{
std::random_device rand{};
host_a[i] = rand();
host_b[i] = rand();
host_c[i] = 0;
}
hipMemcpy(vec_a, host_a.get(), n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(vec_b, host_b.get(), n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(vec_c, host_c.get(), n*sizeof(float), hipMemcpyHostToDevice);
int blocksize = 128;
dim3 block (blocksize, 1, 1);
	dim3 grid ((n + blocksize - 1) / block.x, 1, 1); // ceiling division so every element is covered
std::chrono::system_clock::time_point start, end;
start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( add), dim3(grid), dim3(block), 0, 0, vec_a, vec_b, vec_c, n);
end = std::chrono::system_clock::now();
std::unique_ptr<float[]> host_result(new float[n]);
	hipMemcpy(host_result.get(), vec_c, n * sizeof(float), hipMemcpyDeviceToHost);
int checker = 0;
for (int i = 0; i < n; i++)
{
if (fabs(host_result[i] - (host_a[i] + host_b[i])) > 10e-8)
{
std::cout << "ng: " << host_result[i] << std::endl;
checker++;
}
}
if (checker == 0)
{
std::cout << "ok" << std::endl;
}
else
{
std::cout << checker << std::endl;
}
double time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
std::cout << "n: " << n << " threads: " << blocksize << std::endl;
std::cout << "time: " << time << " [ms]" << std::endl;
std::cout << "perf: " << n / time / 1e6 << " [Gflops/sec]" << std::endl;
hipFree(vec_a);
hipFree(vec_b);
hipFree(vec_c);
return 0;
}
| 9b54a4490286c6785555d717cbf869bcb1f38057.cu | #include <iostream>
#include <memory>
#include <chrono>
#include <random>
__global__ void add(float* vec_a, float* vec_b, float* vec_c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
	// grid-stride loop; the stride increment only matters when the grid has fewer threads than n
	while (i < n)
	{
		vec_c[i] = vec_a[i] + vec_b[i];
		i += blockDim.x * gridDim.x;
	}
}
int main(int args, char *argv[])
{
int n;
n = atoi(argv[1]);
float *vec_a, *vec_b, *vec_c;
std::unique_ptr<float[]> host_a(new float[n]);
std::unique_ptr<float[]> host_b(new float[n]);
std::unique_ptr<float[]> host_c(new float[n]);
cudaMalloc((void**)&vec_a, n * sizeof(float));
cudaMalloc((void**)&vec_b, n * sizeof(float));
cudaMalloc((void**)&vec_c, n * sizeof(float));
for (int i = 0; i < n; i++)
{
std::random_device rand{};
host_a[i] = rand();
host_b[i] = rand();
host_c[i] = 0;
}
cudaMemcpy(vec_a, host_a.get(), n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(vec_b, host_b.get(), n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(vec_c, host_c.get(), n*sizeof(float), cudaMemcpyHostToDevice);
int blocksize = 128;
dim3 block (blocksize, 1, 1);
	dim3 grid ((n + blocksize - 1) / block.x, 1, 1); // ceiling division so every element is covered
std::chrono::system_clock::time_point start, end;
start = std::chrono::system_clock::now();
add<<<grid, block>>>(vec_a, vec_b, vec_c, n);
end = std::chrono::system_clock::now();
std::unique_ptr<float[]> host_result(new float[n]);
	cudaMemcpy(host_result.get(), vec_c, n * sizeof(float), cudaMemcpyDeviceToHost);
int checker = 0;
for (int i = 0; i < n; i++)
{
if (fabs(host_result[i] - (host_a[i] + host_b[i])) > 10e-8)
{
std::cout << "ng: " << host_result[i] << std::endl;
checker++;
}
}
if (checker == 0)
{
std::cout << "ok" << std::endl;
}
else
{
std::cout << checker << std::endl;
}
double time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
std::cout << "n: " << n << " threads: " << blocksize << std::endl;
std::cout << "time: " << time << " [ms]" << std::endl;
std::cout << "perf: " << n / time / 1e6 << " [Gflops/sec]" << std::endl;
cudaFree(vec_a);
cudaFree(vec_b);
cudaFree(vec_c);
return 0;
}
|
8910e251e159b2279dcf73baefb5d43f57d71696.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "allocator.hpp"
#include "synchronize.hpp"
#include <device_launch_parameters.h>
namespace mufflon {
namespace memory_details {
__global__ void cuda_copy_element(const void* srcMem, void* dstMem, const std::size_t elemBytes,
const std::size_t count) {
if(threadIdx.x == 0 && threadIdx.y == 0)
for(std::size_t i = 0u; i < count; ++i)
			// offset the destination each iteration so the element is replicated count times
			memcpy(static_cast<char*>(dstMem) + i * elemBytes, srcMem, elemBytes);
}
// Element is the (host-side) source, targetMem the (device-side) destination
void copy_element(const void* element, void* targetMem, const std::size_t elemBytes,
const std::size_t count) {
void* deviceMem;
cuda::check_error(hipMalloc(&deviceMem, elemBytes));
hipMemcpy(deviceMem, element, elemBytes, hipMemcpyDefault);
hipLaunchKernelGGL(( cuda_copy_element), dim3(1), dim3(1024) , 0, 0, deviceMem, targetMem, elemBytes, count);
cuda::check_error(hipGetLastError());
cuda::check_error(hipFree(deviceMem));
}
} // namespace memory_details
} // namespace mufflon | 8910e251e159b2279dcf73baefb5d43f57d71696.cu | #include "allocator.hpp"
#include "synchronize.hpp"
#include <device_launch_parameters.h>
namespace mufflon {
namespace memory_details {
__global__ void cuda_copy_element(const void* srcMem, void* dstMem, const std::size_t elemBytes,
const std::size_t count) {
if(threadIdx.x == 0 && threadIdx.y == 0)
for(std::size_t i = 0u; i < count; ++i)
			// offset the destination each iteration so the element is replicated count times
			memcpy(static_cast<char*>(dstMem) + i * elemBytes, srcMem, elemBytes);
}
// Element is the (host-side) source, targetMem the (device-side) destination
void copy_element(const void* element, void* targetMem, const std::size_t elemBytes,
const std::size_t count) {
void* deviceMem;
cuda::check_error(cudaMalloc(&deviceMem, elemBytes));
cudaMemcpy(deviceMem, element, elemBytes, cudaMemcpyDefault);
cuda_copy_element<<< 1, 1024 >>>(deviceMem, targetMem, elemBytes, count);
cuda::check_error(cudaGetLastError());
cuda::check_error(cudaFree(deviceMem));
}
} // namespace memory_details
} // namespace mufflon |
756a5283705134771996ec96ce452efdbb0d810a.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
//#define BPTH 3 //Du //bypass threshold
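// Forward pass: each block stages a HEIGHT x WIDTH tile of input-to-hidden weights in
// shared memory, scales every row by its input activation, then tree-reduces over the
// rows so that row 0 holds one partial dot product per hidden unit; the tx == 0 threads
// write those partial sums to hidden_partial_sum for later accumulation.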
__global__ void bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid ) //Du
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if( threadIdx.x==0 && threadIdx.y==0 && blockIdx.x==0 && blockIdx.y==0) printf("%d, %d\n", blockDim.x, blockDim.y);
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 ) input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];//Du: both are in shared memory
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx]; //Du TODO: it is a store, need changing?
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; //Du TODO: a store
}
}
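// Weight update: each thread applies the delta rule with momentum (ETA and MOMENTUM come
// from backprop.h) to one input-to-hidden weight; block (0,0), thread row 0 additionally
// updates row 0 of the weight array, which corresponds to the constant bias unit.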
__global__ void bpnn_adjust_weights_cuda(float * delta, int hid,
float * ly,
int in,
float * w,
float * oldw )
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if( threadIdx.x==0 && threadIdx.y==0 && blockIdx.x==0 && blockIdx.y==0) printf("%d, %d\n", blockDim.x, blockDim.y);
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));//TODO: worth optimization
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0)
{
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
| 756a5283705134771996ec96ce452efdbb0d810a.cu |
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
//#define BPTH 3 //Du //bypass threshold
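// Forward pass: each block stages a HEIGHT x WIDTH tile of input-to-hidden weights in
// shared memory, scales every row by its input activation, then tree-reduces over the
// rows so that row 0 holds one partial dot product per hidden unit; the tx == 0 threads
// write those partial sums to hidden_partial_sum for later accumulation.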
__global__ void bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid ) //Du
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if( threadIdx.x==0 && threadIdx.y==0 && blockIdx.x==0 && blockIdx.y==0) printf("%d, %d\n", blockDim.x, blockDim.y);
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 ) input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];//Du: both are in shared memory
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx]; //Du TODO: it is a store, need changing?
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; //Du TODO: a store
}
}
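// Weight update: each thread applies the delta rule with momentum (ETA and MOMENTUM come
// from backprop.h) to one input-to-hidden weight; block (0,0), thread row 0 additionally
// updates row 0 of the weight array, which corresponds to the constant bias unit.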
__global__ void bpnn_adjust_weights_cuda(float * delta, int hid,
float * ly,
int in,
float * w,
float * oldw )
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// if( threadIdx.x==0 && threadIdx.y==0 && blockIdx.x==0 && blockIdx.y==0) printf("%d, %d\n", blockDim.x, blockDim.y);
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));//TODO: worth optimization
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0)
{
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
6b7e9f76347957c8435a883db6e447d400055583.hip | // !!! This is a file automatically generated by hipify!!!
//#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <hip/hip_runtime.h>
//using namespace std;
//#include <ctime>
//#include "hip/hip_runtime.h"
//#include "hiprand/hiprand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//#define TRAIN_NUM 60000
//#define TEST_NUM 10000
//#define ROW 4
//#define COL 4
//#define FC1_SIZE 2
//#define FC2_SIZE 10
//
//float fc1_b[FC1_SIZE];
//float fc1_w[FC1_SIZE][ROW][COL];
//float fc2_b[FC2_SIZE];
//float fc2_w[FC2_SIZE][FC1_SIZE];
//
//__constant__ float _alpha;
//__constant__ int _minibatch;
//__constant__ int _epochs;
//
//__device__ int _correct_cnt;
//__device__ float _avg_error;
//
//int correct_cnt=3;
//float avg_error=2;
//float max_acc;
//
//float alpha = 0.2;
//int epochs = 5;
//int minibatch = 1;
//
//float train_image[TRAIN_NUM][ROW][COL];
//int train_label[TRAIN_NUM];
//float test_image[TEST_NUM][ROW][COL];
//int test_label[TEST_NUM];
//
//__device__ float _train_image[TRAIN_NUM][ROW][COL];
//__device__ int _train_label[TRAIN_NUM];
//__device__ float _test_image[TEST_NUM][ROW][COL];
//__device__ int _test_label[TEST_NUM];
//
//__device__ float _fc1_b[FC1_SIZE];
//__device__ float _fc1_w[FC1_SIZE][ROW][COL];
//__device__ float _fc2_b[FC2_SIZE];
//__device__ float _fc2_w[FC2_SIZE][FC1_SIZE];
//
//__device__ float _input[ROW][COL];
//__device__ float _fc1_z[FC1_SIZE];
//__device__ float _fc1_a[FC1_SIZE];
//__device__ float _fc2_z[FC2_SIZE];
//__device__ float _fc2_a[FC2_SIZE];
//__device__ float _output[FC2_SIZE];
//__device__ int _answer[FC2_SIZE];
//
//__device__ float _fc1_db[FC1_SIZE];
//__device__ float _fc1_dw[FC1_SIZE][ROW][COL];
//__device__ float _fc2_db[FC2_SIZE];
//__device__ float _fc2_dw[FC2_SIZE][FC1_SIZE];
//__device__ float _C[FC2_SIZE];
//__device__ float _fc2_delta[FC2_SIZE];
//__device__ float _fc1_delta[FC1_SIZE];
//
//__device__ int tmp;
//int swap_endian(int val)
//{
// unsigned char c1, c2, c3, c4;
// c1 = val & 255;
// c2 = (val >> 8) & 255;
// c3 = (val >> 16) & 255;
// c4 = (val >> 24) & 255;
// return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
//}
//void load_data()
//{
// FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb");
// FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb");
//
// int tmp;
//
// int magic_num;
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// // printf("debug:%d\n",swap_endian(magic_num));
//
// int train_size;
// fread(&train_size, sizeof(int), 1, f_images);
// fread(&train_size, sizeof(int), 1, f_labels);
// train_size = swap_endian(train_size);
//
// // printf("debug:%d\n",swap_endian(train_size));
//
// int rows, cols;
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// // printf("debug:%d\n",swap_endian(rows));
// // printf("debug:%d\n",swap_endian(cols));
//
// for (int i = 0;i < train_size;i++)
// {
// fread(&train_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Training labels : Already read %5d labels\r", i);
// // printf("%d:debug:%d\r",i,train_label[i]);
// // system("pause");
// }
// printf("Training labels : Already read %5d labels\n", train_size);
//
// for (int i = 0;i < train_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// train_image[i][j][k] = tmp;
// train_image[i][j][k] /= 255;
// // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]);
// // system("pause");
// }
// if (i % 1000 == 0)
// printf("Training images : Already read %5d images\r", i);
// }
// printf("Training images : Already read %5d images\n", train_size);
//
// fclose(f_images);
// fclose(f_labels);
//
// f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb");
// f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb");
//
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// int test_size;
// fread(&test_size, sizeof(int), 1, f_images);
// fread(&test_size, sizeof(int), 1, f_labels);
// test_size = swap_endian(test_size);
//
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// for (int i = 0;i < test_size;i++)
// {
// fread(&test_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Testing labels : Already read %5d labels\r", i);
// }
// printf("Testing labels : Already read %5d labels\n", test_size);
//
// for (int i = 0;i < test_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// test_image[i][j][k] = tmp;
// test_image[i][j][k] /= 255;
// }
// if (i % 1000 == 0)
// printf("Testing images : Already read %5d images\r", i);
// }
// printf("Testing images : Already read %5d images\n\n", test_size);
//
// fclose(f_images);
// fclose(f_labels);
//}
//__device__ float _sigmoid(float x)
//{
// return (1 / (1 + exp(-1 * x)));
//}
//
//__global__ void _set_input_train(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _train_image[idx][ix][iy];
// }
//}
//
//__global__ void _set_input_test(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _test_image[idx][ix][iy];
// }
//}
//
//void set_input_gpu_train(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_train << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//void set_input_gpu_test(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_test << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//__global__ void _input_fc1()
//{
// int ib = blockIdx.x;
// int ix = threadIdx.x;
// int iy = threadIdx.y;
// _fc1_z[ib] = 0;
// for (int l = 0;l < ROW;l++)
// for (int m = 0;m < COL;m++)
// _fc1_z[ib] += _input[l][m] * _fc1_w[ib][l][m];
// _fc1_z[ib] += _fc1_b[ib];
// _fc1_a[ib] = _sigmoid(_fc1_z[ib]);
///* __shared__ float data[ROW*COL];
// int tid = threadIdx.x+threadIdx.y;
// data[tid] = _input[ix][iy] * _fc1_w[ib][ix][iy];*/
// /*__syncthreads();
// for (int s = blockDim.x / 2; s > 0; s >>= 1) {
// if (tid < s)
// data[tid] += data[tid + s];
// __syncthreads();
// }
// if (tid == 0) {
// _fc1_z[ib]= data[0];
// data[0] += _fc1_b[ib];
// _fc1_a[ib] = _sigmoid(data[0]);
// }*/
//}
//
//
//__global__ void _fc1_fc2()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_z[i] = 0;
// for (int j = 0;j < FC1_SIZE;j++)
// _fc2_z[i] += _fc1_a[j] * _fc2_w[i][j];
// _fc2_z[i] += _fc2_b[i];
// _fc2_a[i] = _sigmoid(_fc2_z[i]);
// }
//}
//
//void fc1_fc2_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _fc1_fc2 << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _set_answer_train(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_train_label[idx] == i) ? 1 : 0;
// }
//}
//
//__global__ void _set_answer_test(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_test_label[idx] == i) ? 1 : 0;
// }
//}
//
//void set_answer_gpu_train(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_train << <block, grid >> > (idx);
// //hipDeviceSynchronize();
//}
//
//void set_answer_gpu_test(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_test << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//__global__ void _check_answer_get_error()
//{
// float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// //printf("Correct: %d", _correct_cnt);
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }
// /*if (j && j % 100 == 0)
// {
// printf("Accuracy : %0.4f%% Error : %0.4f%% \r", ((float)_correct_cnt / j) * 100, (_avg_error / j) * 100);
// }*/
//}
//
//void check_answer_get_error_gpu()
//{
// _check_answer_get_error << <1, 1 >> > ();
// hipDeviceSynchronize();
//}
////#include "bp_gpu.cuh"
//
//__global__ void _update_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_delta[i] = _alpha * _C[i] * (_fc2_a[i] * (1.0 - _fc2_a[i]));
// _fc2_db[i] += _fc2_delta[i];
// }
//}
//
//__global__ void _update_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// _fc2_dw[i][j] += _fc2_delta[i] * _fc1_a[j];
//}
//
//void update_fc2_w_gpu()
//{
// dim3 block(32, 32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1, (FC1_SIZE - 1) / block.x + 1);
// _update_fc2_w << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _update_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// float error = 0;
// for (int j = 0;j < FC2_SIZE;j++)
// error += _fc2_delta[j] * _fc2_w[j][i];
// _fc1_delta[i] = error * (_fc1_a[i] * (1.0 - _fc1_a[i]));
// _fc1_db[i] += _fc1_delta[i];
// }
//}
//
//__global__ void _update_fc1_w()
//{
// int i = blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
//
// _fc1_dw[i][k][l] += _fc1_delta[i] * _input[k][l];
//}
//
//
//__global__ void assign_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_b[i] -= (_fc2_db[i] / _minibatch);
// _fc2_db[i] = 0;
// }
//}
//
//__global__ void assign_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// {
// _fc2_w[i][j] -= (_fc2_dw[i][j] / _minibatch);
// _fc2_dw[i][j] = 0;
// }
//}
//
//__global__ void assign_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_b[i] -= (_fc1_db[i] / _minibatch);
// _fc1_db[i] = 0;
// }
//}
//
//__global__ void assign_fc1_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// _fc1_w[blockIdx.x][k][l] -= (_fc1_dw[blockIdx.x][k][l] / _minibatch);
// _fc1_dw[blockIdx.x][k][l] = 0;
//
//}
//
//
//void assign_grads_gpu()
//{
// dim3 block1(32);
// dim3 grid1((FC2_SIZE - 1) / block1.x + 1);
// assign_fc2_b << <block1, grid1 >> > ();
//
// dim3 block2(32, 32);
// dim3 grid2((FC2_SIZE - 1) / block2.x + 1, (FC1_SIZE - 1) / block2.y + 1);
// assign_fc2_w << <block2, grid2 >> > ();
//
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// assign_fc1_b << <block3, grid3 >> > ();
//
// dim3 block4(8, 8, 8);
// //dim3 grid4((FC1_SIZE - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// assign_fc1_w << <block4, dim3(1, 28, 28) >> > ();
//
//
// hipDeviceSynchronize();
//}
//void init_data_gpu()
//{
// hipMemcpyToSymbol(_train_image, train_image, TRAIN_NUM * ROW * COL * sizeof(float));
// hipMemcpyToSymbol(_train_label, train_label, sizeof(train_label));
// hipMemcpyToSymbol(_test_image, test_image, TEST_NUM * ROW * COL * sizeof(float));
// hipMemcpyToSymbol(_test_label, test_label, sizeof(test_label));
//}
//float get_rand(float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)rand() / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//void init_params()
//{
// /*for (int i = 0;i < CONV_W_NUM;i++)
// {
// for (int j = 0;j < CONV_W_SIZE;j++)
// for (int k = 0;k < CONV_W_SIZE;k++)
// conv_w[i][j][k] = get_rand(CONV_W_SIZE * CONV_W_SIZE);
// conv_b[i] = get_rand(CONV_W_SIZE * CONV_W_SIZE);
// }
//
// for (int i = 0;i < FC1_SIZE;i++)
// {
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// fc1_w[i][j][k][l] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// fc1_b[i] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// }*/
//
// for (int i = 0;i < FC1_SIZE;i++)
// {
// for (int j = 0;j < ROW;j++)
// for (int k = 0;k < COL;k++)
// fc1_w[i][j][k] = get_rand(ROW*COL);
// fc1_b[i] = get_rand(ROW*COL);
// }
//
// for (int i = 0;i < FC2_SIZE;i++)
// {
// for (int j = 0;j < FC1_SIZE;j++)
// fc2_w[i][j] = get_rand(FC1_SIZE);
// fc2_b[i] = get_rand(FC1_SIZE);
// }
//}
//
//__global__ void _test() {
// _correct_cnt = _correct_cnt + 10;
// _avg_error = _avg_error + 16.35;
// /*float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }*/
//}
//float matrixMult(float a[ROW][COL], float b[FC1_SIZE][ROW][COL], int k) {
// float c=0;
// for (int i = 0;i < ROW;i++) {
// for (int j = 0;j < COL;j++) {
// c += a[i][j] * b[k][i][j];
// }
// }
// return c;
//}
//int main() {
//
//
//
// load_data();
// clock_t t = clock();
// hipMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// hipMemcpyToSymbol(_minibatch, &minibatch, sizeof(int));
// hipMemcpyToSymbol(_epochs, &epochs, sizeof(int));
// init_data_gpu();
// init_params();
// hipMemcpyToSymbol(_fc1_b, fc1_b, FC1_SIZE* sizeof(float));
// hipMemcpyToSymbol(_fc1_w, fc1_w, FC1_SIZE*COL*ROW* sizeof(float));
// hipMemcpyToSymbol(_fc2_b, fc2_b, FC2_SIZE* sizeof(float));
// hipMemcpyToSymbol(_fc2_w, fc2_w, FC1_SIZE * FC2_SIZE * sizeof(float));
// //dim3 block_set_input(1, 1);
// //dim3 grid_set_input((ROW - 1) / block_set_input.x + 1, (COL - 1) / block_set_input.y + 1);
// dim3 grid_set_input(28,28);
// dim3 block_input(8, 8, 8);
// //dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_SIZE - 1) / block.y + 1, (CONV_SIZE - 1) / block.z + 1);
// dim3 grid_input(1,24,24);
//
// for (int i = 1;i <= epochs;i++)
// {
// int value1 = 1;
// float value2 = 1;
// hipMemcpyToSymbol(_correct_cnt, &value1, sizeof(int));
// hipMemcpyToSymbol(_avg_error, &value2, sizeof(float));
// hipDeviceSynchronize();
//
// for (int j = 1;j < TRAIN_NUM; j++)
// {
// _set_input_train << <1, dim3(ROW, COL) >> > (j);
// _input_fc1 << <FC1_SIZE, dim3(ROW, COL) >> > ();
// /*_fc1_fc2 << <10, FC1_SIZE >> > ();
// set_answer_gpu_train(j);
// _check_answer_get_error << <1, 1 >> > ();
//
// _update_fc2_b << <1, 10 >> > ();
// _update_fc2_w << <10, FC1_SIZE >> > ();
// _update_fc1_b << <1, FC1_SIZE >> > ();
// _update_fc1_w << <FC1_SIZE, dim3(1, 28, 28) >> > ();
// if ((j + 1) % minibatch == 0)
// assign_grads_gpu();
//
// if (j && j % 100 == 0)
// {
// hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i);
// }*/
// }
// }
// // hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i);
//
// // correct_cnt = 0;
// // avg_error = 0;
// // hipMemcpyToSymbol(_correct_cnt, &correct_cnt, sizeof(int));
// // hipMemcpyToSymbol(_avg_error, &avg_error, sizeof(float));
//
// // for (int j = 0;j < TEST_NUM;j++)
// // {
// // _set_input_test << <1, dim3(28, 28) >> > (j);
// // _input_fc1 << <400, dim3(28, 28) >> > ();
// // _fc1_fc2 << <10, 400 >> > ();
// // set_answer_gpu_test(j);
// // check_answer_get_error_gpu();
//
// // if (j && j % 100 == 0)
// // {
// // hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100);
// // }
// // }
// // hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100);
//
// // if ((float)correct_cnt / TEST_NUM * 100 > max_acc)
// // {
// // max_acc = (float)correct_cnt / TEST_NUM * 100;
// // //export_params();
// // printf("The new model has been exported.Accuracy has reached to %0.5f%%\n\n", max_acc);
// // }
// // else
// // {
// // alpha = alpha - (alpha / 3);
// // hipMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// // printf("Learning rate has been reduced to %f\n\n", alpha);
// // }
// //}
//
//
// float train_image[ROW][COL] = {
// { 3, 1, 2, 4 },
// { 2, 4, 3, 1 },
// { 1, 5, 2, 3 },
// { 2, 3, 4, 1 }};
//
// float fc1_w[FC1_SIZE][ROW][COL] = { {
// {1, 2, 3, 4},
// {4, 3, 1, 1},
// {1, 2, 4, 3},
// {1, 3, 2, 1}},
// {{4, 2, 5, 7},
// {2, 3, 1, 3},
// {1, 2, 3, 1},
// {4, 2, 5, 7}} };
// cout << matrixMult(train_image, fc1_w, 0) << endl;
// cout << matrixMult(train_image, fc1_w, 1) << endl;
//
// /*float train_image[ROW][COL] = {
// { 3, 1, 2, 4, 3, 3 },
// { 2, 4, 3, 1, 1, 4 },
// { 1, 5, 2, 3, 2, 5 },
// { 2, 3, 4, 1, 4, 1 },
// { 1, 4, 2, 1, 2, 3 },
// { 2, 3, 6, 5, 4, 1 }, };
// float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE] = { {
// {1, 2, 3},
// {4, 3, 1},
// {1, 2, 4}},
// {{4, 2, 5},
// {2, 3, 1},
// {1, 2, 3}} };*/
//
// //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
// float conv_z[2][2][2];
// float train_label[2] = { 3,2 };
// float testOut[FC1_SIZE];
//
// hipMemcpyToSymbol(_input, train_image, ROW * COL * sizeof(float));
// hipMemcpyToSymbol(_fc1_w, fc1_w, FC1_SIZE*ROW*COL*sizeof(float));
// //hipMemcpyToSymbol(_conv_w, conv_w, CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE * sizeof(float));
// //hipMemcpy(_train_label, train_label, 2 * sizeof(float), hipMemcpyHostToDevice);
// //hipMemcpy(_train_image, train_image, ROW * COL * sizeof(float), hipMemcpyHostToDevice);
// //hipMemcpy(_conv_w, conv_w, CONV_W_NUM*CONV_W_SIZE*CONV_W_SIZE*sizeof(float), hipMemcpyHostToDevice);
// dim3 grid2(ROW, COL);
//
// //_input_conv << <1, grid2>> > ((float (*)[4])_train_image, (float (*)[3][3])_conv_w, (float (*)[2][2])_conv_z);
// _input_fc1 << <FC1_SIZE, grid2 >> > ();
// //_conv_pool << <1, grid2 >> > ();
// //hipMemcpyFromSymbol(&conv_z, _pool, CONV_W_NUM * CONV_SIZE * CONV_SIZE * sizeof(float));
// //hipMemcpyFromSymbol(&conv_z, _pool, 8 * sizeof(float));
// hipMemcpyFromSymbol(&testOut, _fc1_z, FC1_SIZE * sizeof(float));
// for (int i = 0;i < FC1_SIZE;i++) {
// cout << testOut[i] << endl;
// }
// /*for (int i = 0;i < 2;i++) {
// for (int j = 0;j <2;j++) {
// cout << conv_z[0][i][j] << " ";
// }
// cout << endl;
// }
// for (int i = 0;i < 2;i++) {
// for (int j = 0;j < 2;j++) {
// cout << conv_z[1][i][j] << " ";
// }
// cout << endl;
// }*/
// return 0;
//} | 6b7e9f76347957c8435a883db6e447d400055583.cu | //#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <cuda.h>
//using namespace std;
//#include <ctime>
//#include "cuda_runtime.h"
//#include "curand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//#define TRAIN_NUM 60000
//#define TEST_NUM 10000
//#define ROW 4
//#define COL 4
//#define FC1_SIZE 2
//#define FC2_SIZE 10
//
//float fc1_b[FC1_SIZE];
//float fc1_w[FC1_SIZE][ROW][COL];
//float fc2_b[FC2_SIZE];
//float fc2_w[FC2_SIZE][FC1_SIZE];
//
//__constant__ float _alpha;
//__constant__ int _minibatch;
//__constant__ int _epochs;
//
//__device__ int _correct_cnt;
//__device__ float _avg_error;
//
//int correct_cnt=3;
//float avg_error=2;
//float max_acc;
//
//float alpha = 0.2;
//int epochs = 5;
//int minibatch = 1;
//
//float train_image[TRAIN_NUM][ROW][COL];
//int train_label[TRAIN_NUM];
//float test_image[TEST_NUM][ROW][COL];
//int test_label[TEST_NUM];
//
//__device__ float _train_image[TRAIN_NUM][ROW][COL];
//__device__ int _train_label[TRAIN_NUM];
//__device__ float _test_image[TEST_NUM][ROW][COL];
//__device__ int _test_label[TEST_NUM];
//
//__device__ float _fc1_b[FC1_SIZE];
//__device__ float _fc1_w[FC1_SIZE][ROW][COL];
//__device__ float _fc2_b[FC2_SIZE];
//__device__ float _fc2_w[FC2_SIZE][FC1_SIZE];
//
//__device__ float _input[ROW][COL];
//__device__ float _fc1_z[FC1_SIZE];
//__device__ float _fc1_a[FC1_SIZE];
//__device__ float _fc2_z[FC2_SIZE];
//__device__ float _fc2_a[FC2_SIZE];
//__device__ float _output[FC2_SIZE];
//__device__ int _answer[FC2_SIZE];
//
//__device__ float _fc1_db[FC1_SIZE];
//__device__ float _fc1_dw[FC1_SIZE][ROW][COL];
//__device__ float _fc2_db[FC2_SIZE];
//__device__ float _fc2_dw[FC2_SIZE][FC1_SIZE];
//__device__ float _C[FC2_SIZE];
//__device__ float _fc2_delta[FC2_SIZE];
//__device__ float _fc1_delta[FC1_SIZE];
//
//__device__ int tmp;
//int swap_endian(int val)
//{
// unsigned char c1, c2, c3, c4;
// c1 = val & 255;
// c2 = (val >> 8) & 255;
// c3 = (val >> 16) & 255;
// c4 = (val >> 24) & 255;
// return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
//}
//void load_data()
//{
// FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb");
// FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb");
//
// int tmp;
//
// int magic_num;
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// // printf("debug:%d\n",swap_endian(magic_num));
//
// int train_size;
// fread(&train_size, sizeof(int), 1, f_images);
// fread(&train_size, sizeof(int), 1, f_labels);
// train_size = swap_endian(train_size);
//
// // printf("debug:%d\n",swap_endian(train_size));
//
// int rows, cols;
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// // printf("debug:%d\n",swap_endian(rows));
// // printf("debug:%d\n",swap_endian(cols));
//
// for (int i = 0;i < train_size;i++)
// {
// fread(&train_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Training labels : Already read %5d labels\r", i);
// // printf("%d:debug:%d\r",i,train_label[i]);
// // system("pause");
// }
// printf("Training labels : Already read %5d labels\n", train_size);
//
// for (int i = 0;i < train_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// train_image[i][j][k] = tmp;
// train_image[i][j][k] /= 255;
// // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]);
// // system("pause");
// }
// if (i % 1000 == 0)
// printf("Training images : Already read %5d images\r", i);
// }
// printf("Training images : Already read %5d images\n", train_size);
//
// fclose(f_images);
// fclose(f_labels);
//
// f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb");
// f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb");
//
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// int test_size;
// fread(&test_size, sizeof(int), 1, f_images);
// fread(&test_size, sizeof(int), 1, f_labels);
// test_size = swap_endian(test_size);
//
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// for (int i = 0;i < test_size;i++)
// {
// fread(&test_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Testing labels : Already read %5d labels\r", i);
// }
// printf("Testing labels : Already read %5d labels\n", test_size);
//
// for (int i = 0;i < test_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// test_image[i][j][k] = tmp;
// test_image[i][j][k] /= 255;
// }
// if (i % 1000 == 0)
// printf("Testing images : Already read %5d images\r", i);
// }
// printf("Testing images : Already read %5d images\n\n", test_size);
//
// fclose(f_images);
// fclose(f_labels);
//}
//__device__ float _sigmoid(float x)
//{
// return (1 / (1 + exp(-1 * x)));
//}
//
//__global__ void _set_input_train(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _train_image[idx][ix][iy];
// }
//}
//
//__global__ void _set_input_test(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _test_image[idx][ix][iy];
// }
//}
//
//void set_input_gpu_train(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_train << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
//void set_input_gpu_test(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_test << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
//__global__ void _input_fc1()
//{
// int ib = blockIdx.x;
// int ix = threadIdx.x;
// int iy = threadIdx.y;
// _fc1_z[ib] = 0;
// for (int l = 0;l < ROW;l++)
// for (int m = 0;m < COL;m++)
// _fc1_z[ib] += _input[l][m] * _fc1_w[ib][l][m];
// _fc1_z[ib] += _fc1_b[ib];
// _fc1_a[ib] = _sigmoid(_fc1_z[ib]);
///* __shared__ float data[ROW*COL];
// int tid = threadIdx.x+threadIdx.y;
// data[tid] = _input[ix][iy] * _fc1_w[ib][ix][iy];*/
// /*__syncthreads();
// for (int s = blockDim.x / 2; s > 0; s >>= 1) {
// if (tid < s)
// data[tid] += data[tid + s];
// __syncthreads();
// }
// if (tid == 0) {
// _fc1_z[ib]= data[0];
// data[0] += _fc1_b[ib];
// _fc1_a[ib] = _sigmoid(data[0]);
// }*/
//}
//
//
//__global__ void _fc1_fc2()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_z[i] = 0;
// for (int j = 0;j < FC1_SIZE;j++)
// _fc2_z[i] += _fc1_a[j] * _fc2_w[i][j];
// _fc2_z[i] += _fc2_b[i];
// _fc2_a[i] = _sigmoid(_fc2_z[i]);
// }
//}
//
//void fc1_fc2_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _fc1_fc2 << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _set_answer_train(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_train_label[idx] == i) ? 1 : 0;
// }
//}
//
//__global__ void _set_answer_test(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_test_label[idx] == i) ? 1 : 0;
// }
//}
//
//void set_answer_gpu_train(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_train << <block, grid >> > (idx);
// //cudaDeviceSynchronize();
//}
//
//void set_answer_gpu_test(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_test << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
//__global__ void _check_answer_get_error()
//{
// float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// //printf("Correct: %d", _correct_cnt);
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }
// /*if (j && j % 100 == 0)
// {
// printf("Accuracy : %0.4f%% Error : %0.4f%% \r", ((float)_correct_cnt / j) * 100, (_avg_error / j) * 100);
// }*/
//}
//
//void check_answer_get_error_gpu()
//{
// _check_answer_get_error << <1, 1 >> > ();
// cudaDeviceSynchronize();
//}
////#include "bp_gpu.cuh"
//
//__global__ void _update_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_delta[i] = _alpha * _C[i] * (_fc2_a[i] * (1.0 - _fc2_a[i]));
// _fc2_db[i] += _fc2_delta[i];
// }
//}
//
//__global__ void _update_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// _fc2_dw[i][j] += _fc2_delta[i] * _fc1_a[j];
//}
//
//void update_fc2_w_gpu()
//{
// dim3 block(32, 32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1, (FC1_SIZE - 1) / block.x + 1);
// _update_fc2_w << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _update_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// float error = 0;
// for (int j = 0;j < FC2_SIZE;j++)
// error += _fc2_delta[j] * _fc2_w[j][i];
// _fc1_delta[i] = error * (_fc1_a[i] * (1.0 - _fc1_a[i]));
// _fc1_db[i] += _fc1_delta[i];
// }
//}
//
//__global__ void _update_fc1_w()
//{
// int i = blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
//
// _fc1_dw[i][k][l] += _fc1_delta[i] * _input[k][l];
//}
//
//
//__global__ void assign_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_b[i] -= (_fc2_db[i] / _minibatch);
// _fc2_db[i] = 0;
// }
//}
//
//__global__ void assign_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// {
// _fc2_w[i][j] -= (_fc2_dw[i][j] / _minibatch);
// _fc2_dw[i][j] = 0;
// }
//}
//
//__global__ void assign_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_b[i] -= (_fc1_db[i] / _minibatch);
// _fc1_db[i] = 0;
// }
//}
//
//__global__ void assign_fc1_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// _fc1_w[blockIdx.x][k][l] -= (_fc1_dw[blockIdx.x][k][l] / _minibatch);
// _fc1_dw[blockIdx.x][k][l] = 0;
//
//}
//
//
//void assign_grads_gpu()
//{
// dim3 block1(32);
// dim3 grid1((FC2_SIZE - 1) / block1.x + 1);
// assign_fc2_b << <block1, grid1 >> > ();
//
// dim3 block2(32, 32);
// dim3 grid2((FC2_SIZE - 1) / block2.x + 1, (FC1_SIZE - 1) / block2.y + 1);
// assign_fc2_w << <block2, grid2 >> > ();
//
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// assign_fc1_b << <block3, grid3 >> > ();
//
// dim3 block4(8, 8, 8);
// //dim3 grid4((FC1_SIZE - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// assign_fc1_w << <block4, dim3(1, 28, 28) >> > ();
//
//
// cudaDeviceSynchronize();
//}
//void init_data_gpu()
//{
// cudaMemcpyToSymbol(_train_image, train_image, TRAIN_NUM * ROW * COL * sizeof(float));
// cudaMemcpyToSymbol(_train_label, train_label, sizeof(train_label));
// cudaMemcpyToSymbol(_test_image, test_image, TEST_NUM * ROW * COL * sizeof(float));
// cudaMemcpyToSymbol(_test_label, test_label, sizeof(test_label));
//}
//float get_rand(float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)rand() / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//void init_params()
//{
// /*for (int i = 0;i < CONV_W_NUM;i++)
// {
// for (int j = 0;j < CONV_W_SIZE;j++)
// for (int k = 0;k < CONV_W_SIZE;k++)
// conv_w[i][j][k] = get_rand(CONV_W_SIZE * CONV_W_SIZE);
// conv_b[i] = get_rand(CONV_W_SIZE * CONV_W_SIZE);
// }
//
// for (int i = 0;i < FC1_SIZE;i++)
// {
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// fc1_w[i][j][k][l] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// fc1_b[i] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// }*/
//
// for (int i = 0;i < FC1_SIZE;i++)
// {
// for (int j = 0;j < ROW;j++)
// for (int k = 0;k < COL;k++)
// fc1_w[i][j][k] = get_rand(ROW*COL);
// fc1_b[i] = get_rand(ROW*COL);
// }
//
// for (int i = 0;i < FC2_SIZE;i++)
// {
// for (int j = 0;j < FC1_SIZE;j++)
// fc2_w[i][j] = get_rand(FC1_SIZE);
// fc2_b[i] = get_rand(FC1_SIZE);
// }
//}
//
//__global__ void _test() {
// _correct_cnt = _correct_cnt + 10;
// _avg_error = _avg_error + 16.35;
// /*float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }*/
//}
//float matrixMult(float a[ROW][COL], float b[FC1_SIZE][ROW][COL], int k) {
// float c=0;
// for (int i = 0;i < ROW;i++) {
// for (int j = 0;j < COL;j++) {
// c += a[i][j] * b[k][i][j];
// }
// }
// return c;
//}
//int main() {
//
//
//
// load_data();
// clock_t t = clock();
// cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// cudaMemcpyToSymbol(_minibatch, &minibatch, sizeof(int));
// cudaMemcpyToSymbol(_epochs, &epochs, sizeof(int));
// init_data_gpu();
// init_params();
// cudaMemcpyToSymbol(_fc1_b, fc1_b, FC1_SIZE* sizeof(float));
// cudaMemcpyToSymbol(_fc1_w, fc1_w, FC1_SIZE*COL*ROW* sizeof(float));
// cudaMemcpyToSymbol(_fc2_b, fc2_b, FC2_SIZE* sizeof(float));
// cudaMemcpyToSymbol(_fc2_w, fc2_w, FC1_SIZE * FC2_SIZE * sizeof(float));
// //dim3 block_set_input(1, 1);
// //dim3 grid_set_input((ROW - 1) / block_set_input.x + 1, (COL - 1) / block_set_input.y + 1);
// dim3 grid_set_input(28,28);
// dim3 block_input(8, 8, 8);
// //dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_SIZE - 1) / block.y + 1, (CONV_SIZE - 1) / block.z + 1);
// dim3 grid_input(1,24,24);
//
// for (int i = 1;i <= epochs;i++)
// {
// int value1 = 1;
// float value2 = 1;
// cudaMemcpyToSymbol(_correct_cnt, &value1, sizeof(int));
// cudaMemcpyToSymbol(_avg_error, &value2, sizeof(float));
// cudaDeviceSynchronize();
//
// for (int j = 1;j < TRAIN_NUM; j++)
// {
// _set_input_train << <1, dim3(ROW, COL) >> > (j);
// _input_fc1 << <FC1_SIZE, dim3(ROW, COL) >> > ();
// /*_fc1_fc2 << <10, FC1_SIZE >> > ();
// set_answer_gpu_train(j);
// _check_answer_get_error << <1, 1 >> > ();
//
// _update_fc2_b << <1, 10 >> > ();
// _update_fc2_w << <10, FC1_SIZE >> > ();
// _update_fc1_b << <1, FC1_SIZE >> > ();
// _update_fc1_w << <FC1_SIZE, dim3(1, 28, 28) >> > ();
// if ((j + 1) % minibatch == 0)
// assign_grads_gpu();
//
// if (j && j % 100 == 0)
// {
// cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i);
// }*/
// }
// }
// // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i);
//
// // correct_cnt = 0;
// // avg_error = 0;
// // cudaMemcpyToSymbol(_correct_cnt, &correct_cnt, sizeof(int));
// // cudaMemcpyToSymbol(_avg_error, &avg_error, sizeof(float));
//
// // for (int j = 0;j < TEST_NUM;j++)
// // {
// // _set_input_test << <1, dim3(28, 28) >> > (j);
// // _input_fc1 << <400, dim3(28, 28) >> > ();
// // _fc1_fc2 << <10, 400 >> > ();
// // set_answer_gpu_test(j);
// // check_answer_get_error_gpu();
//
// // if (j && j % 100 == 0)
// // {
// // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100);
// // }
// // }
// // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100);
//
// // if ((float)correct_cnt / TEST_NUM * 100 > max_acc)
// // {
// // max_acc = (float)correct_cnt / TEST_NUM * 100;
// // //export_params();
// // printf("The new model has been exported.Accuracy has reached to %0.5f%%\n\n", max_acc);
// // }
// // else
// // {
// // alpha = alpha - (alpha / 3);
// // cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// // printf("Learning rate has been reduced to %f\n\n", alpha);
// // }
// //}
//
//
// float train_image[ROW][COL] = {
// { 3, 1, 2, 4 },
// { 2, 4, 3, 1 },
// { 1, 5, 2, 3 },
// { 2, 3, 4, 1 }};
//
// float fc1_w[FC1_SIZE][ROW][COL] = { {
// {1, 2, 3, 4},
// {4, 3, 1, 1},
// {1, 2, 4, 3},
// {1, 3, 2, 1}},
// {{4, 2, 5, 7},
// {2, 3, 1, 3},
// {1, 2, 3, 1},
// {4, 2, 5, 7}} };
// cout << matrixMult(train_image, fc1_w, 0) << endl;
// cout << matrixMult(train_image, fc1_w, 1) << endl;
//
// /*float train_image[ROW][COL] = {
// { 3, 1, 2, 4, 3, 3 },
// { 2, 4, 3, 1, 1, 4 },
// { 1, 5, 2, 3, 2, 5 },
// { 2, 3, 4, 1, 4, 1 },
// { 1, 4, 2, 1, 2, 3 },
// { 2, 3, 6, 5, 4, 1 }, };
// float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE] = { {
// {1, 2, 3},
// {4, 3, 1},
// {1, 2, 4}},
// {{4, 2, 5},
// {2, 3, 1},
// {1, 2, 3}} };*/
//
// //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
// float conv_z[2][2][2];
// float train_label[2] = { 3,2 };
// float testOut[FC1_SIZE];
//
// cudaMemcpyToSymbol(_input, train_image, ROW * COL * sizeof(float));
// cudaMemcpyToSymbol(_fc1_w, fc1_w, FC1_SIZE*ROW*COL*sizeof(float));
// //cudaMemcpyToSymbol(_conv_w, conv_w, CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE * sizeof(float));
// //cudaMemcpy(_train_label, train_label, 2 * sizeof(float), cudaMemcpyHostToDevice);
// //cudaMemcpy(_train_image, train_image, ROW * COL * sizeof(float), cudaMemcpyHostToDevice);
// //cudaMemcpy(_conv_w, conv_w, CONV_W_NUM*CONV_W_SIZE*CONV_W_SIZE*sizeof(float), cudaMemcpyHostToDevice);
// dim3 grid2(ROW, COL);
//
// //_input_conv << <1, grid2>> > ((float (*)[4])_train_image, (float (*)[3][3])_conv_w, (float (*)[2][2])_conv_z);
// _input_fc1 << <FC1_SIZE, grid2 >> > ();
// //_conv_pool << <1, grid2 >> > ();
// //cudaMemcpyFromSymbol(&conv_z, _pool, CONV_W_NUM * CONV_SIZE * CONV_SIZE * sizeof(float));
// //cudaMemcpyFromSymbol(&conv_z, _pool, 8 * sizeof(float));
// cudaMemcpyFromSymbol(&testOut, _fc1_z, FC1_SIZE * sizeof(float));
// for (int i = 0;i < FC1_SIZE;i++) {
// cout << testOut[i] << endl;
// }
// /*for (int i = 0;i < 2;i++) {
// for (int j = 0;j <2;j++) {
// cout << conv_z[0][i][j] << " ";
// }
// cout << endl;
// }
// for (int i = 0;i < 2;i++) {
// for (int j = 0;j < 2;j++) {
// cout << conv_z[1][i][j] << " ";
// }
// cout << endl;
// }*/
// return 0;
//} |
25cf1be5ed69458ac94ac1d6e2114aa0a1dbfb68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "aes.h"
__global__ void aca_mix_columns(void *state_buf)
{
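    // MixColumns: one thread per 4-word state column; xtime_byte (from aes.h) is the
    // GF(2^8) multiply-by-two primitive used to mix the column bytes.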
uint32_t *state = (uint32_t *)state_buf;
uint32_t col = threadIdx.x;
uint32_t base = col << 2;
uint32_t t, Tmp, Tm;
t = state[base];
Tmp = state[base] ^ state[base + 1] ^ state[base + 2] ^ state[base + 3];
Tm = state[base] ^ state[base + 1]; Tm = xtime_byte(Tm) & 0xff; state[base] ^= Tm ^ Tmp;
Tm = state[base + 1] ^ state[base + 2]; Tm = xtime_byte(Tm) & 0xff; state[base + 1] ^= Tm ^ Tmp;
Tm = state[base + 2] ^ state[base + 3]; Tm = xtime_byte(Tm) & 0xff; state[base + 2] ^= Tm ^ Tmp;
Tm = state[base + 3] ^ t; Tm = xtime_byte(Tm) & 0xff; state[base + 3] ^= Tm ^ Tmp;
}
__global__ void aca_inv_mix_columns(void *state_buf)
{
uint32_t *state = (uint32_t *)state_buf;
uint32_t col = threadIdx.x;
uint32_t base = col << 2;
uint32_t t, Tmp;
uint32_t u, v, w;
Tmp = state[base] ^ state[base + 1] ^ state[base + 2] ^ state[base + 3];
u = xtime_byte(Tmp) & 0xff;
v = xtime_byte(u) & 0xff;
w = xtime_byte(v) & 0xff;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base]) & 0xff)) & 0xff) ^ state[base];
t ^= (xtime_byte(state[base + 1]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base + 2]) & 0xff)) & 0xff);
state[base] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+1]) & 0xff)) & 0xff) ^ state[base+1];
t ^= (xtime_byte(state[base+2]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base+3]) & 0xff)) & 0xff);
state[base+1] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+2]) & 0xff)) & 0xff) ^ state[base+2];
t ^= (xtime_byte(state[base + 3]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base]) & 0xff)) & 0xff);
state[base+2] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+3]) & 0xff)) & 0xff) ^ state[base+3];
t ^= (xtime_byte(state[base]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base+1]) & 0xff)) & 0xff);
state[base+3] = t;
}
| 25cf1be5ed69458ac94ac1d6e2114aa0a1dbfb68.cu | #include "aes.h"
__global__ void aca_mix_columns(void *state_buf)
{
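    // MixColumns: one thread per 4-word state column; xtime_byte (from aes.h) is the
    // GF(2^8) multiply-by-two primitive used to mix the column bytes.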
uint32_t *state = (uint32_t *)state_buf;
uint32_t col = threadIdx.x;
uint32_t base = col << 2;
uint32_t t, Tmp, Tm;
t = state[base];
Tmp = state[base] ^ state[base + 1] ^ state[base + 2] ^ state[base + 3];
Tm = state[base] ^ state[base + 1]; Tm = xtime_byte(Tm) & 0xff; state[base] ^= Tm ^ Tmp;
Tm = state[base + 1] ^ state[base + 2]; Tm = xtime_byte(Tm) & 0xff; state[base + 1] ^= Tm ^ Tmp;
Tm = state[base + 2] ^ state[base + 3]; Tm = xtime_byte(Tm) & 0xff; state[base + 2] ^= Tm ^ Tmp;
Tm = state[base + 3] ^ t; Tm = xtime_byte(Tm) & 0xff; state[base + 3] ^= Tm ^ Tmp;
}
__global__ void aca_inv_mix_columns(void *state_buf)
{
uint32_t *state = (uint32_t *)state_buf;
uint32_t col = threadIdx.x;
uint32_t base = col << 2;
uint32_t t, Tmp;
uint32_t u, v, w;
Tmp = state[base] ^ state[base + 1] ^ state[base + 2] ^ state[base + 3];
u = xtime_byte(Tmp) & 0xff;
v = xtime_byte(u) & 0xff;
w = xtime_byte(v) & 0xff;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base]) & 0xff)) & 0xff) ^ state[base];
t ^= (xtime_byte(state[base + 1]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base + 2]) & 0xff)) & 0xff);
state[base] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+1]) & 0xff)) & 0xff) ^ state[base+1];
t ^= (xtime_byte(state[base+2]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base+3]) & 0xff)) & 0xff);
state[base+1] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+2]) & 0xff)) & 0xff) ^ state[base+2];
t ^= (xtime_byte(state[base + 3]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base]) & 0xff)) & 0xff);
state[base+2] = t;
t = w ^ Tmp;
t ^= (xtime_byte((xtime_byte(state[base+3]) & 0xff)) & 0xff) ^ state[base+3];
t ^= (xtime_byte(state[base]) & 0xff);
t ^= (xtime_byte((xtime_byte(state[base+1]) & 0xff)) & 0xff);
state[base+3] = t;
}
|
KdTreeGPUsms.hip | // !!! This is a file automatically generated by hipify!!!
//
// KdTreeGPUsms.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* Copyright (c) 2015, Russell A. Brown
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @(#)kdTreeSingleThread.cc 1.61 04/13/15 */
/*
* The k-d tree was described by Jon Bentley in "Multidimensional Binary Search Trees
* Used for Associative Searching", CACM 18(9): 509-517, 1975. For k dimensions and
* n elements of data, a balanced k-d tree is built in O(kn log n) + O((k+1)n log n)
* time by first sorting the data in each of k dimensions, then building the k-d tree
* in a manner that preserves the order of the k sorts while recursively partitioning
* the data at each level of the k-d tree. No further sorting is necessary. Moreover,
* it is possible to replace the O((k+1)n log n) term with a O((k-1)n log n) term but
* this approach sacrifices the generality of building the k-d tree for points of any
* number of dimensions.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <math.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include "Gpu.h"
#include "KdNode.h"
//#if __cplusplus != 201103L
#if 0
#include <chrono>
#define TIMER_DECLARATION() \
auto startTime = std::chrono::high_resolution_clock::now(); \
auto endTime = std::chrono::high_resolution_clock::now();
#define TIMER_START() \
startTime = std::chrono::high_resolution_clock::now(); // high_resolution_clock::is_steady
#define TIMER_STOP(__TIMED) \
endTime = std::chrono::high_resolution_clock::now(); \
__TIMED = (std::chrono::duration<double, std::milli>(endTime - startTime).count())/1000.0
#elif defined(MACH)
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
mach_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#else
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
clock_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#endif
Gpu *gpu;
/*
* The superKeyCompare method compares two sint arrays in all k dimensions,
* and uses the sorting or partition coordinate as the most significant dimension.
*
* calling parameters:
*
* a - a int*
* b - a int*
* p - the most significant dimension
* dim - the number of dimensions
*
* returns: +1, 0 or -1 as the result of comparing two sint arrays
*/
KdCoord KdNode::superKeyCompare(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = 0;
for (sint i = 0; i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
if (diff != 0) {
break;
}
}
return diff;
}
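/*
 * Example (hypothetical values): with p = 1 and dim = 3 the coordinates are
 * compared in the order y, z, x. For a = (5,4,2) and b = (9,4,1) the y values
 * tie (4 - 4 = 0) and the z values differ (2 - 1 = +1), so superKeyCompare
 * returns +1 and a sorts after b under the y-major super key.
 */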
/*
* Walk the k-d tree and check that the children of a node are in the correct branch of that node.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a count of the number of kdNodes in the k-d tree
*/
sint KdNode::verifyKdTree( const KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
sint count = 1 ;
// The partition cycles as x, y, z, w...
sint axis = depth % dim;
if (ltChild != -1) {
if (superKeyCompare(coords+kdNodes[ltChild].tuple*dim, coords+tuple*dim, axis, dim) >= 0) {
cout << "At Depth " << depth << " LT child is > node on axis " << axis << "!" << endl;
printTuple(coords+tuple*dim, dim);
cout << " < [" << ltChild << "]";
printTuple(coords+kdNodes[ltChild].tuple*dim, dim);
cout << endl;
exit(1);
}
count += kdNodes[ltChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
}
if (gtChild != -1) {
if (superKeyCompare(coords+kdNodes[gtChild].tuple*dim, coords+tuple*dim, axis, dim) <= 0) {
cout << "At Depth " << depth << " GT child is < node on axis " << axis << "!" << endl;
printTuple(coords+tuple*dim, dim);
cout << " > [" << gtChild << "]";
printTuple(coords+kdNodes[gtChild].tuple*dim, dim);
cout << endl;
exit(1);
}
count += kdNodes[gtChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
}
return count;
}
/*
* The createKdTree function performs the necessary initialization then calls the buildKdTree function.
*
* calling parameters:
*
* coordinates - a vector<int*> of references to each of the (x, y, z, w...) tuples
* numDimensions - the number of dimensions
*
* returns: a KdNode pointer to the root of the k-d tree
*/
KdNode* KdNode::createKdTree(KdNode kdNodes[], KdCoord coordinates[], const sint numDimensions, const sint numTuples)
{
TIMER_DECLARATION();
TIMER_START();
Gpu::initializeKdNodesArray(coordinates, numTuples, numDimensions);
hipDeviceSynchronize();
TIMER_STOP (double initTime);
// Sort the reference array using multiple threads if possible.
TIMER_START();
sint end[numDimensions]; // Array used to collect results of the remove duplicates function
Gpu::mergeSort(end, numTuples, numDimensions);
TIMER_STOP (double sortTime);
// Check that the same number of references was removed from each reference array.
for (sint i = 0; i < numDimensions-1; i++) {
if (end[i] < 0) {
cout << "removeDuplicates failed on dimension " << i << endl;
cout << end[0];
for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
cout << endl;
exit(1);
}
for (sint j = i + 1; j < numDimensions; j++) {
if ( end[i] != end[j] ) {
cout << "Duplicate removal error" << endl;
cout << end[0];
for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
cout << endl;
exit(1);
}
}
}
cout << numTuples-end[0] << " equal nodes removed. "<< endl;
// Build the k-d tree.
TIMER_START();
// refIdx_t root = gpu->startBuildKdTree(kdNodes, end[0], numDimensions);
refIdx_t root = Gpu::buildKdTree(kdNodes, end[0], numDimensions);
TIMER_STOP (double kdTime);
// Verify the k-d tree and report the number of KdNodes.
TIMER_START();
sint numberOfNodes = Gpu::verifyKdTree(kdNodes, root, numDimensions, numTuples);
// sint numberOfNodes = kdNodes[root].verifyKdTree( kdNodes, coordinates, numDimensions, 0);
cout << "Number of nodes = " << numberOfNodes << endl;
TIMER_STOP (double verifyTime);
cout << "totalTime = " << fixed << setprecision(4) << initTime + sortTime + kdTime + verifyTime
<< " initTime = " << initTime << " sortTime + removeDuplicatesTime = " << sortTime
<< " kdTime = " << kdTime << " verifyTime = " << verifyTime << endl << endl;
// Return the pointer to the root of the k-d tree.
return &kdNodes[root];
}
/*
* Search the k-d tree and find the KdNodes that lie within a cutoff distance
* from a query node in all k dimensions.
*
* calling parameters:
*
* query - the query point
* cut - the cutoff distance
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a list that contains the kdNodes that lie within the cutoff distance of the query node
*/
list<KdNode> KdNode::searchKdTree(const KdNode kdNodes[], const KdCoord coords[], const KdCoord* query, const KdCoord cut,
const sint dim, const sint depth) const {
// The partition cycles as x, y, z, w...
sint axis = depth % dim;
// If the distance from the query node to the k-d node is within the cutoff distance
// in all k dimensions, add the k-d node to a list.
list<KdNode> result;
bool inside = true;
for (sint i = 0; i < dim; i++) {
if (abs(query[i] - coords[tuple*dim+i]) > cut) {
inside = false;
break;
}
}
if (inside) {
result.push_back(*this); // The push_back function expects a KdNode for a call by reference.
}
// Search the < branch of the k-d tree if the partition coordinate of the query point minus
// the cutoff distance is <= the partition coordinate of the k-d node. The < branch must be
// searched when the cutoff distance equals the partition coordinate because the super key
// may assign a point to either branch of the tree if the sorting or partition coordinate,
// which forms the most significant portion of the super key, shows equality.
if ( ltChild != -1 && (query[axis] - cut) <= coords[tuple*dim+axis] ) {
list<KdNode> ltResult = kdNodes[ltChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
result.splice(result.end(), ltResult); // Can't substitute searchKdTree(...) for ltResult.
}
// Search the > branch of the k-d tree if the partition coordinate of the query point plus
// the cutoff distance is >= the partition coordinate of the k-d node. The < branch must be
// searched when the cutoff distance equals the partition coordinate because the super key
// may assign a point to either branch of the tree if the sorting or partition coordinate,
// which forms the most significant portion of the super key, shows equality.
if ( gtChild != -1 && (query[axis] + cut) >= coords[tuple*dim+axis] ) {
list<KdNode> gtResult = kdNodes[gtChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
result.splice(result.end(), gtResult); // Can't substitute searchKdTree(...) for gtResult.
}
return result;
}
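/*
 * Pruning example (hypothetical values): with cut = 3, query[axis] = 10 and a
 * node whose partition coordinate is 6, the < branch is skipped because
 * 10 - 3 = 7 > 6, while the > branch is searched because 10 + 3 = 13 >= 6.
 */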
/*
* Print one tuple.
*
* calling parameters:
*
* tuple - the tuple to print
* dim - the number of dimensions
*/
void KdNode::printTuple(const KdCoord* tuple, const sint dim)
{
cout << "(" << tuple[dim] << ",";
for (sint i=1; i<dim-1; i++) cout << tuple[i] << ",";
cout << tuple[dim-1] << ")";
}
/*
* Print the k-d tree "sideways" with the root at the ltChild.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*/
void KdNode::printKdTree(KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
if (gtChild != -1) {
kdNodes[gtChild].printKdTree(kdNodes, coords, dim, depth+1);
}
for (sint i=0; i<depth; i++) cout << " ";
printTuple(coords+tuple*dim, dim);
cout << endl;
if (ltChild != -1) {
kdNodes[ltChild].printKdTree(kdNodes, coords, dim, depth+1);
}
}
/* Create a simple k-d tree and print its topology for inspection. */
sint main(sint argc, char **argv)
{
// Set the defaults then parse the input arguments.
sint numPoints = 4194304;
sint extraPoints = 100;
sint numDimensions = 3;
sint numThreads = 512;
sint numBlocks = 32;
sint searchDistance = 20000000;
sint maximumNumberOfNodesToPrint = 5;
for (sint i = 1; i < argc; i++) {
if ( 0 == strcmp(argv[i], "-n") || 0 == strcmp(argv[i], "--numPoints") ) {
numPoints = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-x") || 0 == strcmp(argv[i], "--extraPoints") ) {
extraPoints = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-d") || 0 == strcmp(argv[i], "--numDimensions") ) {
numDimensions = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-t") || 0 == strcmp(argv[i], "--numThreads") ) {
numThreads = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-b") || 0 == strcmp(argv[i], "--numBlocks") ) {
numBlocks = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-s") || 0 == strcmp(argv[i], "--searchDistance") ) {
searchDistance = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-p") || 0 == strcmp(argv[i], "--maximumNodesToPrint") ) {
maximumNumberOfNodesToPrint = atol(argv[++i]);
continue;
}
cout << "Unsupported command-line argument: " << argv[i] << endl;
exit(1);
}
sint i = maximumNumberOfNodesToPrint + numDimensions + extraPoints;
// Declare the two-dimensional coordinates array that contains (x,y,z) coordinates.
/*
sint coordinates[NUM_TUPLES][DIMENSIONS] = {
{2,3,3}, {5,4,2}, {9,6,7}, {4,7,9}, {8,1,5},
{7,2,6}, {9,4,1}, {8,4,2}, {9,7,8}, {6,3,1},
{3,4,5}, {1,6,8}, {9,5,3}, {2,1,3}, {8,7,6},
{5,4,2}, {6,3,1}, {8,7,6}, {9,6,7}, {2,1,3},
{7,2,6}, {4,7,9}, {1,6,8}, {3,4,5}, {9,4,1} };
*/
// gpu = new Gpu(numThreads,numBlocks,0,numDimensions);
Gpu::gpuSetup(2, numThreads,numBlocks,numDimensions);
if (Gpu::getNumThreads() == 0 || Gpu::getNumBlocks() == 0) {
cout << "KdNode Tree cannot be built with " << numThreads << " threads or " << numBlocks << " blocks." << endl;
exit(1);
}
cout << "Points = " << numPoints << " dimensions = " << numDimensions << ", threads = " << numThreads << ", blocks = " << numBlocks << endl;
srand(0);
KdCoord (*coordinates) = new KdCoord[numPoints*numDimensions];
for ( i = 0; i<numPoints; i++) {
for (sint j=0; j<numDimensions; j++) {
coordinates[i*numDimensions+j] = (KdCoord)rand();
//coordinates[i*numDimensions+j] = (j==1)? (numPoints-i) : i;
//coordinates[i*numDimensions+j] = i;
}
}
// Create the k-d tree. First copy the data to a tuple in its kdNode.
// also null out the gt and lt references
// create and initialize the kdNodes array
KdNode *kdNodes = new KdNode[numPoints];
if (kdNodes == NULL) {
printf("Can't allocate %d kdNodes\n", numPoints);
exit (1);
}
KdNode *root = KdNode::createKdTree(kdNodes, coordinates, numDimensions, numPoints);
// Print the k-d tree "sideways" with the root at the left.
cout << endl;
if (searchDistance == 0){
return 0;
}
TIMER_DECLARATION();
// Search the k-d tree for the k-d nodes that lie within the cutoff distance of the first tuple.
KdCoord* query = (KdCoord *)malloc(numDimensions * sizeof(KdCoord));
for (sint i = 0; i < numDimensions; i++) {
query[i] = coordinates[i];
}
// read the KdTree back from GPU
Gpu::getKdTreeResults( kdNodes, coordinates, numPoints, numDimensions);
#define VERIFY_ON_HOST
#ifdef VERIFY_ON_HOST
sint numberOfNodes = root->verifyKdTree( kdNodes, coordinates, numDimensions, 0);
cout << "Number of nodes on host = " << numberOfNodes << endl;
#endif
TIMER_START();
list<KdNode> kdList = root->searchKdTree(kdNodes, coordinates, query, searchDistance, numDimensions, 0);
TIMER_STOP(double searchTime);
cout << "searchTime = " << fixed << setprecision(2) << searchTime << " seconds" << endl << endl;
cout << endl << kdList.size() << " nodes within " << searchDistance << " units of ";
KdNode::printTuple(query, numDimensions);
cout << " in all dimensions." << endl << endl;
if (kdList.size() != 0) {
cout << "List of k-d nodes within " << searchDistance << "-unit search distance follows:" << endl << endl;
list<KdNode>::iterator it;
for (it = kdList.begin(); it != kdList.end(); it++) {
KdNode::printTuple(coordinates+it->getTuple()*numDimensions, numDimensions);
cout << " ";
}
cout << endl;
}
return 0;
}
| KdTreeGPUsms.cu | //
// KdTreeGPUsms.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* Copyright (c) 2015, Russell A. Brown
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @(#)kdTreeSingleThread.cc 1.61 04/13/15 */
/*
* The k-d tree was described by Jon Bentley in "Multidimensional Binary Search Trees
* Used for Associative Searching", CACM 18(9): 509-517, 1975. For k dimensions and
* n elements of data, a balanced k-d tree is built in O(kn log n) + O((k+1)n log n)
* time by first sorting the data in each of k dimensions, then building the k-d tree
* in a manner that preserves the order of the k sorts while recursively partitioning
* the data at each level of the k-d tree. No further sorting is necessary. Moreover,
* it is possible to replace the O((k+1)n log n) term with a O((k-1)n log n) term but
* this approach sacrifices the generality of building the k-d tree for points of any
* number of dimensions.
*/
#include <stdbool.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <math.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include "Gpu.h"
#include "KdNode.h"
//#if __cplusplus != 201103L
#if 0
#include <chrono>
#define TIMER_DECLARATION() \
auto startTime = std::chrono::high_resolution_clock::now(); \
auto endTime = std::chrono::high_resolution_clock::now();
#define TIMER_START() \
startTime = std::chrono::high_resolution_clock::now(); // high_resolution_clock::is_steady
#define TIMER_STOP(__TIMED) \
endTime = std::chrono::high_resolution_clock::now(); \
__TIMED = (std::chrono::duration<double, std::milli>(endTime - startTime).count())/1000.0
#elif defined(MACH)
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
mach_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#else
#define TIMER_DECLARATION() \
struct timespec startTime, endTime;
#define TIMER_START() \
clock_gettime(CLOCK_REALTIME, &startTime);
#define TIMER_STOP(__TIMED) \
clock_gettime(CLOCK_REALTIME, &endTime); \
__TIMED = (endTime.tv_sec - startTime.tv_sec) + \
1.0e-9 * ((double)(endTime.tv_nsec - startTime.tv_nsec))
#endif
Gpu *gpu;
/*
* The superKeyCompare method compares two sint arrays in all k dimensions,
* and uses the sorting or partition coordinate as the most significant dimension.
*
* calling parameters:
*
* a - a int*
* b - a int*
* p - the most significant dimension
* dim - the number of dimensions
*
* returns: +1, 0 or -1 as the result of comparing two sint arrays
*/
KdCoord KdNode::superKeyCompare(const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
KdCoord diff = 0;
for (sint i = 0; i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
if (diff != 0) {
break;
}
}
return diff;
}
/*
* Walk the k-d tree and check that the children of a node are in the correct branch of that node.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a count of the number of kdNodes in the k-d tree
*/
sint KdNode::verifyKdTree( const KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
sint count = 1 ;
// The partition cycles as x, y, z, w...
sint axis = depth % dim;
if (ltChild != -1) {
if (superKeyCompare(coords+kdNodes[ltChild].tuple*dim, coords+tuple*dim, axis, dim) >= 0) {
cout << "At Depth " << depth << " LT child is > node on axis " << axis << "!" << endl;
printTuple(coords+tuple*dim, dim);
cout << " < [" << ltChild << "]";
printTuple(coords+kdNodes[ltChild].tuple*dim, dim);
cout << endl;
exit(1);
}
count += kdNodes[ltChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
}
if (gtChild != -1) {
if (superKeyCompare(coords+kdNodes[gtChild].tuple*dim, coords+tuple*dim, axis, dim) <= 0) {
cout << "At Depth " << depth << " GT child is < node on axis " << axis << "!" << endl;
printTuple(coords+tuple*dim, dim);
cout << " > [" << gtChild << "]";
printTuple(coords+kdNodes[gtChild].tuple*dim, dim);
cout << endl;
exit(1);
}
count += kdNodes[gtChild].verifyKdTree(kdNodes, coords, dim, depth + 1);
}
return count;
}
/*
* The createKdTree function performs the necessary initialization then calls the buildKdTree function.
*
* calling parameters:
*
* coordinates - a vector<int*> of references to each of the (x, y, z, w...) tuples
* numDimensions - the number of dimensions
*
* returns: a KdNode pointer to the root of the k-d tree
*/
KdNode* KdNode::createKdTree(KdNode kdNodes[], KdCoord coordinates[], const sint numDimensions, const sint numTuples)
{
TIMER_DECLARATION();
TIMER_START();
Gpu::initializeKdNodesArray(coordinates, numTuples, numDimensions);
cudaDeviceSynchronize();
TIMER_STOP (double initTime);
// Sort the reference array using multiple threads if possible.
TIMER_START();
sint end[numDimensions]; // Array used to collect results of the remove duplicates function
Gpu::mergeSort(end, numTuples, numDimensions);
TIMER_STOP (double sortTime);
// Check that the same number of references was removed from each reference array.
for (sint i = 0; i < numDimensions-1; i++) {
if (end[i] < 0) {
cout << "removeDuplicates failed on dimension " << i << endl;
cout << end[0];
for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
cout << endl;
exit(1);
}
for (sint j = i + 1; j < numDimensions; j++) {
if ( end[i] != end[j] ) {
cout << "Duplicate removal error" << endl;
cout << end[0];
for (sint k = 1; k<numDimensions; k++) cout << ", " << end[k] ;
cout << endl;
exit(1);
}
}
}
cout << numTuples-end[0] << " equal nodes removed. "<< endl;
// Build the k-d tree.
TIMER_START();
// refIdx_t root = gpu->startBuildKdTree(kdNodes, end[0], numDimensions);
refIdx_t root = Gpu::buildKdTree(kdNodes, end[0], numDimensions);
TIMER_STOP (double kdTime);
// Verify the k-d tree and report the number of KdNodes.
TIMER_START();
sint numberOfNodes = Gpu::verifyKdTree(kdNodes, root, numDimensions, numTuples);
// sint numberOfNodes = kdNodes[root].verifyKdTree( kdNodes, coordinates, numDimensions, 0);
cout << "Number of nodes = " << numberOfNodes << endl;
TIMER_STOP (double verifyTime);
cout << "totalTime = " << fixed << setprecision(4) << initTime + sortTime + kdTime + verifyTime
<< " initTime = " << initTime << " sortTime + removeDuplicatesTime = " << sortTime
<< " kdTime = " << kdTime << " verifyTime = " << verifyTime << endl << endl;
// Return the pointer to the root of the k-d tree.
return &kdNodes[root];
}
/*
* Search the k-d tree and find the KdNodes that lie within a cutoff distance
* from a query node in all k dimensions.
*
* calling parameters:
*
* query - the query point
* cut - the cutoff distance
* dim - the number of dimensions
* depth - the depth in the k-d tree
*
* returns: a list that contains the kdNodes that lie within the cutoff distance of the query node
*/
list<KdNode> KdNode::searchKdTree(const KdNode kdNodes[], const KdCoord coords[], const KdCoord* query, const KdCoord cut,
const sint dim, const sint depth) const {
// The partition cycles as x, y, z, w...
sint axis = depth % dim;
// If the distance from the query node to the k-d node is within the cutoff distance
// in all k dimensions, add the k-d node to a list.
list<KdNode> result;
bool inside = true;
for (sint i = 0; i < dim; i++) {
if (abs(query[i] - coords[tuple*dim+i]) > cut) {
inside = false;
break;
}
}
if (inside) {
result.push_back(*this); // The push_back function expects a KdNode for a call by reference.
}
// Search the < branch of the k-d tree if the partition coordinate of the query point minus
// the cutoff distance is <= the partition coordinate of the k-d node. The < branch must be
// searched when the cutoff distance equals the partition coordinate because the super key
// may assign a point to either branch of the tree if the sorting or partition coordinate,
// which forms the most significant portion of the super key, shows equality.
if ( ltChild != -1 && (query[axis] - cut) <= coords[tuple*dim+axis] ) {
list<KdNode> ltResult = kdNodes[ltChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
result.splice(result.end(), ltResult); // Can't substitute searchKdTree(...) for ltResult.
}
// Search the > branch of the k-d tree if the partition coordinate of the query point plus
// the cutoff distance is >= the partition coordinate of the k-d node. The < branch must be
// searched when the cutoff distance equals the partition coordinate because the super key
// may assign a point to either branch of the tree if the sorting or partition coordinate,
// which forms the most significant portion of the super key, shows equality.
if ( gtChild != -1 && (query[axis] + cut) >= coords[tuple*dim+axis] ) {
list<KdNode> gtResult = kdNodes[gtChild].searchKdTree(kdNodes, coords, query, cut, dim, depth + 1);
result.splice(result.end(), gtResult); // Can't substitute searchKdTree(...) for gtResult.
}
return result;
}
/*
* Print one tuple.
*
* calling parameters:
*
* tuple - the tuple to print
* dim - the number of dimensions
*/
void KdNode::printTuple(const KdCoord* tuple, const sint dim)
{
cout << "(" << tuple[dim] << ",";
for (sint i=1; i<dim-1; i++) cout << tuple[i] << ",";
cout << tuple[dim-1] << ")";
}
/*
* Print the k-d tree "sideways" with the root at the ltChild.
*
* calling parameters:
*
* dim - the number of dimensions
* depth - the depth in the k-d tree
*/
void KdNode::printKdTree(KdNode kdNodes[], const KdCoord coords[], const sint dim, const sint depth) const
{
if (gtChild != -1) {
kdNodes[gtChild].printKdTree(kdNodes, coords, dim, depth+1);
}
for (sint i=0; i<depth; i++) cout << " ";
printTuple(coords+tuple*dim, dim);
cout << endl;
if (ltChild != -1) {
kdNodes[ltChild].printKdTree(kdNodes, coords, dim, depth+1);
}
}
/* Create a simple k-d tree and print its topology for inspection. */
sint main(sint argc, char **argv)
{
// Set the defaults then parse the input arguments.
sint numPoints = 4194304;
sint extraPoints = 100;
sint numDimensions = 3;
sint numThreads = 512;
sint numBlocks = 32;
sint searchDistance = 20000000;
sint maximumNumberOfNodesToPrint = 5;
for (sint i = 1; i < argc; i++) {
if ( 0 == strcmp(argv[i], "-n") || 0 == strcmp(argv[i], "--numPoints") ) {
numPoints = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-x") || 0 == strcmp(argv[i], "--extraPoints") ) {
extraPoints = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-d") || 0 == strcmp(argv[i], "--numDimensions") ) {
numDimensions = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-t") || 0 == strcmp(argv[i], "--numThreads") ) {
numThreads = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-b") || 0 == strcmp(argv[i], "--numBlocks") ) {
numBlocks = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-s") || 0 == strcmp(argv[i], "--searchDistance") ) {
searchDistance = atol(argv[++i]);
continue;
}
if ( 0 == strcmp(argv[i], "-p") || 0 == strcmp(argv[i], "--maximumNodesToPrint") ) {
maximumNumberOfNodesToPrint = atol(argv[++i]);
continue;
}
cout << "Unsupported command-line argument: " << argv[i] << endl;
exit(1);
}
sint i = maximumNumberOfNodesToPrint + numDimensions + extraPoints;
// Declare the two-dimensional coordinates array that contains (x,y,z) coordinates.
/*
sint coordinates[NUM_TUPLES][DIMENSIONS] = {
{2,3,3}, {5,4,2}, {9,6,7}, {4,7,9}, {8,1,5},
{7,2,6}, {9,4,1}, {8,4,2}, {9,7,8}, {6,3,1},
{3,4,5}, {1,6,8}, {9,5,3}, {2,1,3}, {8,7,6},
{5,4,2}, {6,3,1}, {8,7,6}, {9,6,7}, {2,1,3},
{7,2,6}, {4,7,9}, {1,6,8}, {3,4,5}, {9,4,1} };
*/
// gpu = new Gpu(numThreads,numBlocks,0,numDimensions);
Gpu::gpuSetup(2, numThreads,numBlocks,numDimensions);
if (Gpu::getNumThreads() == 0 || Gpu::getNumBlocks() == 0) {
cout << "KdNode Tree cannot be built with " << numThreads << " threads or " << numBlocks << " blocks." << endl;
exit(1);
}
cout << "Points = " << numPoints << " dimensions = " << numDimensions << ", threads = " << numThreads << ", blocks = " << numBlocks << endl;
srand(0);
KdCoord (*coordinates) = new KdCoord[numPoints*numDimensions];
for ( i = 0; i<numPoints; i++) {
for (sint j=0; j<numDimensions; j++) {
coordinates[i*numDimensions+j] = (KdCoord)rand();
//coordinates[i*numDimensions+j] = (j==1)? (numPoints-i) : i;
//coordinates[i*numDimensions+j] = i;
}
}
// Create the k-d tree. First copy the data to a tuple in its kdNode.
// also null out the gt and lt references
// create and initialize the kdNodes array
KdNode *kdNodes = new KdNode[numPoints];
if (kdNodes == NULL) {
printf("Can't allocate %d kdNodes\n", numPoints);
exit (1);
}
KdNode *root = KdNode::createKdTree(kdNodes, coordinates, numDimensions, numPoints);
// Print the k-d tree "sideways" with the root at the left.
cout << endl;
if (searchDistance == 0){
return 0;
}
TIMER_DECLARATION();
// Search the k-d tree for the k-d nodes that lie within the cutoff distance of the first tuple.
KdCoord* query = (KdCoord *)malloc(numDimensions * sizeof(KdCoord));
for (sint i = 0; i < numDimensions; i++) {
query[i] = coordinates[i];
}
// read the KdTree back from GPU
Gpu::getKdTreeResults( kdNodes, coordinates, numPoints, numDimensions);
#define VERIFY_ON_HOST
#ifdef VERIFY_ON_HOST
sint numberOfNodes = root->verifyKdTree( kdNodes, coordinates, numDimensions, 0);
cout << "Number of nodes on host = " << numberOfNodes << endl;
#endif
TIMER_START();
list<KdNode> kdList = root->searchKdTree(kdNodes, coordinates, query, searchDistance, numDimensions, 0);
TIMER_STOP(double searchTime);
cout << "searchTime = " << fixed << setprecision(2) << searchTime << " seconds" << endl << endl;
cout << endl << kdList.size() << " nodes within " << searchDistance << " units of ";
KdNode::printTuple(query, numDimensions);
cout << " in all dimensions." << endl << endl;
if (kdList.size() != 0) {
cout << "List of k-d nodes within " << searchDistance << "-unit search distance follows:" << endl << endl;
list<KdNode>::iterator it;
for (it = kdList.begin(); it != kdList.end(); it++) {
KdNode::printTuple(coordinates+it->getTuple()*numDimensions, numDimensions);
cout << " ";
}
cout << endl;
}
return 0;
}
|
329aad64c4e52f94e67b313fd081735700d63f60.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cal_pi_d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *sum = NULL;
hipMalloc(&sum, XSIZE*YSIZE);
int nbin = 1;
double step = 1;
int nthreads = 1;
int nBLOCKS = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cal_pi_d, dim3(gridBlock), dim3(threadBlock), 0, 0, sum, nbin, step, nthreads, nBLOCKS);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cal_pi_d, dim3(gridBlock), dim3(threadBlock), 0, 0, sum, nbin, step, nthreads, nBLOCKS);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cal_pi_d, dim3(gridBlock), dim3(threadBlock), 0, 0, sum, nbin, step, nthreads, nBLOCKS);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 329aad64c4e52f94e67b313fd081735700d63f60.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cal_pi_d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *sum = NULL;
cudaMalloc(&sum, XSIZE*YSIZE);
int nbin = 1;
double step = 1;
int nthreads = 1;
int nBLOCKS = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cal_pi_d<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nBLOCKS);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cal_pi_d<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nBLOCKS);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cal_pi_d<<<gridBlock,threadBlock>>>(sum,nbin,step,nthreads,nBLOCKS);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0fc0148180ffc3f866aa1996b2e6966cd543cf58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cbf_generator.hpp"
#include "../include/hpc_helpers.hpp"
typedef uint64_t index_t;
typedef uint8_t label_t;
typedef float value_t;
__forceinline__ __device__
double cuda_rsqrt(const double& value) {
return rsqrt(value);
}
__forceinline__ __device__
float cuda_rsqrt(const float& value) {
return rsqrtf(value);
}
template <
typename index_t,
typename value_t> __global__
void znorm_kernel(
value_t * Subject, // pointer to the subject
index_t num_entries, // number of time series (m)
index_t num_features) { // number of time ticks (n)
// get thread and block identifiers
const index_t blid = blockIdx.x;
const index_t thid = threadIdx.x;
const index_t base = blid*num_features;
// 1. coalesced loading of entries
value_t v = Subject[base+thid];
value_t x = v; // copy for later
// 2a. perform a warp reduction (sum stored in thread zero)
for (index_t offset = num_features/2; offset > 0; offset /= 2)
x += __shfl_down(x, offset, num_features);
// 2b. perform the first broadcast
value_t mu = __shfl(x, 0)/num_features;
// define the square residues
value_t y = (v-mu)*(v-mu);
// 3a. perform a warp reduction (sum stored in thread zero)
for (index_t offset = num_features/2; offset > 0; offset /= 2)
y += __shfl_down(y, offset, num_features);
// 3b. perform the second broadcast
value_t sigma = __shfl(y, 0)/(num_features-1);
// 4. write result back
Subject[base+thid] = (v-mu)*cuda_rsqrt(sigma);
}
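/*
 * Reduction walk-through (assumes num_features is a power of two no larger
 * than the warp size, as with the 32-tick series generated below): for
 * num_features = 32 the shuffle offsets are 16, 8, 4, 2, 1, after which lane 0
 * holds the per-series sum and __shfl(x, 0) broadcasts it to every lane, so
 * each thread normalizes its own time tick in place.
 */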
int main () {
constexpr index_t num_features = 32;
constexpr index_t num_entries = 1UL << 20;
// small letters for hosts, capital letters for device
value_t * data = nullptr, * Data = nullptr;
label_t * labels = nullptr;
// malloc memory
hipHostMalloc(&data, sizeof(value_t)*num_entries*num_features); CUERR
hipMalloc (&Data, sizeof(value_t)*num_entries*num_features); CUERR
hipHostMalloc(&labels, sizeof(label_t)*num_entries); CUERR
// create CBF data set on host
TIMERSTART(generate_data)
generate_cbf(data, labels, num_entries, num_features);
TIMERSTOP(generate_data)
TIMERSTART(copy_data_to_device)
hipMemcpy(Data, data, sizeof(value_t)*num_entries*num_features, H2D);CUERR
TIMERSTOP(copy_data_to_device)
TIMERSTART(z_norm)
hipLaunchKernelGGL(( znorm_kernel), dim3(num_entries), dim3(32), 0, 0, Data, num_entries, num_features); CUERR
TIMERSTOP(z_norm)
TIMERSTART(copy_data_to_host)
hipMemcpy(data, Data, sizeof(value_t)*num_entries*num_features, D2H);CUERR
TIMERSTOP(copy_data_to_host)
value_t accum = 0, accum2=0;
for (index_t i = 0; i < 32; i++) {
accum += data[i];
accum2 += data[i]*data[i];
}
std::cout << accum << " " << accum2 << std::endl;
// get rid of the memory
hipHostFree(labels);
hipHostFree(data);
hipFree(Data);
}
| 0fc0148180ffc3f866aa1996b2e6966cd543cf58.cu | #include "../include/cbf_generator.hpp"
#include "../include/hpc_helpers.hpp"
typedef uint64_t index_t;
typedef uint8_t label_t;
typedef float value_t;
__forceinline__ __device__
double cuda_rsqrt(const double& value) {
return rsqrt(value);
}
__forceinline__ __device__
float cuda_rsqrt(const float& value) {
return rsqrtf(value);
}
template <
typename index_t,
typename value_t> __global__
void znorm_kernel(
value_t * Subject, // pointer to the subject
index_t num_entries, // number of time series (m)
index_t num_features) { // number of time ticks (n)
// get thread and block identifiers
const index_t blid = blockIdx.x;
const index_t thid = threadIdx.x;
const index_t base = blid*num_features;
// 1. coalesced loading of entries
value_t v = Subject[base+thid];
value_t x = v; // copy for later
// 2a. perform a warp reduction (sum stored in thread zero)
for (index_t offset = num_features/2; offset > 0; offset /= 2)
x += __shfl_down(x, offset, num_features);
// 2b. perform the first broadcast
value_t mu = __shfl(x, 0)/num_features;
// define the square residues
value_t y = (v-mu)*(v-mu);
// 3a. perform a warp reduction (sum stored in thread zero)
for (index_t offset = num_features/2; offset > 0; offset /= 2)
y += __shfl_down(y, offset, num_features);
// 3b. perform the second broadcast
value_t sigma = __shfl(y, 0)/(num_features-1);
// 4. write result back
Subject[base+thid] = (v-mu)*cuda_rsqrt(sigma);
}
int main () {
constexpr index_t num_features = 32;
constexpr index_t num_entries = 1UL << 20;
// small letters for hosts, capital letters for device
value_t * data = nullptr, * Data = nullptr;
label_t * labels = nullptr;
// malloc memory
cudaMallocHost(&data, sizeof(value_t)*num_entries*num_features); CUERR
cudaMalloc (&Data, sizeof(value_t)*num_entries*num_features); CUERR
cudaMallocHost(&labels, sizeof(label_t)*num_entries); CUERR
// create CBF data set on host
TIMERSTART(generate_data)
generate_cbf(data, labels, num_entries, num_features);
TIMERSTOP(generate_data)
TIMERSTART(copy_data_to_device)
cudaMemcpy(Data, data, sizeof(value_t)*num_entries*num_features, H2D);CUERR
TIMERSTOP(copy_data_to_device)
TIMERSTART(z_norm)
znorm_kernel<<<num_entries, 32>>>(Data, num_entries, num_features); CUERR
TIMERSTOP(z_norm)
TIMERSTART(copy_data_to_host)
cudaMemcpy(data, Data, sizeof(value_t)*num_entries*num_features, D2H);CUERR
TIMERSTOP(copy_data_to_host)
value_t accum = 0, accum2=0;
for (index_t i = 0; i < 32; i++) {
accum += data[i];
accum2 += data[i]*data[i];
}
std::cout << accum << " " << accum2 << std::endl;
// get rid of the memory
cudaFreeHost(labels);
cudaFreeHost(data);
cudaFree(Data);
}
|
0ae572c3d4fe7c5a23b8dc35e9ca3ca4031025e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "CR_Device_functions.cuh"
__global__ void list_print(int nmax, float * in) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
printf("Thread %i shows %f \n", i, in[i]);
}
__global__ void CR_Kernel_Forward(
float * alist, float * blist, float * clist, float * dlist,
int stride, int DMax) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int idx = stride * i;
int pre_idx = idx - stride/2;
int nex_idx = idx + stride/2;
float a[] = {0.0f, 0.0f, 0.0f};
float b[] = {0.0f, 0.0f, 0.0f};
float c[] = {0.0f, 0.0f, 0.0f};
float d[] = {0.0f, 0.0f, 0.0f};
float k1, k2;
a[1] = alist[idx];
b[1] = blist[idx];
c[1] = clist[idx];
d[1] = dlist[idx];
if (idx == 0) {
k1 = 0.0f;
a[2] = alist[nex_idx];
b[2] = blist[nex_idx];
c[2] = clist[nex_idx];
d[2] = dlist[nex_idx];
k2 = c[1]/b[2];
} else if (0 == (DMax-1-i*2) ) {
k2 = 0.0f;
a[0] = alist[pre_idx];
b[0] = blist[pre_idx];
c[0] = clist[pre_idx];
d[0] = dlist[pre_idx];
k1 = a[1]/b[0];
} else {
a[0] = alist[pre_idx];
b[0] = blist[pre_idx];
c[0] = clist[pre_idx];
d[0] = dlist[pre_idx];
a[2] = alist[nex_idx];
b[2] = blist[nex_idx];
c[2] = clist[nex_idx];
d[2] = dlist[nex_idx];
k1 = a[1]/b[0];
k2 = c[1]/b[2];
}
alist[idx] = -a[0]*k1;
blist[idx] = b[1] - c[0]*k1 - a[2]*k2;
clist[idx] = -c[2]*k2;
dlist[idx] = d[1] - d[0]*k1 - d[2]*k2;
}
__global__ void Solve2By2(
float * alist, float * blist, float * clist, float * dlist, float * xlist,
int stride ) {
int i = blockIdx.x*blockDim.x + threadIdx.x; //i = 0 or 1
if (i == 0) {
float k = clist[0]/blist[stride];
xlist[0] = (dlist[0]-dlist[stride]*k)/(blist[0]-alist[stride]*k);
} else {
float k = blist[0]/alist[stride];
xlist[stride] = (dlist[0]-dlist[stride]*k)/(clist[0]-blist[stride]*k);
}
}
__global__ void CR_Kernel_Backward(
float * alist, float * blist, float * clist, float * dlist, float * xlist,
int stride, int DMax) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int idx = stride * (2*i+1);
float xupper = xlist[idx - stride];
float xlower = 0.0f;
if (idx != DMax) {
//printf("i = %i, idx = %i\n", i, idx);
xlower = xlist[idx + stride];
}
xlist[idx] = (dlist[idx] - alist[idx]*xupper - clist[idx]*xlower)/blist[idx];
}
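/*
 * Indexing sketch (hypothetical first forward step, stride = 2): thread i
 * works on row idx = 2*i and eliminates its immediate neighbours at
 * pre_idx = 2*i - 1 and nex_idx = 2*i + 1, so after the pass only the
 * even-numbered rows remain coupled and the next pass can run with the stride
 * doubled.
 */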
| 0ae572c3d4fe7c5a23b8dc35e9ca3ca4031025e4.cu | #include <cstdio>
#include "CR_Device_functions.cuh"
__global__ void list_print(int nmax, float * in) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
printf("Thread %i shows %f \n", i, in[i]);
}
__global__ void CR_Kernel_Forward(
float * alist, float * blist, float * clist, float * dlist,
int stride, int DMax) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int idx = stride * i;
int pre_idx = idx - stride/2;
int nex_idx = idx + stride/2;
float a[] = {0.0f, 0.0f, 0.0f};
float b[] = {0.0f, 0.0f, 0.0f};
float c[] = {0.0f, 0.0f, 0.0f};
float d[] = {0.0f, 0.0f, 0.0f};
float k1, k2;
a[1] = alist[idx];
b[1] = blist[idx];
c[1] = clist[idx];
d[1] = dlist[idx];
if (idx == 0) {
k1 = 0.0f;
a[2] = alist[nex_idx];
b[2] = blist[nex_idx];
c[2] = clist[nex_idx];
d[2] = dlist[nex_idx];
k2 = c[1]/b[2];
} else if (0 == (DMax-1-i*2) ) {
k2 = 0.0f;
a[0] = alist[pre_idx];
b[0] = blist[pre_idx];
c[0] = clist[pre_idx];
d[0] = dlist[pre_idx];
k1 = a[1]/b[0];
} else {
a[0] = alist[pre_idx];
b[0] = blist[pre_idx];
c[0] = clist[pre_idx];
d[0] = dlist[pre_idx];
a[2] = alist[nex_idx];
b[2] = blist[nex_idx];
c[2] = clist[nex_idx];
d[2] = dlist[nex_idx];
k1 = a[1]/b[0];
k2 = c[1]/b[2];
}
alist[idx] = -a[0]*k1;
blist[idx] = b[1] - c[0]*k1 - a[2]*k2;
clist[idx] = -c[2]*k2;
dlist[idx] = d[1] - d[0]*k1 - d[2]*k2;
}
__global__ void Solve2By2(
float * alist, float * blist, float * clist, float * dlist, float * xlist,
int stride ) {
int i = blockIdx.x*blockDim.x + threadIdx.x; //i = 0 or 1
if (i == 0) {
float k = clist[0]/blist[stride];
xlist[0] = (dlist[0]-dlist[stride]*k)/(blist[0]-alist[stride]*k);
} else {
float k = blist[0]/alist[stride];
xlist[stride] = (dlist[0]-dlist[stride]*k)/(clist[0]-blist[stride]*k);
}
}
__global__ void CR_Kernel_Backward(
float * alist, float * blist, float * clist, float * dlist, float * xlist,
int stride, int DMax) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int idx = stride * (2*i+1);
float xupper = xlist[idx - stride];
float xlower = 0.0f;
if (idx != DMax) {
//printf("i = %i, idx = %i\n", i, idx);
xlower = xlist[idx + stride];
}
xlist[idx] = (dlist[idx] - alist[idx]*xupper - clist[idx]*xlower)/blist[idx];
}
|
65d9bc651c512c209f5e5738b8d959aab6de9466.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<opencv2/opencv.hpp>
#include<iostream>
#include<math.h>
#define INF 2e10f
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 1024
using namespace cv;
struct Sphere
{
float x,y,z;
float radius;
float r,g,b;
__device__ float hit(float ox,float oy,float *n)
{
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy <= radius*radius)
{
float dz = sqrt(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz+z;
}
return -INF;
}
};
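/*
 * Example (hypothetical values): for a sphere at (0,0,0) with radius 5, a ray
 * at ox = 3, oy = 0 gives dx*dx + dy*dy = 9 <= 25, so hit() sets
 * *n = sqrt(16)/5 = 0.8 and returns z + dz = 0 + 4, the depth and shading
 * factor used by the kernel below.
 */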
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *dev_mat)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x-DIM/2);
float oy = (y-DIM/2);
int i;
float maxd=-INF;
float d;
float scale;
float r=0;
float g=0;
float b=0;
float n;
for(i=0;i<SPHERES;i++)
{
d=s[i].hit(ox,oy,&n);
if(d > maxd)
{
scale = n;
r = s[i].r*scale;
g = s[i].g*scale;
b = s[i].b*scale;
maxd = d;
// printf("r:%f g:%f b:%f\n",r,g,b);
}
}
dev_mat[4*offset+0]=(int)(r*255);
dev_mat[4*offset+1]=(int)(g*255);
dev_mat[4*offset+2]=(int)(b*255);
dev_mat[4*offset+3]=255;
}
int main()
{
Mat mat(DIM,DIM,CV_8UC4);
int size = mat.cols*mat.rows*mat.elemSize();
unsigned char *dev_mat;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipMalloc((void **)&dev_mat,size);
struct Sphere s_temp[SPHERES];
int i;
for(i=0;i<SPHERES;i++)
{
s_temp[i].r = rnd(1.0f);
s_temp[i].g = rnd(1.0f);
s_temp[i].b = rnd(1.0f);
s_temp[i].x = rnd(1000.0f) -500;
s_temp[i].y = rnd(1000.0f) -500;
s_temp[i].z = rnd(1000.0f) -500;
s_temp[i].radius = rnd(100.0f) + 20;
}
hipMemcpyToSymbol(s,s_temp,sizeof(Sphere)*SPHERES);
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_mat);
hipMemcpy(mat.ptr(),dev_mat,size,hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Time to generate %f ms\n",elapsedTime);
namedWindow("display",CV_WINDOW_AUTOSIZE);
imshow("display",mat);
cvWaitKey(0);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(dev_mat);
// s lives in __constant__ memory and is not allocated with hipMalloc, so it must not be passed to hipFree.
return 0;
}
| 65d9bc651c512c209f5e5738b8d959aab6de9466.cu | #include<opencv2/opencv.hpp>
#include<iostream>
#include<math.h>
#define INF 2e10f
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 1024
using namespace cv;
struct Sphere
{
float x,y,z;
float radius;
float r,g,b;
__device__ float hit(float ox,float oy,float *n)
{
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy <= radius*radius)
{
float dz = sqrt(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz+z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *dev_mat)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x-DIM/2);
float oy = (y-DIM/2);
int i;
float maxd=-INF;
float d;
float scale;
float r=0;
float g=0;
float b=0;
float n;
for(i=0;i<SPHERES;i++)
{
d=s[i].hit(ox,oy,&n);
if(d > maxd)
{
scale = n;
r = s[i].r*scale;
g = s[i].g*scale;
b = s[i].b*scale;
maxd = d;
// printf("r:%f g:%f b:%f\n",r,g,b);
}
}
dev_mat[4*offset+0]=(int)(r*255);
dev_mat[4*offset+1]=(int)(g*255);
dev_mat[4*offset+2]=(int)(b*255);
dev_mat[4*offset+3]=255;
}
int main()
{
Mat mat(DIM,DIM,CV_8UC4);
int size = mat.cols*mat.rows*mat.elemSize();
unsigned char *dev_mat;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaMalloc((void **)&dev_mat,size);
struct Sphere s_temp[SPHERES];
int i;
for(i=0;i<SPHERES;i++)
{
s_temp[i].r = rnd(1.0f);
s_temp[i].g = rnd(1.0f);
s_temp[i].b = rnd(1.0f);
s_temp[i].x = rnd(1000.0f) -500;
s_temp[i].y = rnd(1000.0f) -500;
s_temp[i].z = rnd(1000.0f) -500;
s_temp[i].radius = rnd(100.0f) + 20;
}
cudaMemcpyToSymbol(s,s_temp,sizeof(Sphere)*SPHERES);
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>(dev_mat);
cudaMemcpy(mat.ptr(),dev_mat,size,cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Time to generate %f ms\n",elapsedTime);
namedWindow("display",CV_WINDOW_AUTOSIZE);
imshow("display",mat);
cvWaitKey(0);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dev_mat);
// s lives in __constant__ memory and is not allocated with cudaMalloc, so it must not be passed to cudaFree.
return 0;
}
|
ba3b2c68f1888d20106e1e046c00024afe007581.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
extern "C" void MedianFilter_host(int *pixel, int Width, int Height);
inline void checkCudaErrors(hipError_t err) //cuda error handle function
{
if (hipSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", hipGetErrorString(err));
return;
}
}
__global__ void MedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x <= Width && x >= 0 && y <= Height && y >= 0)
{
window[0] = (y == 0 || x == 0) ? 0 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 0 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 0 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 0 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 0 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 0 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 0 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 0 : In[(y + 1)* Width + x + 1];
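// Partial selection sort: ordering only the first 5 of the 9 entries is enough to place the median at window[4].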
for (unsigned int j = 0; j < 5; j++)
{
int min = j;
for (unsigned int l = j + 1; l < 9; l++)
if (window[l] < window[min])
min = l;
const int temp = window[j];
window[j] = window[min];
window[min] = temp;
}
Out[y* Width + x] = window[4];
}
}
extern "C" void MedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(hipMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(hipMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(hipMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, hipMemcpyHostToDevice));
MedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(hipMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, hipMemcpyDeviceToHost));
hipFree(pixelIn);
hipFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_
| ba3b2c68f1888d20106e1e046c00024afe007581.cu | #ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
extern "C" void MedianFilter_host(int *pixel, int Width, int Height);
inline void checkCudaErrors(cudaError err) //cuda error handle function
{
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", cudaGetErrorString(err));
return;
}
}
__global__ void MedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x <= Width && x >= 0 && y <= Height && y >= 0)
{
window[0] = (y == 0 || x == 0) ? 0 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 0 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 0 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 0 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 0 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 0 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 0 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 0 : In[(y + 1)* Width + x + 1];
for (unsigned int j = 0; j < 5; j++)
{
int min = j;
for (unsigned int l = j + 1; l < 9; l++)
if (window[l] < window[min])
min = l;
const int temp = window[j];
window[j] = window[min];
window[min] = temp;
}
Out[y* Width + x] = window[4];
}
}
extern "C" void MedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(cudaMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(cudaMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(cudaMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, cudaMemcpyHostToDevice));
MedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(cudaMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, cudaMemcpyDeviceToHost));
cudaFree(pixelIn);
cudaFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_
|
e5f1b7250d6a745554ed9d00285702bdd5517850.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=hipSuccess){exit(-1);}
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows && col < cols)
{
da[row][col] = row*cols + col;
}
}
extern "C" int func() //
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
hipError_t res;
int r, c;
bool is_right=true;
res = hipMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = hipMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
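// Each ha[r] holds a *device* address (dc + r*COLS); copying this pointer table into da below gives the kernel a device-side int** it can index as da[row][col].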
res = hipMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), hipMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
hipLaunchKernelGGL(( Kerneltest), dim3(dimGrid), dim3(dimBlock), 0, 0, da, ROWS, COLS);
res = hipMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), hipMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
hipFree((void*)da);
hipFree((void*)dc);
free(ha);
free(hc);
// getchar();
return 0;
} | e5f1b7250d6a745554ed9d00285702bdd5517850.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define ROWS 32
#define COLS 16
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows && col < cols)
{
da[row][col] = row*cols + col;
}
}
extern "C" int func() // 注意这里定义形式
{
int **da = NULL;
int **ha = NULL;
int *dc = NULL;
int *hc = NULL;
cudaError_t res;
int r, c;
bool is_right=true;
res = cudaMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
res = cudaMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
ha = (int**)malloc(ROWS*sizeof(int*));
hc = (int*)malloc(ROWS*COLS*sizeof(int));
for (r = 0; r < ROWS; r++)
{
ha[r] = dc + r*COLS;
}
res = cudaMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), cudaMemcpyHostToDevice);CHECK(res)
dim3 dimBlock(16,16);
dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
res = cudaMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);CHECK(res)
for (r = 0; r < ROWS; r++)
{
for (c = 0; c < COLS; c++)
{
printf("%4d ", hc[r*COLS+c]);
if (hc[r*COLS+c] != (r*COLS+c))
{
is_right = false;
}
}
printf("\n");
}
printf("the result is %s!\n", is_right? "right":"false");
cudaFree((void*)da);
cudaFree((void*)dc);
free(ha);
free(hc);
// getchar();
return 0;
} |
5998f0ae8d7d875019f51d498fd029d42204f896.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Made with CLion Educational License
#include <cmath>
#include <chrono>
#include <iostream>
// Utility function
// For __host__ it's possible to use std::swap() working similarly
__device__ __host__ void swap(float &x, float &y){
float temp = y;
y = x;
x = temp;
}
// CPU Bubble Sort
__host__ void bubble_sort(int n, float *x){
// Set optimizing variables
bool next_loop = true;
int k = 0;
// Main loop
while(next_loop){
next_loop = false;
for (int j = 0; j < n - k - 1 ; j ++){
// Do the comparison and swap
if(x[j] > x[j + 1]){
swap(x[j], x[j + 1]);
next_loop = true;
}
}
k++;
}
}
// GPU Bubble Sort
// ODD-EVEN Sort
// Alternately compare (2n with 2n + 1) and (2n with 2n - 1)
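// Worked trace (illustrative) for x = [4, 3, 2, 1]:
// pass 0 (even phase): pairs (0,1),(2,3) -> [3, 4, 1, 2]
// pass 1 (odd phase): pair (1,2) -> [3, 1, 4, 2]
// pass 2 (even phase): pairs (0,1),(2,3) -> [1, 3, 2, 4]
// pass 3 (odd phase): pair (1,2) -> [1, 2, 3, 4]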
__global__ void bubble_sort(int n, float *x, bool parity) {
// Get current index (only even)
int i = 2 * blockDim.x *blockIdx.x + threadIdx.x * 2;
if(i < n){
// Check whether we do an even-odd or an odd-even pass
if(parity){
// Check whether we are inside of array
if(i + 1 < n){
if(x[i] > x[i + 1]){
swap(x[i], x[i + 1]);
}
}
}else{
// Check whether we are inside of array
if(i - 1 >= 0){
if(x[i] < x[i - 1]){
swap(x[i], x[i - 1]);
}
}
}
}
}
// CPU Merge Sort
__host__ void merge(float *x, int l, int m, int r){
// Calculate temporary arrays length
int n1 = m - l + 1;
int n2 = r - m;
// Create temporary arrays
auto R = new float[n2]; // Left and Right
auto L = new float[n1];
// Copy array to subarrays
for (int i = 0; i < n1; ++i)
L[i] = x[l + i];
for (int i = 0; i < n2; ++i)
R[i] = x[(m + 1) + i];
// Init indices of arrays
int i = 0; // L
int j = 0; // R
int p = l; // *x
// Choose smaller value from sorted arrays
while(i < n1 && j < n2)
x[p++] = L[i] < R[j] ? L[i++] : R[j++];
// Copy remaining elements
while(i < n1)
x[p++] = L[i++];
while(j < n2)
x[p++] = R[j++];
// Deallocate memory
delete[] R;
delete[] L;
}
__host__ void merge_sort(float *x, int l, int r){
// Check if it is more than 1 element in the array
// If there is one element it's obviously sorted
if(r > l) {
// Get middle of the array
int m = (l + r) / 2;
// Divide into two smaller arrays
merge_sort(x, l, m);
merge_sort(x, m + 1, r);
// Merge arrays together
merge(x, l, m, r);
}
}
// GPU Bitonic Sort
__device__ void compare_and_swap(int n, float *x, int i, int j){
// Check whether values are in good order. If they are not -> swap
if(i < n && j < n)
if(x[i] > x[j]) swap(x[i], x[j]);
}
__global__ void bitonic_sequence_step(int n, float *x, int size, int current_size){
// Get current comparison id
int i = (blockIdx.x*blockDim.x + threadIdx.x);
// Check that the comparison index is below ceil(n / 2) (the total number of comparisons)
if(i < (n + 1) / 2){
// Divide comparisons into blocks
int block = i / current_size;
// Calculate direction of sorting
int block_dir = (i / size) % 2;
// Calculate offset in the group
int num_in_block = i % current_size;
int pivot, comparator;
// Check direction of comparison and calculate indices
if(block_dir == 0) {
pivot = 2 * (block * current_size) + num_in_block; // Number of element in x
comparator = pivot + current_size;
}else{
pivot = 2 * ((block + 1) * current_size ) - num_in_block - 1; // Number of element in x
comparator = pivot - current_size;
}
// Compare and swap right indices
compare_and_swap(n, x, pivot, comparator);
}
}
// Two neighboring groups sorted in opposite directions can be merged into one sorted array by a bitonic merge (bitonic sequence)
// Bitonic sort divides the array into groups of size 2 and orders them in alternating directions
// Thanks to that, these groups can be merged (also in alternating directions) into sorted ones with the bitonic sequence, doubling their size towards 2^n
// Finally, the two halves, sorted in opposite directions, are merged into one sorted array using the bitonic sequence again
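// Stage walk-through (illustrative) for n = 8:
// size = 1: current_size = 1 -> sorted pairs, alternating directions
// size = 2: current_size = 2, 1 -> sorted 4-element blocks, alternating directions
// size = 4: current_size = 4, 2, 1 -> the whole 8-element array is sorted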
__host__ void bitonic_sort(int n, float *x){
int current_size;
// Each outer pass produces sorted blocks of size 2*size (in alternating directions)
for (int size=1; size <= n / 2; size *= 2)
{
// Bitonic Sequence is a loop
for (current_size = size; current_size >= 1; current_size /= 2){
// Call number of comparisons in parallel (blocks of threads rounded to next integer value)
hipLaunchKernelGGL(( bitonic_sequence_step), dim3(::ceil((float) (n / 2) / 1024.0f)), dim3(1024), 0, 0, n, x, size, current_size);
}
}
}
// CPU Quick sort
// 5 6 3 4
// pivot = 4
// i j 5 > 4
// V
// 5 6 3 4
// i j 6 > 4
// V V
// 5 6 3 4
// i j 3 < 4
// V V
// 5 6 3 4
// i j 4 = 4
// V V
// 3 6 5 4
// swap x[i] x[h]
// i
// V
// 3 4 5 6
__host__ int partition (float* x, int l, int h)
{
float pivot = x[h]; // Choose last value as the pivot
int i = l; // Index of current pivot
for (int j = l; j < h; j++)
// If x[j] is smaller than pivot value move it to the left and move pivot index to the right
// We are sure things smaller than pivot are on the left of it
if (x[j] < pivot)
swap(x[i++], x[j]);
// Because pivot was chosen as last element we need to move it to calculated index
swap(x[i], x[h]);
return i;
}
__host__ void quick_sort(float *x, int i, int j){
if(i < j){
// Divide array into two smaller ones where one has values smaller than the pivot and the other has values greater than the pivot
int pivot = partition(x, i, j);
// Sort divided arrays
quick_sort(x, i, pivot - 1);
quick_sort(x, pivot + 1, j);
}
}
int main(){
// Initialize data
int order_of_magnitude;
std::cout << "Enter order of magnitude to test: ";
std::cin >> order_of_magnitude;
std::cout << std::endl << "----------------" << std::endl << std::endl;
if(order_of_magnitude > 26) std::cout << "WARNING" << std::endl << "Order of magnitude lowered to 26 due to the performance issues" << std::endl << std::endl;
order_of_magnitude = ::min(26, order_of_magnitude);
int N = (1 << order_of_magnitude); // 2^n
int next_power = pow(2, ceil(log(N)/log(2)));
float *gpu_bubble, *cuda_gpu_bubble, *cpu_bubble, *gpu_bitonic, *cuda_gpu_bitonic, *cpu_merge, *cpu_quick;
// Allocate memory on CPU
gpu_bubble = (float*)malloc(N * sizeof(float));
cpu_bubble = (float*)malloc(N * sizeof(float));
gpu_bitonic = (float*)malloc(next_power * sizeof(float)); // Need to ensure that the data amount is 2^n
cpu_merge = (float*)malloc(N * sizeof(float));
cpu_quick = (float*)malloc(N * sizeof(float));
// Allocate memory on GPU
hipMalloc(&cuda_gpu_bubble, N * sizeof(float));
hipMalloc(&cuda_gpu_bitonic , next_power * sizeof(float)); // Need to ensure that the data amount is 2^n
// Choose pseudo-random numbers
for (int i = 0; i < next_power; i ++) {
if(i < N) {
gpu_bubble[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
cpu_bubble[i] = gpu_bubble[i];
gpu_bitonic[i] = gpu_bubble[i];
cpu_merge[i] = gpu_bubble[i];
cpu_quick[i] = gpu_bubble[i];
}else{
gpu_bitonic[i] = - INFINITY;
}
}
// Bubble Sort GPU
auto cuda_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
hipMemcpy(cuda_gpu_bubble, gpu_bubble, N * sizeof(float), hipMemcpyHostToDevice);
for (int i = 0; i < N; i++) {
hipLaunchKernelGGL(( bubble_sort), dim3(ceil((float)N / 2048)), dim3(1024), 0, 0, N, cuda_gpu_bubble, i % 2 == 0);
}
hipMemcpy(gpu_bubble, cuda_gpu_bubble, N * sizeof(float), hipMemcpyDeviceToHost);
}else{
std::cout << "WARNING!" << std::endl << "GPU bubble sort disabled due to it's low performance" << std::endl << std::endl;
}
auto cuda_bubble_end = std::chrono::steady_clock::now();
// Bubble Sort CPU
auto cpu_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
bubble_sort(N, cpu_bubble);
}else{
std::cout << "WARNING!" << std::endl << "CPU bubble sort disabled due to it's low performance" << std::endl;
std::cout << std::endl << "----------------" << std::endl << std::endl;
}
auto cpu_bubble_end = std::chrono::steady_clock::now();
// Bitonic Sort GPU
auto gpu_merge_start = std::chrono::steady_clock::now();
hipMemcpy(cuda_gpu_bitonic, gpu_bitonic, next_power * sizeof(float), hipMemcpyHostToDevice);
bitonic_sort(next_power, cuda_gpu_bitonic);
hipMemcpy(gpu_bitonic, cuda_gpu_bitonic, next_power * sizeof(float), hipMemcpyDeviceToHost);
auto gpu_merge_end = std::chrono::steady_clock::now();
// Merge Sort CPU
auto cpu_merge_start = std::chrono::steady_clock::now();
merge_sort(cpu_merge, 0, N - 1);
auto cpu_merge_end = std::chrono::steady_clock::now();
// Quick Sort CPU
auto cpu_quick_start = std::chrono::steady_clock::now();
quick_sort(cpu_quick, 0, N - 1);
auto cpu_quick_end = std::chrono::steady_clock::now();
// Set correctness flag
bool gpu_bubble_correct = true;
bool cpu_bubble_correct = true;
bool cpu_merge_correct = true;
bool gpu_bitonic_correct = true;
bool cpu_quick_correct = true;
// Check sorts correctness
for (int i = 0; i < next_power - 1 ; i ++) {
if(i < N - 1) {
if (gpu_bubble[i] > gpu_bubble[i + 1]) gpu_bubble_correct = false;
if (cpu_bubble[i] > cpu_bubble[i + 1]) cpu_bubble_correct = false;
if (cpu_merge[i] > cpu_merge[i + 1]) cpu_merge_correct = false;
if(cpu_quick[i] > cpu_quick [i + 1]) cpu_quick_correct = false;
}
if (gpu_bitonic[i] > gpu_bitonic[i + 1]) gpu_bitonic_correct = false;
}
// Display number of elements
std::cout << "Sorting algorithms for: " << N << " elements" << std::endl << std::endl;
// Display correctness test
std::cout << "GPU Bubble Sort correctness : "<< gpu_bubble_correct << std::endl;
std::cout << "CPU Bubble Sort correctness : "<< cpu_bubble_correct << std::endl;
std::cout << "GPU Bitonic Sort correctness : " << gpu_bitonic_correct << std::endl;
std::cout << "CPU Merge Sort correctness : " << cpu_merge_correct << std::endl;
std::cout << "CPU Quick Sort correctness : " << cpu_quick_correct << std::endl;
// Make space
std::cout << std::endl << "----------------" << std::endl << std::endl;
// Display timings
std::cout << "GPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cuda_bubble_end - cuda_bubble_begin).count() << " s" << std::endl;
std::cout << "CPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_bubble_end - cpu_bubble_begin).count() << " s" << std::endl;
std::cout << "GPU Bitonic Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(gpu_merge_end - gpu_merge_start).count() << " s" << std::endl;
std::cout << "CPU Merge Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_merge_end - cpu_merge_start).count() << " s" << std::endl;
std::cout << "CPU Quick Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_quick_end - cpu_quick_start).count() << " s" << std::endl;
// Deallocate CUDA memory
hipFree(cuda_gpu_bubble);
hipFree(cuda_gpu_bitonic);
// Deallocate memory
free(cpu_bubble);
free(cpu_merge);
free(cpu_quick);
free(gpu_bubble);
free(gpu_bitonic);
} | 5998f0ae8d7d875019f51d498fd029d42204f896.cu | // Made with CLion Educational License
#include <cmath>
#include <chrono>
#include <iostream>
// Utility function
// For __host__ it's possible to use std::swap() working similarly
__device__ __host__ void swap(float &x, float &y){
float temp = y;
y = x;
x = temp;
}
// CPU Bubble Sort
__host__ void bubble_sort(int n, float *x){
// Set optimizing variables
bool next_loop = true;
int k = 0;
// Main loop
while(next_loop){
next_loop = false;
for (int j = 0; j < n - k - 1 ; j ++){
// Do the comparison and swap
if(x[j] > x[j + 1]){
swap(x[j], x[j + 1]);
next_loop = true;
}
}
k++;
}
}
// GPU Bubble Sort
// ODD-EVEN Sort
// Alternately compare (2n with 2n + 1) and (2n with 2n - 1)
__global__ void bubble_sort(int n, float *x, bool parity) {
// Get current index (only even)
int i = 2 * blockDim.x *blockIdx.x + threadIdx.x * 2;
if(i < n){
// Check whether we do an even-odd or an odd-even pass
if(parity){
// Check whether we are inside of array
if(i + 1 < n){
if(x[i] > x[i + 1]){
swap(x[i], x[i + 1]);
}
}
}else{
// Check whether we are inside of array
if(i - 1 >= 0){
if(x[i] < x[i - 1]){
swap(x[i], x[i - 1]);
}
}
}
}
}
// CPU Merge Sort
__host__ void merge(float *x, int l, int m, int r){
// Calculate temporary arrays length
int n1 = m - l + 1;
int n2 = r - m;
// Create temporary arrays
auto R = new float[n2]; // Left and Right
auto L = new float[n1];
// Copy array to subarrays
for (int i = 0; i < n1; ++i)
L[i] = x[l + i];
for (int i = 0; i < n2; ++i)
R[i] = x[(m + 1) + i];
// Init indices of arrays
int i = 0; // L
int j = 0; // R
int p = l; // *x
// Choose smaller value from sorted arrays
while(i < n1 && j < n2)
x[p++] = L[i] < R[j] ? L[i++] : R[j++];
// Copy remaining elements
while(i < n1)
x[p++] = L[i++];
while(j < n2)
x[p++] = R[j++];
// Deallocate memory
delete[] R;
delete[] L;
}
__host__ void merge_sort(float *x, int l, int r){
// Check if it is more than 1 element in the array
// If there is one element it's obviously sorted
if(r > l) {
// Get middle of the array
int m = (l + r) / 2;
// Divide into two smaller arrays
merge_sort(x, l, m);
merge_sort(x, m + 1, r);
// Merge arrays together
merge(x, l, m, r);
}
}
// GPU Bitonic Sort
__device__ void compare_and_swap(int n, float *x, int i, int j){
// Check whether values are in good order. If they are not -> swap
if(i < n && j < n)
if(x[i] > x[j]) swap(x[i], x[j]);
}
__global__ void bitonic_sequence_step(int n, float *x, int size, int current_size){
// Get current comparison id
int i = (blockIdx.x*blockDim.x + threadIdx.x);
// Check that the comparison index is below ceil(n / 2) (the total number of comparisons)
if(i < (n + 1) / 2){
// Divide comparisons into blocks
int block = i / current_size;
// Calculate direction of sorting
int block_dir = (i / size) % 2;
// Calculate offset in the group
int num_in_block = i % current_size;
int pivot, comparator;
// Check direction of comparison and calculate indices
if(block_dir == 0) {
pivot = 2 * (block * current_size) + num_in_block; // Number of element in x
comparator = pivot + current_size;
}else{
pivot = 2 * ((block + 1) * current_size ) - num_in_block - 1; // Number of element in x
comparator = pivot - current_size;
}
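// e.g. (illustrative): size = 2, current_size = 2, i = 1 gives block = 0,
// block_dir = 0, num_in_block = 1 -> pivot = 1, comparator = 3
// (an ascending compare of x[1] and x[3])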
// Compare and swap right indices
compare_and_swap(n, x, pivot, comparator);
}
}
// Two neighboring groups sorted in opposite directions can be merged into one sorted array by a bitonic merge (bitonic sequence)
// Bitonic sort divides the array into groups of size 2 and orders them in alternating directions
// Thanks to that, these groups can be merged (also in alternating directions) into sorted ones with the bitonic sequence, doubling their size towards 2^n
// Finally, the two halves, sorted in opposite directions, are merged into one sorted array using the bitonic sequence again
__host__ void bitonic_sort(int n, float *x){
int current_size;
// Each outer pass produces sorted blocks of size 2*size (in alternating directions)
for (int size=1; size <= n / 2; size *= 2)
{
// Bitonic Sequence is a loop
for (current_size = size; current_size >= 1; current_size /= 2){
// Call number of comparisons in parallel (blocks of threads rounded to next integer value)
bitonic_sequence_step<<<std::ceil((float) (n / 2) / 1024.0f), 1024>>>(n, x, size, current_size);
}
}
}
// CPU Quick sort
// 5 6 3 4
// pivot = 4
// i j 5 > 4
// V
// 5 6 3 4
// i j 6 > 4
// V V
// 5 6 3 4
// i j 3 < 4
// V V
// 5 6 3 4
// i j 4 = 4
// V V
// 3 6 5 4
// swap x[i] x[h]
// i
// V
// 3 4 5 6
__host__ int partition (float* x, int l, int h)
{
float pivot = x[h]; // Choose last value as the pivot
int i = l; // Index of current pivot
for (int j = l; j < h; j++)
// If x[j] is smaller than pivot value move it to the left and move pivot index to the right
// We are sure things smaller than pivot are on the left of it
if (x[j] < pivot)
swap(x[i++], x[j]);
// Because pivot was chosen as last element we need to move it to calculated index
swap(x[i], x[h]);
return i;
}
__host__ void quick_sort(float *x, int i, int j){
if(i < j){
// Divide array into two smaller ones where one has values smaller than the pivot and the other has values greater than the pivot
int pivot = partition(x, i, j);
// Sort divided arrays
quick_sort(x, i, pivot - 1);
quick_sort(x, pivot + 1, j);
}
}
int main(){
// Initialize data
int order_of_magnitude;
std::cout << "Enter order of magnitude to test: ";
std::cin >> order_of_magnitude;
std::cout << std::endl << "----------------" << std::endl << std::endl;
if(order_of_magnitude > 26) std::cout << "WARNING" << std::endl << "Order of magnitude lowered to 26 due to the performance issues" << std::endl << std::endl;
order_of_magnitude = std::min(26, order_of_magnitude);
int N = (1 << order_of_magnitude); // 2^n
int next_power = pow(2, ceil(log(N)/log(2)));
float *gpu_bubble, *cuda_gpu_bubble, *cpu_bubble, *gpu_bitonic, *cuda_gpu_bitonic, *cpu_merge, *cpu_quick;
// Allocate memory on CPU
gpu_bubble = (float*)malloc(N * sizeof(float));
cpu_bubble = (float*)malloc(N * sizeof(float));
gpu_bitonic = (float*)malloc(next_power * sizeof(float)); // Need to ensure that the data amount is 2^n
cpu_merge = (float*)malloc(N * sizeof(float));
cpu_quick = (float*)malloc(N * sizeof(float));
// Allocate memory on GPU
cudaMalloc(&cuda_gpu_bubble, N * sizeof(float));
cudaMalloc(&cuda_gpu_bitonic , next_power * sizeof(float)); // Need to ensure that the data amount is 2^n
// Choose pseudo-random numbers
for (int i = 0; i < next_power; i ++) {
if(i < N) {
gpu_bubble[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
cpu_bubble[i] = gpu_bubble[i];
gpu_bitonic[i] = gpu_bubble[i];
cpu_merge[i] = gpu_bubble[i];
cpu_quick[i] = gpu_bubble[i];
}else{
gpu_bitonic[i] = - INFINITY;
}
}
// Bubble Sort GPU
auto cuda_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
cudaMemcpy(cuda_gpu_bubble, gpu_bubble, N * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < N; i++) {
bubble_sort<<<ceil((float)N / 2048), 1024>>>(N, cuda_gpu_bubble, i % 2 == 0);
}
cudaMemcpy(gpu_bubble, cuda_gpu_bubble, N * sizeof(float), cudaMemcpyDeviceToHost);
}else{
std::cout << "WARNING!" << std::endl << "GPU bubble sort disabled due to it's low performance" << std::endl << std::endl;
}
auto cuda_bubble_end = std::chrono::steady_clock::now();
// Bubble Sort CPU
auto cpu_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
bubble_sort(N, cpu_bubble);
}else{
std::cout << "WARNING!" << std::endl << "CPU bubble sort disabled due to it's low performance" << std::endl;
std::cout << std::endl << "----------------" << std::endl << std::endl;
}
auto cpu_bubble_end = std::chrono::steady_clock::now();
// Bitonic Sort GPU
auto gpu_merge_start = std::chrono::steady_clock::now();
cudaMemcpy(cuda_gpu_bitonic, gpu_bitonic, next_power * sizeof(float), cudaMemcpyHostToDevice);
bitonic_sort(next_power, cuda_gpu_bitonic);
cudaMemcpy(gpu_bitonic, cuda_gpu_bitonic, next_power * sizeof(float), cudaMemcpyDeviceToHost);
auto gpu_merge_end = std::chrono::steady_clock::now();
// Merge Sort CPU
auto cpu_merge_start = std::chrono::steady_clock::now();
merge_sort(cpu_merge, 0, N - 1);
auto cpu_merge_end = std::chrono::steady_clock::now();
// Quick Sort CPU
auto cpu_quick_start = std::chrono::steady_clock::now();
quick_sort(cpu_quick, 0, N - 1);
auto cpu_quick_end = std::chrono::steady_clock::now();
// Set correctness flag
bool gpu_bubble_correct = true;
bool cpu_bubble_correct = true;
bool cpu_merge_correct = true;
bool gpu_bitonic_correct = true;
bool cpu_quick_correct = true;
// Check sorts correctness
for (int i = 0; i < next_power - 1 ; i ++) {
if(i < N - 1) {
if (gpu_bubble[i] > gpu_bubble[i + 1]) gpu_bubble_correct = false;
if (cpu_bubble[i] > cpu_bubble[i + 1]) cpu_bubble_correct = false;
if (cpu_merge[i] > cpu_merge[i + 1]) cpu_merge_correct = false;
if(cpu_quick[i] > cpu_quick [i + 1]) cpu_quick_correct = false;
}
if (gpu_bitonic[i] > gpu_bitonic[i + 1]) gpu_bitonic_correct = false;
}
// Display number of elements
std::cout << "Sorting algorithms for: " << N << " elements" << std::endl << std::endl;
// Display correctness test
std::cout << "GPU Bubble Sort correctness : "<< gpu_bubble_correct << std::endl;
std::cout << "CPU Bubble Sort correctness : "<< cpu_bubble_correct << std::endl;
std::cout << "GPU Bitonic Sort correctness : " << gpu_bitonic_correct << std::endl;
std::cout << "CPU Merge Sort correctness : " << cpu_merge_correct << std::endl;
std::cout << "CPU Quick Sort correctness : " << cpu_quick_correct << std::endl;
// Make space
std::cout << std::endl << "----------------" << std::endl << std::endl;
// Display timings
std::cout << "GPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cuda_bubble_end - cuda_bubble_begin).count() << " µs" << std::endl;
std::cout << "CPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_bubble_end - cpu_bubble_begin).count() << " µs" << std::endl;
std::cout << "GPU Bitonic Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(gpu_merge_end - gpu_merge_start).count() << " µs" << std::endl;
std::cout << "CPU Merge Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_merge_end - cpu_merge_start).count() << " µs" << std::endl;
std::cout << "CPU Quick Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_quick_end - cpu_quick_start).count() << " µs" << std::endl;
// Deallocate CUDA memory
cudaFree(cuda_gpu_bubble);
cudaFree(cuda_gpu_bitonic);
// Deallocate memory
free(cpu_bubble);
free(cpu_merge);
free(cpu_quick);
free(gpu_bubble);
free(gpu_bitonic);
} |
8938e9d95d2168a6a1e7066b71943c3703e31e0f.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DevicePartition::If().
*
* Partitions items from a sequence of int keys using a
* selection functor (greater-than)
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_select_if.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_partition.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
/// Selection functor type
struct GreaterThan
{
int compare;
__host__ __device__ __forceinline__
GreaterThan(int compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const int &a) const {
return (a > compare);
}
};
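// e.g. (illustrative): GreaterThan gt(5); gt(7) returns true, gt(3) returns false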
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem, setting runs of random length chosen from [1..max_segment]
*/
void Initialize(
int *h_in,
int num_items,
int max_segment)
{
int key = 0;
int i = 0;
while (i < num_items)
{
// Randomly select number of repeating occurrences uniformly from [1..max_segment]
unsigned short max_short = (unsigned short) -1;
unsigned short repeat;
RandomBits(repeat);
repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short))));
repeat = CUB_MAX(1, repeat);
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
h_in[j] = key;
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve the partition problem on the CPU (reference result): selected items are
* compacted to the front, rejected items are placed at the back in reverse order
*/
template <typename SelectOp>
int Solve(
int *h_in,
SelectOp select_op,
int *h_reference,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if (select_op(h_in[i]))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
int max_segment = 40; // Maximum segment length
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("maxseg", max_segment);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Allocate host arrays
int *h_in = new int[num_items];
int *h_reference = new int[num_items];
// Randomly select a pivot index
unsigned int pivot_index;
unsigned int max_int = (unsigned int) -1;
RandomBits(pivot_index);
pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int))));
printf("Pivot idx: %d\n", pivot_index); fflush(stdout);
// Initialize problem and solution
Initialize(h_in, num_items, max_segment);
GreaterThan select_op(h_in[pivot_index]);
int num_selected = Solve(h_in, select_op, h_reference, num_items);
printf("cub::DevicePartition::If %d items, %d selected (avg run length %d), %d-byte elements\n",
num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int));
fflush(stdout);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice));
// Allocate device output array and num selected
int *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
printf("\t Data %s ", compare ? "FAIL" : "PASS");
compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
| 8938e9d95d2168a6a1e7066b71943c3703e31e0f.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DevicePartition::If().
*
* Partitions items from a sequence of int keys using a
* selection functor (greater-than)
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_select_if.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <cub/util_allocator.cuh>
#include <cub/device/device_partition.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
/// Selection functor type
struct GreaterThan
{
int compare;
__host__ __device__ __forceinline__
GreaterThan(int compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const int &a) const {
return (a > compare);
}
};
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem, setting runs of random length chosen from [1..max_segment]
*/
void Initialize(
int *h_in,
int num_items,
int max_segment)
{
int key = 0;
int i = 0;
while (i < num_items)
{
// Randomly select number of repeating occurrences uniformly from [1..max_segment]
unsigned short max_short = (unsigned short) -1;
unsigned short repeat;
RandomBits(repeat);
repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short))));
repeat = CUB_MAX(1, repeat);
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
h_in[j] = key;
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve the partition problem on the CPU (reference result): selected items are
* compacted to the front, rejected items are placed at the back in reverse order
*/
template <typename SelectOp>
int Solve(
int *h_in,
SelectOp select_op,
int *h_reference,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if (select_op(h_in[i]))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
int max_segment = 40; // Maximum segment length
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("maxseg", max_segment);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Allocate host arrays
int *h_in = new int[num_items];
int *h_reference = new int[num_items];
// Randomly select a pivot index
unsigned int pivot_index;
unsigned int max_int = (unsigned int) -1;
RandomBits(pivot_index);
pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int))));
printf("Pivot idx: %d\n", pivot_index); fflush(stdout);
// Initialize problem and solution
Initialize(h_in, num_items, max_segment);
GreaterThan select_op(h_in[pivot_index]);
int num_selected = Solve(h_in, select_op, h_reference, num_items);
printf("cub::DevicePartition::If %d items, %d selected (avg run length %d), %d-byte elements\n",
num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int));
fflush(stdout);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// Allocate device output array and num selected
int *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
printf("\t Data %s ", compare ? "FAIL" : "PASS");
compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
|
20ee0f1ff92d580bc4dbb923fb0564185bd22383.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file fil.cu implements forest inference */
#include <omp.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <stack>
#include <utility>
#include <cuml/fil/fil.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include "common_hip.cuh"
namespace ML {
namespace fil {
using namespace MLCommon;
namespace tl = treelite;
__host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
/** performs additional transformations on the array of forest predictions
(preds) of size n; the transformations are defined by output, and include
averaging (multiplying by inv_num_trees), adding global_bias (always done),
sigmoid and applying threshold. in case of complement_proba,
fills in the complement probability */
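// e.g. (illustrative): with output = AVG | SIGMOID, 100 trees and zero global_bias,
// a raw per-row sum of 30.0 becomes sigmoid(30.0 / 100) = sigmoid(0.3) ~= 0.574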
__global__ void transform_k(float* preds, size_t n, output_t output,
float inv_num_trees, float threshold,
float global_bias, bool complement_proba) {
size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x;
if (i >= n) return;
if (complement_proba && i % 2 != 0) return;
float result = preds[i];
if ((output & output_t::AVG) != 0) result *= inv_num_trees;
result += global_bias;
if ((output & output_t::SIGMOID) != 0) result = sigmoid(result);
// will not be done on CATEGORICAL_LEAF because the whole kernel will not run
if ((output & output_t::CLASS) != 0) {
result = result > threshold ? 1.0f : 0.0f;
}
// sklearn outputs numpy array in 'C' order, with the number of classes being last dimension
// that is also the default order, so we should use the same one
if (complement_proba) {
preds[i] = 1.0f - result;
preds[i + 1] = result;
} else
preds[i] = result;
}
struct forest {
void init_n_items(int device) {
int max_shm_std = 48 * 1024; // 48 KiB
/// the most shared memory a kernel can request on the GPU in question
int max_shm = 0;
CUDA_CHECK(hipDeviceGetAttribute(
&max_shm, hipDeviceAttributeSharedMemPerBlockOptin, device));
// TODO(canonizer): use >48KiB shared memory if available
max_shm = ::min(max_shm, max_shm_std);
// searching for the most items per block while respecting the shared
// memory limits creates a full linear programming problem.
// solving it in a single equation looks less tractable than this
for (bool predict_proba : {false, true}) {
shmem_size_params& ssp_ = predict_proba ? proba_ssp_ : class_ssp_;
ssp_.predict_proba = predict_proba;
shmem_size_params ssp = ssp_;
for (bool cols_in_shmem : {false, true}) {
ssp.cols_in_shmem = cols_in_shmem;
for (ssp.n_items = 1;
ssp.n_items <= (algo_ == algo_t::BATCH_TREE_REORG ? 4 : 1);
++ssp.n_items) {
ssp.compute_smem_footprint();
if (ssp.shm_sz < max_shm) ssp_ = ssp;
}
}
ASSERT(max_shm >= ssp_.shm_sz,
"FIL out of shared memory. Perhaps the maximum number of \n"
"supported classes is exceeded? 5'000 would still be safe.");
}
}
void init_fixed_block_count(int device, int blocks_per_sm) {
int max_threads_per_sm, sm_count;
CUDA_CHECK(hipDeviceGetAttribute(
&max_threads_per_sm, hipDeviceAttributeMaxThreadsPerMultiProcessor, device));
int max_blocks_per_sm = max_threads_per_sm / FIL_TPB;
ASSERT(blocks_per_sm <= max_blocks_per_sm,
"on this GPU, FIL blocks_per_sm cannot exceed %d",
max_blocks_per_sm);
CUDA_CHECK(hipDeviceGetAttribute(&sm_count, hipDeviceAttributeMultiprocessorCount,
device));
fixed_block_count_ = blocks_per_sm * sm_count;
}
void init_common(const raft::handle_t& h, const forest_params_t* params) {
depth_ = params->depth;
num_trees_ = params->num_trees;
algo_ = params->algo;
output_ = params->output;
threshold_ = params->threshold;
global_bias_ = params->global_bias;
proba_ssp_.leaf_algo = params->leaf_algo;
proba_ssp_.num_cols = params->num_cols;
proba_ssp_.num_classes = params->num_classes;
class_ssp_ = proba_ssp_;
int device = h.get_device();
init_n_items(device); // n_items takes priority over blocks_per_sm
init_fixed_block_count(device, params->blocks_per_sm);
}
virtual void infer(predict_params params, hipStream_t stream) = 0;
void predict(const raft::handle_t& h, float* preds, const float* data,
size_t num_rows, bool predict_proba) {
// Initialize prediction parameters.
predict_params params(predict_proba ? proba_ssp_ : class_ssp_);
params.algo = algo_;
params.preds = preds;
params.data = data;
params.num_rows = num_rows;
// ignored unless predict_proba is true and algo is GROVE_PER_CLASS
params.transform = output_;
// fixed_block_count_ == 0 means the number of thread blocks is
// proportional to the number of rows
params.num_blocks = fixed_block_count_;
/**
The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows
(always 2 outputs):
RAW: output the sum of tree predictions
AVG is set: divide by the number of trees (averaging)
SIGMOID is set: apply sigmoid
CLASS is set: ignored
SOFTMAX is set: error
write the output of the previous stages and its complement
The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows
(always 1 output):
RAW (no values set): output the sum of tree predictions
AVG is set: divide by the number of trees (averaging)
SIGMOID is set: apply sigmoid
CLASS is set: apply threshold (equivalent to choosing best class)
SOFTMAX is set: error
The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows
(always num_classes outputs):
RAW (no values set): output class votes
AVG is set: divide by the number of trees (averaging, output class probability)
SIGMOID is set: apply sigmoid
CLASS is set: ignored
SOFTMAX is set: error
The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows
(always 1 output):
RAW (no values set): output the label of the class with highest probability, else output label 0.
SOFTMAX is set: error
All other flags (AVG, SIGMOID, CLASS) are ignored
The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() works as follows
(always num_classes outputs):
RAW (no values set): output class votes
AVG is set: divide by the number of trees (averaging, output class probability)
SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error
CLASS is set: ignored
SOFTMAX is set: softmax is applied after averaging and global_bias
The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows
(always 1 output):
RAW (no values set): output the label of the class with highest margin,
equal margins resolved in favor of smaller label integer
All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored
*/
output_t ot = output_;
// Treelite applies the bias before softmax, but FIL applies it after.
// Simulating treelite's order here, which cancels out the bias.
// If non-proba prediction is used, the bias still does not matter,
// for the same reason the softmax does not.
float global_bias = (ot & output_t::SOFTMAX) != 0 ? 0.0f : global_bias_;
bool complement_proba = false, do_transform;
if (predict_proba) {
// no threshold on probabilities
ot = output_t(ot & ~output_t::CLASS);
switch (params.leaf_algo) {
case leaf_algo_t::FLOAT_UNARY_BINARY:
params.num_outputs = 2;
complement_proba = true;
do_transform = true;
break;
case leaf_algo_t::GROVE_PER_CLASS:
// for GROVE_PER_CLASS, averaging happens in infer_k
ot = output_t(ot & ~output_t::AVG);
params.num_outputs = params.num_classes;
do_transform = ot != output_t::RAW && ot != output_t::SOFTMAX ||
global_bias != 0.0f;
break;
case leaf_algo_t::CATEGORICAL_LEAF:
params.num_outputs = params.num_classes;
do_transform = ot != output_t::RAW || global_bias_ != 0.0f;
break;
default:
ASSERT(false, "internal error: invalid leaf_algo_");
}
} else {
if (params.leaf_algo == leaf_algo_t::FLOAT_UNARY_BINARY) {
do_transform = ot != output_t::RAW || global_bias_ != 0.0f;
} else {
// GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and
// all transforms are monotonic. also, would break current code
do_transform = false;
}
params.num_outputs = 1;
}
// Predict using the forest.
hipStream_t stream = h.get_stream();
infer(params, stream);
if (do_transform) {
size_t num_values_to_transform =
(size_t)num_rows * (size_t)params.num_outputs;
hipLaunchKernelGGL(( transform_k), dim3(raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB)),
dim3(FIL_TPB), 0, stream,
preds, num_values_to_transform, ot,
num_trees_ > 0 ? (1.0f / num_trees_) : 1.0f, threshold_, global_bias,
complement_proba);
CUDA_CHECK(hipPeekAtLastError());
}
}
virtual void free(const raft::handle_t& h) = 0;
virtual ~forest() {}
int num_trees_ = 0;
int depth_ = 0;
algo_t algo_ = algo_t::NAIVE;
output_t output_ = output_t::RAW;
float threshold_ = 0.5;
float global_bias_ = 0;
shmem_size_params class_ssp_, proba_ssp_;
int fixed_block_count_ = 0;
};
struct dense_forest : forest {
void transform_trees(const dense_node* nodes) {
/* Populate node information:
For each tree, the nodes are still stored in the breadth-first,
left-to-right order. However, instead of storing the nodes of the same
tree adjacently, it uses a different layout. In this layout, the roots
of all trees (node 0) are stored first, followed by left children of
the roots of all trees (node 1), followed by the right children of the
roots of all trees (node 2), and so on.
*/
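// e.g. (illustrative) with 3 trees: node 0 of trees 0..2 lands at indices 0, 1, 2;
// node 1 of trees 0..2 at indices 3, 4, 5; in general the slot is
// tree_node * num_trees_ + tree, as written below.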
int global_node = 0;
for (int tree = 0; tree < num_trees_; ++tree) {
int tree_node = 0;
// the counters `level` and `branch` are not used for computing node
// indices, they are only here to highlight the node ordering within
// each tree
for (int level = 0; level <= depth_; ++level) {
for (int branch = 0; branch < 1 << level; ++branch) {
h_nodes_[tree_node * num_trees_ + tree] = nodes[global_node];
++tree_node;
++global_node;
}
}
}
}
void init(const raft::handle_t& h, const dense_node* nodes,
const forest_params_t* params) {
init_common(h, params);
if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG;
int num_nodes = forest_num_nodes(num_trees_, depth_);
nodes_ = (dense_node*)h.get_device_allocator()->allocate(
sizeof(dense_node) * num_nodes, h.get_stream());
h_nodes_.resize(num_nodes);
if (algo_ == algo_t::NAIVE) {
std::copy(nodes, nodes + num_nodes, h_nodes_.begin());
} else {
transform_trees(nodes);
}
CUDA_CHECK(hipMemcpyAsync(nodes_, h_nodes_.data(),
num_nodes * sizeof(dense_node),
hipMemcpyHostToDevice, h.get_stream()));
// copy must be finished before freeing the host data
CUDA_CHECK(hipStreamSynchronize(h.get_stream()));
h_nodes_.clear();
h_nodes_.shrink_to_fit();
}
virtual void infer(predict_params params, hipStream_t stream) override {
dense_storage forest(nodes_, num_trees_,
algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1,
algo_ == algo_t::NAIVE ? 1 : num_trees_);
fil::infer(forest, params, stream);
}
virtual void free(const raft::handle_t& h) override {
int num_nodes = forest_num_nodes(num_trees_, depth_);
h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes,
h.get_stream());
}
dense_node* nodes_ = nullptr;
thrust::host_vector<dense_node> h_nodes_;
};
template <typename node_t>
struct sparse_forest : forest {
void init(const raft::handle_t& h, const int* trees, const node_t* nodes,
const forest_params_t* params) {
init_common(h, params);
if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE;
depth_ = 0; // a placeholder value
num_nodes_ = params->num_nodes;
// trees
trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_,
h.get_stream());
CUDA_CHECK(hipMemcpyAsync(trees_, trees, sizeof(int) * num_trees_,
hipMemcpyHostToDevice, h.get_stream()));
// nodes
nodes_ = (node_t*)h.get_device_allocator()->allocate(
sizeof(node_t) * num_nodes_, h.get_stream());
CUDA_CHECK(hipMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_,
hipMemcpyHostToDevice, h.get_stream()));
}
virtual void infer(predict_params params, hipStream_t stream) override {
sparse_storage<node_t> forest(trees_, nodes_, num_trees_);
fil::infer(forest, params, stream);
}
void free(const raft::handle_t& h) override {
h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_,
h.get_stream());
h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_,
h.get_stream());
}
int num_nodes_ = 0;
int* trees_ = nullptr;
node_t* nodes_ = nullptr;
};
void check_params(const forest_params_t* params, bool dense) {
if (dense) {
ASSERT(params->depth >= 0, "depth must be non-negative for dense forests");
} else {
ASSERT(params->num_nodes >= 0,
"num_nodes must be non-negative for sparse forests");
ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO,
"only ALGO_AUTO and NAIVE algorithms are supported "
"for sparse forests");
}
ASSERT(params->num_trees >= 0, "num_trees must be non-negative");
ASSERT(params->num_cols >= 0, "num_cols must be non-negative");
switch (params->algo) {
case algo_t::ALGO_AUTO:
case algo_t::NAIVE:
case algo_t::TREE_REORG:
case algo_t::BATCH_TREE_REORG:
break;
default:
ASSERT(false,
"algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG");
}
switch (params->leaf_algo) {
case leaf_algo_t::FLOAT_UNARY_BINARY:
if ((params->output & output_t::CLASS) != 0) {
ASSERT(params->num_classes == 2,
"only supporting binary"
" classification using FLOAT_UNARY_BINARY");
} else {
ASSERT(params->num_classes == 1,
"num_classes must be 1 for "
"regression");
}
ASSERT((params->output & output_t::SOFTMAX) == 0,
"softmax does not make sense for leaf_algo == FLOAT_UNARY_BINARY");
break;
case leaf_algo_t::GROVE_PER_CLASS:
ASSERT(params->num_classes > 2,
"num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS");
ASSERT(params->num_trees % params->num_classes == 0,
"num_classes must divide num_trees evenly for GROVE_PER_CLASS");
break;
case leaf_algo_t::CATEGORICAL_LEAF:
ASSERT(params->num_classes >= 2,
"num_classes >= 2 is required for "
"leaf_algo == CATEGORICAL_LEAF");
ASSERT((params->output & output_t::SOFTMAX) == 0,
"softmax not supported for leaf_algo == CATEGORICAL_LEAF");
break;
default:
ASSERT(false,
"leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF"
" or GROVE_PER_CLASS");
}
if ((params->output & ~output_t::ALL_SET) != 0) {
ASSERT(
false,
"output should be a combination of RAW, AVG, SIGMOID, CLASS and SOFTMAX");
}
ASSERT(~params->output & (output_t::SIGMOID | output_t::SOFTMAX),
"combining softmax and sigmoid is not supported");
ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative");
}
template <typename T, typename L>
int tree_root(const tl::Tree<T, L>& tree) {
return 0; // Treelite format assumes that the root is 0
}
template <typename T, typename L>
inline int max_depth(const tl::Tree<T, L>& tree) {
// trees of this depth aren't used, so it most likely means bad input data,
// e.g. cycles in the forest
const int DEPTH_LIMIT = 500;
int root_index = tree_root(tree);
typedef std::pair<int, int> pair_t;
std::stack<pair_t> stack;
stack.push(pair_t(root_index, 0));
int max_depth = 0;
while (!stack.empty()) {
const pair_t& pair = stack.top();
int node_id = pair.first;
int depth = pair.second;
stack.pop();
while (!tree.IsLeaf(node_id)) {
stack.push(pair_t(tree.LeftChild(node_id), depth + 1));
node_id = tree.RightChild(node_id);
depth++;
ASSERT(depth < DEPTH_LIMIT,
"depth limit reached, might be a cycle in the tree");
}
// only need to update depth for leaves
max_depth = ::max(max_depth, depth);
}
return max_depth;
}
template <typename T, typename L>
int max_depth(const tl::ModelImpl<T, L>& model) {
int depth = 0;
const auto& trees = model.trees;
#pragma omp parallel for reduction(max : depth)
for (size_t i = 0; i < trees.size(); ++i) {
const auto& tree = trees[i];
depth = ::max(depth, max_depth(tree));
}
return depth;
}
inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right,
bool* default_left, tl::Operator comparison_op) {
// in treelite (take left node if val [op] threshold),
// the meaning of the condition is reversed compared to FIL;
// thus, "<" in treelite corresponds to comparison ">=" used by FIL
// https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243
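// e.g. (illustrative): a treelite node "go left if x >= t" (kGE) keeps its
// threshold but swaps its children, since FIL routes "x >= threshold" to the
// right child; "go left if x < t" (kLT) already matches and needs no change.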
switch (comparison_op) {
case tl::Operator::kLT:
break;
case tl::Operator::kLE:
// x <= y is equivalent to x < y', where y' is the next representable float
*pthreshold =
std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity());
break;
case tl::Operator::kGT:
// x > y is equivalent to x >= y', where y' is the next representable float
// left and right still need to be swapped
*pthreshold =
std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity());
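// intentionally no break: fall through to kGE to swap left and right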
case tl::Operator::kGE:
// swap left and right
std::swap(*tl_left, *tl_right);
*default_left = !*default_left;
break;
default:
ASSERT(false, "only <, >, <= and >= comparisons are supported");
}
}
/** if the vector consists of zeros and a single one, return the position
for the one (assumed class label). Else, asserts false.
If the vector contains a NAN, asserts false */
template <typename L>
int find_class_label_from_one_hot(L* vector, int len) {
bool found_label = false;
int out;
for (int i = 0; i < len; ++i) {
if (vector[i] == static_cast<L>(1.0)) {
ASSERT(!found_label, "label vector contains multiple 1.0f");
out = i;
found_label = true;
} else {
ASSERT(vector[i] == static_cast<L>(0.0),
"label vector contains values other than 0.0 and 1.0");
}
}
ASSERT(found_label, "did not find 1.0f in vector");
return out;
}
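// e.g. (illustrative): find_class_label_from_one_hot applied to {0, 0, 1, 0} returns 2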
template <typename fil_node_t, typename T, typename L>
void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree<T, L>& tl_tree,
int tl_node_id, const forest_params_t& forest_params) {
auto vec = tl_tree.LeafVector(tl_node_id);
switch (forest_params.leaf_algo) {
case leaf_algo_t::CATEGORICAL_LEAF:
ASSERT(vec.size() == forest_params.num_classes,
"inconsistent number of classes in treelite leaves");
fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size());
break;
case leaf_algo_t::FLOAT_UNARY_BINARY:
case leaf_algo_t::GROVE_PER_CLASS:
fil_node->val.f = static_cast<float>(tl_tree.LeafValue(tl_node_id));
ASSERT(!tl_tree.HasLeafVector(tl_node_id),
"some but not all treelite leaves have leaf_vector()");
break;
default:
ASSERT(false, "internal error: invalid leaf_algo");
};
}
template <typename T, typename L>
void node2fil_dense(std::vector<dense_node>* pnodes, int root, int cur,
const tl::Tree<T, L>& tree, int node_id,
const forest_params_t& forest_params) {
if (tree.IsLeaf(node_id)) {
(*pnodes)[root + cur] = dense_node(val_t{.f = NAN}, NAN, 0, false, true);
tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params);
return;
}
// inner node
ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical,
"only numerical split nodes are supported");
int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id);
bool default_left = tree.DefaultLeft(node_id);
float threshold = static_cast<float>(tree.Threshold(node_id));
adjust_threshold(&threshold, &tl_left, &tl_right, &default_left,
tree.ComparisonOp(node_id));
(*pnodes)[root + cur] = dense_node(
val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false);
int left = 2 * cur + 1;
node2fil_dense(pnodes, root, left, tree, tl_left, forest_params);
node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params);
}
template <typename T, typename L>
void tree2fil_dense(std::vector<dense_node>* pnodes, int root,
const tl::Tree<T, L>& tree,
const forest_params_t& forest_params) {
node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params);
}
template <typename fil_node_t, typename T, typename L>
int tree2fil_sparse(std::vector<fil_node_t>& nodes, int root,
const tl::Tree<T, L>& tree,
const forest_params_t& forest_params) {
typedef std::pair<int, int> pair_t;
std::stack<pair_t> stack;
int built_index = root + 1;
stack.push(pair_t(tree_root(tree), 0));
while (!stack.empty()) {
const pair_t& top = stack.top();
int node_id = top.first;
int cur = top.second;
stack.pop();
while (!tree.IsLeaf(node_id)) {
// inner node
ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical,
"only numerical split nodes are supported");
// tl_left and tl_right are indices of the children in the treelite tree
// (stored as an array of nodes)
int tl_left = tree.LeftChild(node_id),
tl_right = tree.RightChild(node_id);
bool default_left = tree.DefaultLeft(node_id);
float threshold = static_cast<float>(tree.Threshold(node_id));
adjust_threshold(&threshold, &tl_left, &tl_right, &default_left,
tree.ComparisonOp(node_id));
// reserve space for child nodes
// left is the offset of the left child node relative to the tree root
// in the array of all nodes of the FIL sparse forest
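      // e.g. for the tree root (cur == 0) this reserves nodes[root + 1] and
      // nodes[root + 2] for its children, so left == 1 and the right child
      // sits at offset left + 1 == 2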
int left = built_index - root;
built_index += 2;
nodes[root + cur] =
fil_node_t(val_t{.f = 0}, threshold, tree.SplitIndex(node_id),
default_left, false, left);
// push child nodes into the stack
stack.push(pair_t(tl_right, left + 1));
//stack.push(pair_t(tl_left, left));
node_id = tl_left;
cur = left;
}
// leaf node
nodes[root + cur] = fil_node_t(val_t{.f = NAN}, NAN, 0, false, true, 0);
tl2fil_leaf_payload(&nodes[root + cur], tree, node_id, forest_params);
}
return root;
}
template <typename T, typename L>
size_t tl_leaf_vector_size(const tl::ModelImpl<T, L>& model) {
const tl::Tree<T, L>& tree = model.trees[0];
int node_key;
for (node_key = tree_root(tree); !tree.IsLeaf(node_key);
node_key = tree.RightChild(node_key))
;
if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size();
return 0;
}
// tl2fil_common is the part of conversion from a treelite model
// common for dense and sparse forests
template <typename T, typename L>
void tl2fil_common(forest_params_t* params, const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
  // fill in forest-independent params
params->algo = tl_params->algo;
params->threshold = tl_params->threshold;
// fill in forest-dependent params
params->depth = max_depth(model); // also checks for cycles
const tl::ModelParam& param = model.param;
// assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value()
size_t leaf_vec_size = tl_leaf_vector_size(model);
std::string pred_transform(param.pred_transform);
if (leaf_vec_size > 0) {
ASSERT(leaf_vec_size == model.task_param.num_class,
"treelite model inconsistent");
params->num_classes = leaf_vec_size;
params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF;
ASSERT(tl_params->output_class,
"output_class==true is required for multi-class models");
ASSERT(
pred_transform == "max_index" || pred_transform == "identity_multiclass",
"only max_index and identity_multiclass values of pred_transform "
"are supported for multi-class models");
} else {
if (model.task_param.num_class > 1) {
params->num_classes = static_cast<int>(model.task_param.num_class);
ASSERT(tl_params->output_class,
"output_class==true is required for multi-class models");
ASSERT(pred_transform == "identity_multiclass" ||
pred_transform == "max_index" || pred_transform == "softmax" ||
pred_transform == "multiclass_ova",
"only identity_multiclass, max_index, multiclass_ova and softmax "
"values of pred_transform are supported for xgboost-style "
"multi-class classification models.");
// this function should not know how many threads per block will be used
params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS;
} else {
params->num_classes = tl_params->output_class ? 2 : 1;
ASSERT(pred_transform == "sigmoid" || pred_transform == "identity",
"only sigmoid and identity values of pred_transform "
"are supported for binary classification and regression models.");
params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
}
}
params->num_cols = model.num_feature;
ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported");
params->global_bias = param.global_bias;
params->output = output_t::RAW;
/** output_t::CLASS denotes using a threshold in FIL, when
predict_proba == false. For all multiclass models, the best class is
selected using argmax instead. This happens when either
leaf_algo == CATEGORICAL_LEAF or num_classes > 2.
**/
if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF &&
params->num_classes <= 2) {
params->output = output_t(params->output | output_t::CLASS);
}
// "random forest" in treelite means tree output averaging
if (model.average_tree_output) {
params->output = output_t(params->output | output_t::AVG);
}
if (pred_transform == "sigmoid" || pred_transform == "multiclass_ova") {
params->output = output_t(params->output | output_t::SIGMOID);
}
if (pred_transform == "softmax")
params->output = output_t(params->output | output_t::SOFTMAX);
params->num_trees = model.trees.size();
params->blocks_per_sm = tl_params->blocks_per_sm;
}
// uses treelite model with additional tl_params to initialize FIL params
// and dense nodes (stored in *pnodes)
template <typename T, typename L>
void tl2fil_dense(std::vector<dense_node>* pnodes, forest_params_t* params,
const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
tl2fil_common(params, model, tl_params);
// convert the nodes
int num_nodes = forest_num_nodes(params->num_trees, params->depth);
pnodes->resize(num_nodes, dense_node());
for (int i = 0; i < model.trees.size(); ++i) {
tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i],
*params);
}
}
template <typename fil_node_t>
struct tl2fil_sparse_check_t {
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {
ASSERT(false,
"internal error: "
"only a specialization of this template should be used");
}
};
template <>
struct tl2fil_sparse_check_t<sparse_node16> {
// no extra check for 16-byte sparse nodes
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {}
};
template <>
struct tl2fil_sparse_check_t<sparse_node8> {
static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS;
static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1;
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {
// check the number of features
int num_features = model.num_feature;
ASSERT(num_features <= MAX_FEATURES,
"model has %d features, "
"but only %d supported for 8-byte sparse nodes",
num_features, MAX_FEATURES);
// check the number of tree nodes
const std::vector<tl::Tree<T, L>>& trees = model.trees;
for (int i = 0; i < trees.size(); ++i) {
int num_nodes = trees[i].num_nodes;
ASSERT(num_nodes <= MAX_TREE_NODES,
"tree %d has %d nodes, "
"but only %d supported for 8-byte sparse nodes",
i, num_nodes, MAX_TREE_NODES);
}
}
};
// uses treelite model with additional tl_params to initialize FIL params,
// trees (stored in *ptrees) and sparse nodes (stored in *pnodes)
template <typename fil_node_t, typename T, typename L>
void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes,
forest_params_t* params, const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
tl2fil_common(params, model, tl_params);
tl2fil_sparse_check_t<fil_node_t>::check(model);
size_t num_trees = model.trees.size();
ptrees->reserve(num_trees);
ptrees->push_back(0);
for (size_t i = 0; i < num_trees - 1; ++i) {
ptrees->push_back(model.trees[i].num_nodes + ptrees->back());
}
size_t total_nodes = ptrees->back() + model.trees.back().num_nodes;
pnodes->resize(total_nodes);
// convert the nodes
#pragma omp parallel for
for (int i = 0; i < num_trees; ++i) {
tree2fil_sparse(*pnodes, (*ptrees)[i], model.trees[i], *params);
}
params->num_nodes = pnodes->size();
}
void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node* nodes,
const forest_params_t* params) {
check_params(params, true);
dense_forest* f = new dense_forest;
f->init(h, nodes, params);
*pf = f;
}
template <typename fil_node_t>
void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees,
const fil_node_t* nodes, const forest_params_t* params) {
check_params(params, false);
sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>;
f->init(h, trees, nodes, params);
*pf = f;
}
// explicit instantiations for init_sparse()
template void init_sparse<sparse_node16>(const raft::handle_t& h, forest_t* pf,
const int* trees,
const sparse_node16* nodes,
const forest_params_t* params);
template void init_sparse<sparse_node8>(const raft::handle_t& h, forest_t* pf,
const int* trees,
const sparse_node8* nodes,
const forest_params_t* params);
template <typename T, typename L>
void from_treelite(const raft::handle_t& handle, forest_t* pforest,
const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
// Invariants on threshold and leaf types
static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
"Model must contain float32 or float64 thresholds for splits");
ASSERT((std::is_same<L, float>::value || std::is_same<L, double>::value),
"Models with integer leaf output are not yet supported");
// Display appropriate warnings when float64 values are being casted into
// float32, as FIL only supports inferencing with float32 for the time being
if (std::is_same<T, double>::value || std::is_same<L, double>::value) {
CUML_LOG_WARN(
"Casting all thresholds and leaf values to float32, as FIL currently "
"doesn't support inferencing models with float64 values. "
"This may lead to predictions with reduced accuracy.");
}
storage_type_t storage_type = tl_params->storage_type;
// build dense trees by default
if (storage_type == storage_type_t::AUTO) {
if (tl_params->algo == algo_t::ALGO_AUTO ||
tl_params->algo == algo_t::NAIVE) {
int depth = max_depth(model);
// max 2**25 dense nodes, 256 MiB dense model size
const int LOG2_MAX_DENSE_NODES = 25;
int log2_num_dense_nodes =
depth + 1 + int(ceil(std::log2(model.trees.size())));
storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES
? storage_type_t::SPARSE
: storage_type_t::DENSE;
} else {
// only dense storage is supported for other algorithms
storage_type = storage_type_t::DENSE;
}
}
forest_params_t params;
switch (storage_type) {
case storage_type_t::DENSE: {
std::vector<dense_node> nodes;
tl2fil_dense(&nodes, ¶ms, model, tl_params);
init_dense(handle, pforest, nodes.data(), ¶ms);
// sync is necessary as nodes is used in init_dense(),
// but destructed at the end of this function
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
break;
}
case storage_type_t::SPARSE: {
std::vector<int> trees;
std::vector<sparse_node16> nodes;
tl2fil_sparse(&trees, &nodes, ¶ms, model, tl_params);
init_sparse(handle, pforest, trees.data(), nodes.data(), ¶ms);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
break;
}
case storage_type_t::SPARSE8: {
std::vector<int> trees;
std::vector<sparse_node8> nodes;
tl2fil_sparse(&trees, &nodes, ¶ms, model, tl_params);
init_sparse(handle, pforest, trees.data(), nodes.data(), ¶ms);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
break;
}
default:
ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE");
}
}
void from_treelite(const raft::handle_t& handle, forest_t* pforest,
ModelHandle model, const treelite_params_t* tl_params) {
const tl::Model& model_ref = *(tl::Model*)model;
model_ref.Dispatch([&handle, pforest, tl_params](const auto& model_inner) {
// model_inner is of the concrete type tl::ModelImpl<T, L>
from_treelite(handle, pforest, model_inner, tl_params);
});
}
void free(const raft::handle_t& h, forest_t f) {
f->free(h);
delete f;
}
void predict(const raft::handle_t& h, forest_t f, float* preds,
const float* data, size_t num_rows, bool predict_proba) {
f->predict(h, preds, data, num_rows, predict_proba);
}
} // namespace fil
} // namespace ML
| 20ee0f1ff92d580bc4dbb923fb0564185bd22383.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file fil.cu implements forest inference */
#include <omp.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <stack>
#include <utility>
#include <cuml/fil/fil.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include "common.cuh"
namespace ML {
namespace fil {
using namespace MLCommon;
namespace tl = treelite;
__host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
/** performs additional transformations on the array of forest predictions
(preds) of size n; the transformations are defined by output, and include
averaging (multiplying by inv_num_trees), adding global_bias (always done),
sigmoid and applying threshold. in case of complement_proba,
fills in the complement probability */
__global__ void transform_k(float* preds, size_t n, output_t output,
float inv_num_trees, float threshold,
float global_bias, bool complement_proba) {
size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x;
if (i >= n) return;
if (complement_proba && i % 2 != 0) return;
float result = preds[i];
if ((output & output_t::AVG) != 0) result *= inv_num_trees;
result += global_bias;
if ((output & output_t::SIGMOID) != 0) result = sigmoid(result);
// will not be done on CATEGORICAL_LEAF because the whole kernel will not run
if ((output & output_t::CLASS) != 0) {
result = result > threshold ? 1.0f : 0.0f;
}
// sklearn outputs numpy array in 'C' order, with the number of classes being last dimension
// that is also the default order, so we should use the same one
if (complement_proba) {
preds[i] = 1.0f - result;
preds[i + 1] = result;
} else
preds[i] = result;
}
struct forest {
void init_n_items(int device) {
int max_shm_std = 48 * 1024; // 48 KiB
/// the most shared memory a kernel can request on the GPU in question
int max_shm = 0;
CUDA_CHECK(cudaDeviceGetAttribute(
&max_shm, cudaDevAttrMaxSharedMemoryPerBlockOptin, device));
// TODO(canonizer): use >48KiB shared memory if available
max_shm = std::min(max_shm, max_shm_std);
// searching for the most items per block while respecting the shared
// memory limits creates a full linear programming problem.
// solving it in a single equation looks less tractable than this
for (bool predict_proba : {false, true}) {
shmem_size_params& ssp_ = predict_proba ? proba_ssp_ : class_ssp_;
ssp_.predict_proba = predict_proba;
shmem_size_params ssp = ssp_;
for (bool cols_in_shmem : {false, true}) {
ssp.cols_in_shmem = cols_in_shmem;
for (ssp.n_items = 1;
ssp.n_items <= (algo_ == algo_t::BATCH_TREE_REORG ? 4 : 1);
++ssp.n_items) {
ssp.compute_smem_footprint();
if (ssp.shm_sz < max_shm) ssp_ = ssp;
}
}
ASSERT(max_shm >= ssp_.shm_sz,
"FIL out of shared memory. Perhaps the maximum number of \n"
"supported classes is exceeded? 5'000 would still be safe.");
}
}
void init_fixed_block_count(int device, int blocks_per_sm) {
int max_threads_per_sm, sm_count;
CUDA_CHECK(cudaDeviceGetAttribute(
&max_threads_per_sm, cudaDevAttrMaxThreadsPerMultiProcessor, device));
int max_blocks_per_sm = max_threads_per_sm / FIL_TPB;
ASSERT(blocks_per_sm <= max_blocks_per_sm,
"on this GPU, FIL blocks_per_sm cannot exceed %d",
max_blocks_per_sm);
CUDA_CHECK(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount,
device));
fixed_block_count_ = blocks_per_sm * sm_count;
}
void init_common(const raft::handle_t& h, const forest_params_t* params) {
depth_ = params->depth;
num_trees_ = params->num_trees;
algo_ = params->algo;
output_ = params->output;
threshold_ = params->threshold;
global_bias_ = params->global_bias;
proba_ssp_.leaf_algo = params->leaf_algo;
proba_ssp_.num_cols = params->num_cols;
proba_ssp_.num_classes = params->num_classes;
class_ssp_ = proba_ssp_;
int device = h.get_device();
init_n_items(device); // n_items takes priority over blocks_per_sm
init_fixed_block_count(device, params->blocks_per_sm);
}
virtual void infer(predict_params params, cudaStream_t stream) = 0;
void predict(const raft::handle_t& h, float* preds, const float* data,
size_t num_rows, bool predict_proba) {
// Initialize prediction parameters.
predict_params params(predict_proba ? proba_ssp_ : class_ssp_);
params.algo = algo_;
params.preds = preds;
params.data = data;
params.num_rows = num_rows;
// ignored unless predict_proba is true and algo is GROVE_PER_CLASS
params.transform = output_;
// fixed_block_count_ == 0 means the number of thread blocks is
// proportional to the number of rows
params.num_blocks = fixed_block_count_;
/**
The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows
(always 2 outputs):
RAW: output the sum of tree predictions
AVG is set: divide by the number of trees (averaging)
SIGMOID is set: apply sigmoid
CLASS is set: ignored
SOFTMAX is set: error
write the output of the previous stages and its complement
The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows
(always 1 output):
RAW (no values set): output the sum of tree predictions
AVG is set: divide by the number of trees (averaging)
SIGMOID is set: apply sigmoid
CLASS is set: apply threshold (equivalent to choosing best class)
SOFTMAX is set: error
The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows
(always num_classes outputs):
RAW (no values set): output class votes
AVG is set: divide by the number of trees (averaging, output class probability)
SIGMOID is set: apply sigmoid
CLASS is set: ignored
SOFTMAX is set: error
The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows
(always 1 output):
RAW (no values set): output the label of the class with highest probability, else output label 0.
SOFTMAX is set: error
All other flags (AVG, SIGMOID, CLASS) are ignored
The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() works as follows
(always num_classes outputs):
RAW (no values set): output class votes
AVG is set: divide by the number of trees (averaging, output class probability)
SIGMOID is set: apply sigmoid; if SOFTMAX is also set: error
CLASS is set: ignored
SOFTMAX is set: softmax is applied after averaging and global_bias
The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows
(always 1 output):
RAW (no values set): output the label of the class with highest margin,
equal margins resolved in favor of smaller label integer
All other flags (AVG, SIGMOID, CLASS, SOFTMAX) are ignored
*/
output_t ot = output_;
// Treelite applies bias before softmax, but we do after.
// Simulating treelite order, which cancels out bias.
// If non-proba prediction used, it still will not matter
// for the same reason softmax will not.
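    // (note: softmax is invariant to adding the same scalar to every class
    // score, so applying the global bias before the softmax would not change
    // the probabilities; zeroing it here therefore reproduces treelite's result)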
float global_bias = (ot & output_t::SOFTMAX) != 0 ? 0.0f : global_bias_;
bool complement_proba = false, do_transform;
if (predict_proba) {
// no threshold on probabilities
ot = output_t(ot & ~output_t::CLASS);
switch (params.leaf_algo) {
case leaf_algo_t::FLOAT_UNARY_BINARY:
params.num_outputs = 2;
complement_proba = true;
do_transform = true;
break;
case leaf_algo_t::GROVE_PER_CLASS:
// for GROVE_PER_CLASS, averaging happens in infer_k
ot = output_t(ot & ~output_t::AVG);
params.num_outputs = params.num_classes;
do_transform = ot != output_t::RAW && ot != output_t::SOFTMAX ||
global_bias != 0.0f;
break;
case leaf_algo_t::CATEGORICAL_LEAF:
params.num_outputs = params.num_classes;
do_transform = ot != output_t::RAW || global_bias_ != 0.0f;
break;
default:
ASSERT(false, "internal error: invalid leaf_algo_");
}
} else {
if (params.leaf_algo == leaf_algo_t::FLOAT_UNARY_BINARY) {
do_transform = ot != output_t::RAW || global_bias_ != 0.0f;
} else {
// GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and
// all transforms are monotonic. also, would break current code
do_transform = false;
}
params.num_outputs = 1;
}
// Predict using the forest.
cudaStream_t stream = h.get_stream();
infer(params, stream);
if (do_transform) {
size_t num_values_to_transform =
(size_t)num_rows * (size_t)params.num_outputs;
transform_k<<<raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB),
FIL_TPB, 0, stream>>>(
preds, num_values_to_transform, ot,
num_trees_ > 0 ? (1.0f / num_trees_) : 1.0f, threshold_, global_bias,
complement_proba);
CUDA_CHECK(cudaPeekAtLastError());
}
}
virtual void free(const raft::handle_t& h) = 0;
virtual ~forest() {}
int num_trees_ = 0;
int depth_ = 0;
algo_t algo_ = algo_t::NAIVE;
output_t output_ = output_t::RAW;
float threshold_ = 0.5;
float global_bias_ = 0;
shmem_size_params class_ssp_, proba_ssp_;
int fixed_block_count_ = 0;
};
struct dense_forest : forest {
void transform_trees(const dense_node* nodes) {
/* Populate node information:
For each tree, the nodes are still stored in the breadth-first,
left-to-right order. However, instead of storing the nodes of the same
tree adjacently, it uses a different layout. In this layout, the roots
of all trees (node 0) are stored first, followed by left children of
the roots of all trees (node 1), followed by the right children of the
roots of all trees (node 2), and so on.
*/
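    /* For example (illustrative): with num_trees_ == 2 and depth_ == 1, the
       resulting h_nodes_ layout is
       [t0.root, t1.root, t0.left, t1.left, t0.right, t1.right]. */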
int global_node = 0;
for (int tree = 0; tree < num_trees_; ++tree) {
int tree_node = 0;
// the counters `level` and `branch` are not used for computing node
// indices, they are only here to highlight the node ordering within
// each tree
for (int level = 0; level <= depth_; ++level) {
for (int branch = 0; branch < 1 << level; ++branch) {
h_nodes_[tree_node * num_trees_ + tree] = nodes[global_node];
++tree_node;
++global_node;
}
}
}
}
void init(const raft::handle_t& h, const dense_node* nodes,
const forest_params_t* params) {
init_common(h, params);
if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG;
int num_nodes = forest_num_nodes(num_trees_, depth_);
nodes_ = (dense_node*)h.get_device_allocator()->allocate(
sizeof(dense_node) * num_nodes, h.get_stream());
h_nodes_.resize(num_nodes);
if (algo_ == algo_t::NAIVE) {
std::copy(nodes, nodes + num_nodes, h_nodes_.begin());
} else {
transform_trees(nodes);
}
CUDA_CHECK(cudaMemcpyAsync(nodes_, h_nodes_.data(),
num_nodes * sizeof(dense_node),
cudaMemcpyHostToDevice, h.get_stream()));
// copy must be finished before freeing the host data
CUDA_CHECK(cudaStreamSynchronize(h.get_stream()));
h_nodes_.clear();
h_nodes_.shrink_to_fit();
}
virtual void infer(predict_params params, cudaStream_t stream) override {
dense_storage forest(nodes_, num_trees_,
algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1,
algo_ == algo_t::NAIVE ? 1 : num_trees_);
fil::infer(forest, params, stream);
}
virtual void free(const raft::handle_t& h) override {
int num_nodes = forest_num_nodes(num_trees_, depth_);
h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes,
h.get_stream());
}
dense_node* nodes_ = nullptr;
thrust::host_vector<dense_node> h_nodes_;
};
template <typename node_t>
struct sparse_forest : forest {
void init(const raft::handle_t& h, const int* trees, const node_t* nodes,
const forest_params_t* params) {
init_common(h, params);
if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE;
depth_ = 0; // a placeholder value
num_nodes_ = params->num_nodes;
// trees
trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_,
h.get_stream());
CUDA_CHECK(cudaMemcpyAsync(trees_, trees, sizeof(int) * num_trees_,
cudaMemcpyHostToDevice, h.get_stream()));
// nodes
nodes_ = (node_t*)h.get_device_allocator()->allocate(
sizeof(node_t) * num_nodes_, h.get_stream());
CUDA_CHECK(cudaMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_,
cudaMemcpyHostToDevice, h.get_stream()));
}
virtual void infer(predict_params params, cudaStream_t stream) override {
sparse_storage<node_t> forest(trees_, nodes_, num_trees_);
fil::infer(forest, params, stream);
}
void free(const raft::handle_t& h) override {
h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_,
h.get_stream());
h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_,
h.get_stream());
}
int num_nodes_ = 0;
int* trees_ = nullptr;
node_t* nodes_ = nullptr;
};
void check_params(const forest_params_t* params, bool dense) {
if (dense) {
ASSERT(params->depth >= 0, "depth must be non-negative for dense forests");
} else {
ASSERT(params->num_nodes >= 0,
"num_nodes must be non-negative for sparse forests");
ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO,
"only ALGO_AUTO and NAIVE algorithms are supported "
"for sparse forests");
}
ASSERT(params->num_trees >= 0, "num_trees must be non-negative");
ASSERT(params->num_cols >= 0, "num_cols must be non-negative");
switch (params->algo) {
case algo_t::ALGO_AUTO:
case algo_t::NAIVE:
case algo_t::TREE_REORG:
case algo_t::BATCH_TREE_REORG:
break;
default:
ASSERT(false,
"algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG");
}
switch (params->leaf_algo) {
case leaf_algo_t::FLOAT_UNARY_BINARY:
if ((params->output & output_t::CLASS) != 0) {
ASSERT(params->num_classes == 2,
"only supporting binary"
" classification using FLOAT_UNARY_BINARY");
} else {
ASSERT(params->num_classes == 1,
"num_classes must be 1 for "
"regression");
}
ASSERT((params->output & output_t::SOFTMAX) == 0,
"softmax does not make sense for leaf_algo == FLOAT_UNARY_BINARY");
break;
case leaf_algo_t::GROVE_PER_CLASS:
ASSERT(params->num_classes > 2,
"num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS");
ASSERT(params->num_trees % params->num_classes == 0,
"num_classes must divide num_trees evenly for GROVE_PER_CLASS");
break;
case leaf_algo_t::CATEGORICAL_LEAF:
ASSERT(params->num_classes >= 2,
"num_classes >= 2 is required for "
"leaf_algo == CATEGORICAL_LEAF");
ASSERT((params->output & output_t::SOFTMAX) == 0,
"softmax not supported for leaf_algo == CATEGORICAL_LEAF");
break;
default:
ASSERT(false,
"leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF"
" or GROVE_PER_CLASS");
}
if ((params->output & ~output_t::ALL_SET) != 0) {
ASSERT(
false,
"output should be a combination of RAW, AVG, SIGMOID, CLASS and SOFTMAX");
}
ASSERT(~params->output & (output_t::SIGMOID | output_t::SOFTMAX),
"combining softmax and sigmoid is not supported");
ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative");
}
template <typename T, typename L>
int tree_root(const tl::Tree<T, L>& tree) {
return 0; // Treelite format assumes that the root is 0
}
template <typename T, typename L>
inline int max_depth(const tl::Tree<T, L>& tree) {
// trees of this depth aren't used, so it most likely means bad input data,
// e.g. cycles in the forest
const int DEPTH_LIMIT = 500;
int root_index = tree_root(tree);
typedef std::pair<int, int> pair_t;
std::stack<pair_t> stack;
stack.push(pair_t(root_index, 0));
int max_depth = 0;
while (!stack.empty()) {
const pair_t& pair = stack.top();
int node_id = pair.first;
int depth = pair.second;
stack.pop();
while (!tree.IsLeaf(node_id)) {
stack.push(pair_t(tree.LeftChild(node_id), depth + 1));
node_id = tree.RightChild(node_id);
depth++;
ASSERT(depth < DEPTH_LIMIT,
"depth limit reached, might be a cycle in the tree");
}
// only need to update depth for leaves
max_depth = std::max(max_depth, depth);
}
return max_depth;
}
template <typename T, typename L>
int max_depth(const tl::ModelImpl<T, L>& model) {
int depth = 0;
const auto& trees = model.trees;
#pragma omp parallel for reduction(max : depth)
for (size_t i = 0; i < trees.size(); ++i) {
const auto& tree = trees[i];
depth = std::max(depth, max_depth(tree));
}
return depth;
}
inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right,
bool* default_left, tl::Operator comparison_op) {
// in treelite (take left node if val [op] threshold),
// the meaning of the condition is reversed compared to FIL;
  // thus, "<" in treelite corresponds to comparison ">=" used by FIL
// https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243
switch (comparison_op) {
case tl::Operator::kLT:
break;
case tl::Operator::kLE:
// x <= y is equivalent to x < y', where y' is the next representable float
*pthreshold =
std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity());
break;
case tl::Operator::kGT:
// x > y is equivalent to x >= y', where y' is the next representable float
// left and right still need to be swapped
*pthreshold =
std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity());
case tl::Operator::kGE:
// swap left and right
std::swap(*tl_left, *tl_right);
*default_left = !*default_left;
break;
default:
ASSERT(false, "only <, >, <= and >= comparisons are supported");
}
}
/** if the vector consists of zeros and a single one, return the position
for the one (assumed class label). Else, asserts false.
If the vector contains a NAN, asserts false */
template <typename L>
int find_class_label_from_one_hot(L* vector, int len) {
bool found_label = false;
int out;
for (int i = 0; i < len; ++i) {
if (vector[i] == static_cast<L>(1.0)) {
ASSERT(!found_label, "label vector contains multiple 1.0f");
out = i;
found_label = true;
} else {
ASSERT(vector[i] == static_cast<L>(0.0),
"label vector contains values other than 0.0 and 1.0");
}
}
ASSERT(found_label, "did not find 1.0f in vector");
return out;
}
template <typename fil_node_t, typename T, typename L>
void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree<T, L>& tl_tree,
int tl_node_id, const forest_params_t& forest_params) {
auto vec = tl_tree.LeafVector(tl_node_id);
switch (forest_params.leaf_algo) {
case leaf_algo_t::CATEGORICAL_LEAF:
ASSERT(vec.size() == forest_params.num_classes,
"inconsistent number of classes in treelite leaves");
fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size());
break;
case leaf_algo_t::FLOAT_UNARY_BINARY:
case leaf_algo_t::GROVE_PER_CLASS:
fil_node->val.f = static_cast<float>(tl_tree.LeafValue(tl_node_id));
ASSERT(!tl_tree.HasLeafVector(tl_node_id),
"some but not all treelite leaves have leaf_vector()");
break;
default:
ASSERT(false, "internal error: invalid leaf_algo");
};
}
template <typename T, typename L>
void node2fil_dense(std::vector<dense_node>* pnodes, int root, int cur,
const tl::Tree<T, L>& tree, int node_id,
const forest_params_t& forest_params) {
if (tree.IsLeaf(node_id)) {
(*pnodes)[root + cur] = dense_node(val_t{.f = NAN}, NAN, 0, false, true);
tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params);
return;
}
// inner node
ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical,
"only numerical split nodes are supported");
int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id);
bool default_left = tree.DefaultLeft(node_id);
float threshold = static_cast<float>(tree.Threshold(node_id));
adjust_threshold(&threshold, &tl_left, &tl_right, &default_left,
tree.ComparisonOp(node_id));
(*pnodes)[root + cur] = dense_node(
val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false);
int left = 2 * cur + 1;
node2fil_dense(pnodes, root, left, tree, tl_left, forest_params);
node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params);
}
template <typename T, typename L>
void tree2fil_dense(std::vector<dense_node>* pnodes, int root,
const tl::Tree<T, L>& tree,
const forest_params_t& forest_params) {
node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params);
}
template <typename fil_node_t, typename T, typename L>
int tree2fil_sparse(std::vector<fil_node_t>& nodes, int root,
const tl::Tree<T, L>& tree,
const forest_params_t& forest_params) {
typedef std::pair<int, int> pair_t;
std::stack<pair_t> stack;
int built_index = root + 1;
stack.push(pair_t(tree_root(tree), 0));
while (!stack.empty()) {
const pair_t& top = stack.top();
int node_id = top.first;
int cur = top.second;
stack.pop();
while (!tree.IsLeaf(node_id)) {
// inner node
ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical,
"only numerical split nodes are supported");
// tl_left and tl_right are indices of the children in the treelite tree
// (stored as an array of nodes)
int tl_left = tree.LeftChild(node_id),
tl_right = tree.RightChild(node_id);
bool default_left = tree.DefaultLeft(node_id);
float threshold = static_cast<float>(tree.Threshold(node_id));
adjust_threshold(&threshold, &tl_left, &tl_right, &default_left,
tree.ComparisonOp(node_id));
// reserve space for child nodes
// left is the offset of the left child node relative to the tree root
// in the array of all nodes of the FIL sparse forest
int left = built_index - root;
built_index += 2;
nodes[root + cur] =
fil_node_t(val_t{.f = 0}, threshold, tree.SplitIndex(node_id),
default_left, false, left);
// push child nodes into the stack
stack.push(pair_t(tl_right, left + 1));
//stack.push(pair_t(tl_left, left));
node_id = tl_left;
cur = left;
}
// leaf node
nodes[root + cur] = fil_node_t(val_t{.f = NAN}, NAN, 0, false, true, 0);
tl2fil_leaf_payload(&nodes[root + cur], tree, node_id, forest_params);
}
return root;
}
template <typename T, typename L>
size_t tl_leaf_vector_size(const tl::ModelImpl<T, L>& model) {
const tl::Tree<T, L>& tree = model.trees[0];
int node_key;
for (node_key = tree_root(tree); !tree.IsLeaf(node_key);
node_key = tree.RightChild(node_key))
;
if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size();
return 0;
}
// tl2fil_common is the part of conversion from a treelite model
// common for dense and sparse forests
template <typename T, typename L>
void tl2fil_common(forest_params_t* params, const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
  // fill in forest-independent params
params->algo = tl_params->algo;
params->threshold = tl_params->threshold;
// fill in forest-dependent params
params->depth = max_depth(model); // also checks for cycles
const tl::ModelParam& param = model.param;
// assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value()
size_t leaf_vec_size = tl_leaf_vector_size(model);
std::string pred_transform(param.pred_transform);
if (leaf_vec_size > 0) {
ASSERT(leaf_vec_size == model.task_param.num_class,
"treelite model inconsistent");
params->num_classes = leaf_vec_size;
params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF;
ASSERT(tl_params->output_class,
"output_class==true is required for multi-class models");
ASSERT(
pred_transform == "max_index" || pred_transform == "identity_multiclass",
"only max_index and identity_multiclass values of pred_transform "
"are supported for multi-class models");
} else {
if (model.task_param.num_class > 1) {
params->num_classes = static_cast<int>(model.task_param.num_class);
ASSERT(tl_params->output_class,
"output_class==true is required for multi-class models");
ASSERT(pred_transform == "identity_multiclass" ||
pred_transform == "max_index" || pred_transform == "softmax" ||
pred_transform == "multiclass_ova",
"only identity_multiclass, max_index, multiclass_ova and softmax "
"values of pred_transform are supported for xgboost-style "
"multi-class classification models.");
// this function should not know how many threads per block will be used
params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS;
} else {
params->num_classes = tl_params->output_class ? 2 : 1;
ASSERT(pred_transform == "sigmoid" || pred_transform == "identity",
"only sigmoid and identity values of pred_transform "
"are supported for binary classification and regression models.");
params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY;
}
}
params->num_cols = model.num_feature;
ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported");
params->global_bias = param.global_bias;
params->output = output_t::RAW;
/** output_t::CLASS denotes using a threshold in FIL, when
predict_proba == false. For all multiclass models, the best class is
selected using argmax instead. This happens when either
leaf_algo == CATEGORICAL_LEAF or num_classes > 2.
**/
if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF &&
params->num_classes <= 2) {
params->output = output_t(params->output | output_t::CLASS);
}
// "random forest" in treelite means tree output averaging
if (model.average_tree_output) {
params->output = output_t(params->output | output_t::AVG);
}
if (pred_transform == "sigmoid" || pred_transform == "multiclass_ova") {
params->output = output_t(params->output | output_t::SIGMOID);
}
if (pred_transform == "softmax")
params->output = output_t(params->output | output_t::SOFTMAX);
params->num_trees = model.trees.size();
params->blocks_per_sm = tl_params->blocks_per_sm;
}
// uses treelite model with additional tl_params to initialize FIL params
// and dense nodes (stored in *pnodes)
template <typename T, typename L>
void tl2fil_dense(std::vector<dense_node>* pnodes, forest_params_t* params,
const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
tl2fil_common(params, model, tl_params);
// convert the nodes
int num_nodes = forest_num_nodes(params->num_trees, params->depth);
pnodes->resize(num_nodes, dense_node());
for (int i = 0; i < model.trees.size(); ++i) {
tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i],
*params);
}
}
template <typename fil_node_t>
struct tl2fil_sparse_check_t {
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {
ASSERT(false,
"internal error: "
"only a specialization of this template should be used");
}
};
template <>
struct tl2fil_sparse_check_t<sparse_node16> {
// no extra check for 16-byte sparse nodes
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {}
};
template <>
struct tl2fil_sparse_check_t<sparse_node8> {
static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS;
static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1;
template <typename T, typename L>
static void check(const tl::ModelImpl<T, L>& model) {
// check the number of features
int num_features = model.num_feature;
ASSERT(num_features <= MAX_FEATURES,
"model has %d features, "
"but only %d supported for 8-byte sparse nodes",
num_features, MAX_FEATURES);
// check the number of tree nodes
const std::vector<tl::Tree<T, L>>& trees = model.trees;
for (int i = 0; i < trees.size(); ++i) {
int num_nodes = trees[i].num_nodes;
ASSERT(num_nodes <= MAX_TREE_NODES,
"tree %d has %d nodes, "
"but only %d supported for 8-byte sparse nodes",
i, num_nodes, MAX_TREE_NODES);
}
}
};
// uses treelite model with additional tl_params to initialize FIL params,
// trees (stored in *ptrees) and sparse nodes (stored in *pnodes)
template <typename fil_node_t, typename T, typename L>
void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes,
forest_params_t* params, const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
tl2fil_common(params, model, tl_params);
tl2fil_sparse_check_t<fil_node_t>::check(model);
size_t num_trees = model.trees.size();
ptrees->reserve(num_trees);
ptrees->push_back(0);
for (size_t i = 0; i < num_trees - 1; ++i) {
ptrees->push_back(model.trees[i].num_nodes + ptrees->back());
}
size_t total_nodes = ptrees->back() + model.trees.back().num_nodes;
pnodes->resize(total_nodes);
// convert the nodes
#pragma omp parallel for
for (int i = 0; i < num_trees; ++i) {
tree2fil_sparse(*pnodes, (*ptrees)[i], model.trees[i], *params);
}
params->num_nodes = pnodes->size();
}
void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node* nodes,
const forest_params_t* params) {
check_params(params, true);
dense_forest* f = new dense_forest;
f->init(h, nodes, params);
*pf = f;
}
template <typename fil_node_t>
void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees,
const fil_node_t* nodes, const forest_params_t* params) {
check_params(params, false);
sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>;
f->init(h, trees, nodes, params);
*pf = f;
}
// explicit instantiations for init_sparse()
template void init_sparse<sparse_node16>(const raft::handle_t& h, forest_t* pf,
const int* trees,
const sparse_node16* nodes,
const forest_params_t* params);
template void init_sparse<sparse_node8>(const raft::handle_t& h, forest_t* pf,
const int* trees,
const sparse_node8* nodes,
const forest_params_t* params);
template <typename T, typename L>
void from_treelite(const raft::handle_t& handle, forest_t* pforest,
const tl::ModelImpl<T, L>& model,
const treelite_params_t* tl_params) {
// Invariants on threshold and leaf types
static_assert(std::is_same<T, float>::value || std::is_same<T, double>::value,
"Model must contain float32 or float64 thresholds for splits");
ASSERT((std::is_same<L, float>::value || std::is_same<L, double>::value),
"Models with integer leaf output are not yet supported");
// Display appropriate warnings when float64 values are being casted into
// float32, as FIL only supports inferencing with float32 for the time being
if (std::is_same<T, double>::value || std::is_same<L, double>::value) {
CUML_LOG_WARN(
"Casting all thresholds and leaf values to float32, as FIL currently "
"doesn't support inferencing models with float64 values. "
"This may lead to predictions with reduced accuracy.");
}
storage_type_t storage_type = tl_params->storage_type;
// build dense trees by default
if (storage_type == storage_type_t::AUTO) {
if (tl_params->algo == algo_t::ALGO_AUTO ||
tl_params->algo == algo_t::NAIVE) {
int depth = max_depth(model);
// max 2**25 dense nodes, 256 MiB dense model size
const int LOG2_MAX_DENSE_NODES = 25;
int log2_num_dense_nodes =
depth + 1 + int(ceil(std::log2(model.trees.size())));
storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES
? storage_type_t::SPARSE
: storage_type_t::DENSE;
} else {
// only dense storage is supported for other algorithms
storage_type = storage_type_t::DENSE;
}
}
forest_params_t params;
switch (storage_type) {
case storage_type_t::DENSE: {
std::vector<dense_node> nodes;
tl2fil_dense(&nodes, ¶ms, model, tl_params);
init_dense(handle, pforest, nodes.data(), ¶ms);
// sync is necessary as nodes is used in init_dense(),
// but destructed at the end of this function
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
break;
}
case storage_type_t::SPARSE: {
std::vector<int> trees;
std::vector<sparse_node16> nodes;
tl2fil_sparse(&trees, &nodes, ¶ms, model, tl_params);
init_sparse(handle, pforest, trees.data(), nodes.data(), ¶ms);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
break;
}
case storage_type_t::SPARSE8: {
std::vector<int> trees;
std::vector<sparse_node8> nodes;
tl2fil_sparse(&trees, &nodes, ¶ms, model, tl_params);
init_sparse(handle, pforest, trees.data(), nodes.data(), ¶ms);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
break;
}
default:
ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE");
}
}
void from_treelite(const raft::handle_t& handle, forest_t* pforest,
ModelHandle model, const treelite_params_t* tl_params) {
const tl::Model& model_ref = *(tl::Model*)model;
model_ref.Dispatch([&handle, pforest, tl_params](const auto& model_inner) {
// model_inner is of the concrete type tl::ModelImpl<T, L>
from_treelite(handle, pforest, model_inner, tl_params);
});
}
void free(const raft::handle_t& h, forest_t f) {
f->free(h);
delete f;
}
void predict(const raft::handle_t& h, forest_t f, float* preds,
const float* data, size_t num_rows, bool predict_proba) {
f->predict(h, preds, data, num_rows, predict_proba);
}
} // namespace fil
} // namespace ML
|
e4e58cee90fb835ab45af22a2ced9a706abff608.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
__global__ void linear_bias_fwd_kernel(
const float *in_buf,
int dim,
int batch_size,
const float *bias,
float *out_buf)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int k = idx % dim;
int batch_idx = idx / dim;
if (k < dim && batch_idx < batch_size) {
out_buf[idx] = in_buf[idx] + bias[k];
}
}
extern "C" void neuralops_cuda_linear_bias_fwd(
const float *in_buf,
size_t dim,
size_t batch_size,
const float *bias,
float *out_buf,
hipStream_t stream)
{
int n = dim * batch_size;
hipLaunchKernelGGL(( linear_bias_fwd_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
in_buf, dim, batch_size, bias, out_buf);
}
extern "C" void neuralops_cuda_linear_bias_fwd_inplace(
float *out_buf,
size_t dim,
size_t batch_size,
const float *bias,
hipStream_t stream)
{
int n = dim * batch_size;
hipLaunchKernelGGL(( linear_bias_fwd_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
out_buf, dim, batch_size, bias, out_buf);
}
__global__ void linear_bias_bwd_kernel(
const float *out_grad,
int dim,
int batch_size,
float *in_grad)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int k = idx % dim;
int batch_idx = idx / dim;
if (k < dim && batch_idx < batch_size) {
atomicAdd(&in_grad[k], out_grad[idx]);
}
}
extern "C" void neuralops_cuda_linear_bias_bwd(
const float *out_grad,
size_t dim,
size_t batch_size,
float *in_grad,
hipStream_t stream)
{
int n = dim * batch_size;
hipLaunchKernelGGL(( linear_bias_bwd_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
out_grad, dim, batch_size, in_grad);
}
| e4e58cee90fb835ab45af22a2ced9a706abff608.cu | #include <cuda_runtime_api.h>
__global__ void linear_bias_fwd_kernel(
const float *in_buf,
int dim,
int batch_size,
const float *bias,
float *out_buf)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int k = idx % dim;
int batch_idx = idx / dim;
if (k < dim && batch_idx < batch_size) {
out_buf[idx] = in_buf[idx] + bias[k];
}
}
extern "C" void neuralops_cuda_linear_bias_fwd(
const float *in_buf,
size_t dim,
size_t batch_size,
const float *bias,
float *out_buf,
cudaStream_t stream)
{
int n = dim * batch_size;
linear_bias_fwd_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
in_buf, dim, batch_size, bias, out_buf);
}
extern "C" void neuralops_cuda_linear_bias_fwd_inplace(
float *out_buf,
size_t dim,
size_t batch_size,
const float *bias,
cudaStream_t stream)
{
int n = dim * batch_size;
linear_bias_fwd_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
out_buf, dim, batch_size, bias, out_buf);
}
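// The backward kernel below reduces out_grad over the batch dimension into
// in_grad using atomicAdd. (Assumption, not shown in this file: the caller
// zero-initializes in_grad before the launch, since the kernel only
// accumulates into it.)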
__global__ void linear_bias_bwd_kernel(
const float *out_grad,
int dim,
int batch_size,
float *in_grad)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int k = idx % dim;
int batch_idx = idx / dim;
if (k < dim && batch_idx < batch_size) {
atomicAdd(&in_grad[k], out_grad[idx]);
}
}
extern "C" void neuralops_cuda_linear_bias_bwd(
const float *out_grad,
size_t dim,
size_t batch_size,
float *in_grad,
cudaStream_t stream)
{
int n = dim * batch_size;
linear_bias_bwd_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
out_grad, dim, batch_size, in_grad);
}
|
0dfe7ab81257b10f4cb6f207ed5d3daede593868.hip | // !!! This is a file automatically generated by hipify!!!
#define DIM 4096
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#include <chrono>
#define TILE_WIDTH 32
__global__
void MatrixMulKernelTiled(double *M, double *N, double *P, int size) {
__shared__ double Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ double Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
	double Pvalue = 0;  // accumulate in double; the tile products are doubles
for (int ph = 0; ph < (int)ceil(size / (double)TILE_WIDTH); ++ph) {
if ((Row < size) && ((ph*TILE_WIDTH + tx) < size)) {
Mds[ty][tx] = M[Row * size + ph * TILE_WIDTH + tx];
}
if (((ph * TILE_WIDTH + ty) < size) && (Col < size)) {
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty) * size + Col];
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < size) && (Col < size)) {
P[Row * size + Col] = Pvalue;
}
}
void LaunchKernel(double *M, double *N, double *P, int size) {
double *d_A, *d_B, *d_C;
int spazio_tot = (size * size) * sizeof(double);
hipMalloc((void **)&d_A, spazio_tot);
hipMalloc((void **)&d_B, spazio_tot);
hipMalloc((void **)&d_C, spazio_tot);
hipMemcpy(d_A, M, spazio_tot, hipMemcpyHostToDevice);
hipMemcpy(d_B, N, spazio_tot, hipMemcpyHostToDevice);
dim3 block(TILE_WIDTH, TILE_WIDTH, 1);
dim3 grid(ceil((double)DIM / TILE_WIDTH), ceil((double)DIM / TILE_WIDTH), 1);
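	// with DIM = 4096 and TILE_WIDTH = 32 this launches a 128 x 128 grid of
	// 32 x 32 thread blocks (16384 blocks of 1024 threads each)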
//MatrixMulKernel << <grid, block >> > (d_A, d_B, d_C, size);
MatrixMulKernelTiled << <grid, block >> > (d_A, d_B, d_C, size);
hipMemcpy(P, d_C, spazio_tot, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
void MatrixMulHost(double(*A)[DIM], double(*B)[DIM], double(*C)[DIM]) {
for (int c = 0; c < DIM; c++) {
for (int d = 0; d < DIM; d++) {
			double Pvalue = 0;
for (int k = 0; k < DIM; k++) {
Pvalue += A[c][k] * B[k][d];
}
C[c][d] = Pvalue;
}
}
}
int main() {
double *A =(double *)malloc(DIM*DIM*sizeof(double));
double *B=(double *)malloc(DIM*DIM*sizeof(double));
double *C=(double *)malloc(DIM*DIM*sizeof(double));
	// fill the matrices with arbitrary values
for (int i = 0; i < DIM; i++) {
for (int j = 0; j < DIM; j++) {
A[i*DIM+j] = 1.0;
B[i*DIM+j] = 1.0;
}
}
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
LaunchKernel(&A[0], &B[0], &C[0], DIM);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
printf("%f\n",tempo);
free(A);
free(B);
free(C);
}
| 0dfe7ab81257b10f4cb6f207ed5d3daede593868.cu | #define DIM 4096
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <cuda.h>
//#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#include <chrono>
#define TILE_WIDTH 32
__global__
void MatrixMulKernelTiled(double *M, double *N, double *P, int size) {
__shared__ double Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ double Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
	double Pvalue = 0;  // accumulate in double; the tile products are doubles
for (int ph = 0; ph < (int)ceil(size / (double)TILE_WIDTH); ++ph) {
if ((Row < size) && ((ph*TILE_WIDTH + tx) < size)) {
Mds[ty][tx] = M[Row * size + ph * TILE_WIDTH + tx];
}
if (((ph * TILE_WIDTH + ty) < size) && (Col < size)) {
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty) * size + Col];
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < size) && (Col < size)) {
P[Row * size + Col] = Pvalue;
}
}
void LaunchKernel(double *M, double *N, double *P, int size) {
double *d_A, *d_B, *d_C;
int spazio_tot = (size * size) * sizeof(double);
cudaMalloc((void **)&d_A, spazio_tot);
cudaMalloc((void **)&d_B, spazio_tot);
cudaMalloc((void **)&d_C, spazio_tot);
cudaMemcpy(d_A, M, spazio_tot, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, N, spazio_tot, cudaMemcpyHostToDevice);
dim3 block(TILE_WIDTH, TILE_WIDTH, 1);
dim3 grid(ceil((double)DIM / TILE_WIDTH), ceil((double)DIM / TILE_WIDTH), 1);
//MatrixMulKernel << <grid, block >> > (d_A, d_B, d_C, size);
MatrixMulKernelTiled << <grid, block >> > (d_A, d_B, d_C, size);
cudaMemcpy(P, d_C, spazio_tot, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
void MatrixMulHost(double(*A)[DIM], double(*B)[DIM], double(*C)[DIM]) {
for (int c = 0; c < DIM; c++) {
for (int d = 0; d < DIM; d++) {
			double Pvalue = 0;
for (int k = 0; k < DIM; k++) {
Pvalue += A[c][k] * B[k][d];
}
C[c][d] = Pvalue;
}
}
}
int main() {
double *A =(double *)malloc(DIM*DIM*sizeof(double));
double *B=(double *)malloc(DIM*DIM*sizeof(double));
double *C=(double *)malloc(DIM*DIM*sizeof(double));
	// fill the matrices with arbitrary values
for (int i = 0; i < DIM; i++) {
for (int j = 0; j < DIM; j++) {
A[i*DIM+j] = 1.0;
B[i*DIM+j] = 1.0;
}
}
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
LaunchKernel(&A[0], &B[0], &C[0], DIM);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
printf("%f\n",tempo);
free(A);
free(B);
free(C);
}
|
c8fdf7cf8ec28c98309101fa655d4399c6f1595b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
hipMallocManaged(&a, size);
  /*
   * Run a few experiments to examine the behavior of `hipMallocManaged` in
   * detail.
   *
   * What happens when only the GPU accesses the unified memory?
   * What happens when only the CPU accesses the unified memory?
   * What happens when the GPU accesses the unified memory first and then the CPU?
   * What happens when the CPU accesses the unified memory first and then the GPU?
   *
   * Before each experiment, form a hypothesis about the behavior of unified
   * memory, in particular about page faults, and then run `nvprof` to verify it.
*/
hipFree(a);
}
| c8fdf7cf8ec28c98309101fa655d4399c6f1595b.cu | __global__
void deviceKernel(int *a, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < N; i += stride)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = 1;
}
}
int main()
{
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
  /*
   * Run a few experiments to examine the behavior of `cudaMallocManaged` in
   * detail.
   *
   * What happens when only the GPU accesses the unified memory?
   * What happens when only the CPU accesses the unified memory?
   * What happens when the GPU accesses the unified memory first and then the CPU?
   * What happens when the CPU accesses the unified memory first and then the GPU?
   *
   * Before each experiment, form a hypothesis about the behavior of unified
   * memory, in particular about page faults, and then run `nvprof` to verify it.
*/
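  // One possible experiment (hypothetical sketch, not part of the original
  // exercise): let the GPU touch the memory first, then the CPU, and compare
  // the page-fault counts reported by nvprof:
  //
  //   deviceKernel<<<256, 256>>>(a, N);
  //   cudaDeviceSynchronize();
  //   hostFunction(a, N);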
cudaFree(a);
}
|