hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
bbab5307064a9f9df5e0495a950ee996d2d229ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorRandom.h"
#include "THHDeviceUtils.cuh"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHTensorMath.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorRandom.cuh"
#include <thrust/functional.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_mtgp32_host.h>
#include <rocrand/rocrand_mtgp32_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
Generator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, Generator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params_t)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(Generator* gen, uint64_t seed)
{
if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (hiprandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTGP32 states and the seed.
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, hipMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
}
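// The MTGP32 state struct stores a pointer `k` to its kernel params; after the raw states
// are restored from a host byte blob in THCRandom_setRNGState below, that pointer is stale,
// so this kernel re-points every state at the current generator's kernel_params.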
__global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state),
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
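// For example, in float32 the spacing just below 1.0 is 2^-24 (about 6e-8), so for
// eps = 1e-8 the expression 1 - eps rounds back to exactly 1.0f; hence the explicit
// remap of 1 to 0 below.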
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
#ifdef CUDA_HALF_TENSOR
__device__ inline half half_uniform_scale_and_shift(float x, double a, double b) {
half width = ScalarConvert<double, half>::to(b - a);
half start = ScalarConvert<double, half>::to(a);
half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width);
return THCNumerics<half>::add(scaled, start);
}
#endif
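// In the kernels generated below, the loop runs to rounded_size (size rounded up to a
// multiple of BLOCK_SIZE) rather than size, so every thread of a block calls CURAND_FUNC
// in lockstep on the shared per-block MTGP32 state; only threads with i < size store a result.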
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
template<typename T, typename U>
struct is_same { static const bool value = false; };
template<typename T>
struct is_same<T, T> { static const bool value = true; };
template<typename real, typename prob_type>
__global__ void generate_bernoulli_tensor(hiprandStateMtgp32_t *state, int size,
real *result, prob_type *probs)
{
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE;
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
if (is_same<prob_type, double>::value) {
double x = hiprand_uniform_double(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
} else {
float x = hiprand_uniform(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
}
}
}
// NOTE: hiprand_uniform is (0, 1] and we want [a, b)
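// reverse_bounds remaps the raw (0, 1] sample onto [0, 1); the x * (b - a) + a transform
// below then shifts and scales it into [a, b).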
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, hiprand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, hiprand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THHGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
| bbab5307064a9f9df5e0495a950ee996d2d229ba.cu | #include "THCTensorRandom.h"
#include "THCDeviceUtils.cuh"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCTensorMath.h"
#include "THCReduceApplyUtils.cuh"
#include "THCTensorRandom.cuh"
#include <thrust/functional.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
#include <curand_mtgp32dc_p_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
Generator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, Generator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(Generator* gen, uint64_t seed)
{
if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (curandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTGP32 states and the seed.
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, cudaMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
}
__global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, cudaMemcpyHostToDevice));
set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
#ifdef CUDA_HALF_TENSOR
__device__ inline half half_uniform_scale_and_shift(float x, double a, double b) {
half width = ScalarConvert<double, half>::to(b - a);
half start = ScalarConvert<double, half>::to(a);
half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width);
return THCNumerics<half>::add(scaled, start);
}
#endif
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
template<typename T, typename U>
struct is_same { static const bool value = false; };
template<typename T>
struct is_same<T, T> { static const bool value = true; };
template<typename real, typename prob_type>
__global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size,
real *result, prob_type *probs)
{
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE;
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
if (is_same<prob_type, double>::value) {
double x = curand_uniform_double(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
} else {
float x = curand_uniform(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
}
}
}
// NOTE: curand_uniform is (0, 1] and we want [a, b)
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, curand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, curand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, curand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THCGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
|
localized_slot_sparse_embedding_hash.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_hash.hpp"
#include "HugeCTR/include/utils.hpp"
#include "cub/hipcub/hipcub.hpp"
#include "cub/hipcub/hipcub.hpp"
namespace HugeCTR {
namespace {
// get slot_id from hash_table_slot_id vector by value_index
__global__ void get_hash_slot_id_kernel(size_t count, const size_t *value_index,
const size_t *hash_table_slot_id, size_t *slot_id) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < count) {
size_t index = value_index[gid];
slot_id[gid] = hash_table_slot_id[index];
}
}
/**
* get hash table slot_id by value_index
* @param stream cuda stream.
* @param count total count of values to be retrieved from the hash table.
* @param value_index the pointer of value_index.
* @param hash_table_slot_id the pointer of hash table slot id.
* @param slot_id the pointer of the retrieved slot_id.
*/
void get_hash_slot_id(size_t count, const size_t *value_index, const size_t *hash_table_slot_id,
size_t *slot_id, hipStream_t stream) {
const size_t block_size = 64;
const size_t grid_size = (count + block_size - 1) / block_size;
hipLaunchKernelGGL(( get_hash_slot_id_kernel), dim3(grid_size), dim3(block_size), 0, stream, count, value_index,
hash_table_slot_id, slot_id);
}
} // namespace
template <typename TypeHashKey, typename TypeEmbeddingComp>
LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::LocalizedSlotSparseEmbeddingHash(
const Tensors2<TypeHashKey> &train_row_offsets_tensors,
const Tensors2<TypeHashKey> &train_value_tensors,
const std::vector<std::shared_ptr<size_t>> &train_nnz_array,
const Tensors2<TypeHashKey> &evaluate_row_offsets_tensors,
const Tensors2<TypeHashKey> &evaluate_value_tensors,
const std::vector<std::shared_ptr<size_t>> &evaluate_nnz_array,
const SparseEmbeddingHashParams<TypeEmbeddingComp> &embedding_params,
const std::string plan_file, const std::shared_ptr<ResourceManager> &resource_manager)
: Base(train_row_offsets_tensors, train_value_tensors, train_nnz_array,
evaluate_row_offsets_tensors, evaluate_value_tensors, evaluate_nnz_array,
embedding_params, resource_manager),
slot_size_array_(embedding_params.slot_size_array)
#ifndef NCCL_A2A
,
plan_file_(plan_file)
#endif
{
try {
CudaDeviceContext context;
if (slot_size_array_.empty()) {
max_vocabulary_size_per_gpu_ = Base::get_max_vocabulary_size_per_gpu();
max_vocabulary_size_ = Base::get_max_vocabulary_size_per_gpu() *
Base::get_resource_manager().get_global_gpu_count();
} else {
max_vocabulary_size_per_gpu_ =
cal_max_voc_size_per_gpu(slot_size_array_, Base::get_resource_manager());
max_vocabulary_size_ = 0;
for (size_t slot_size : slot_size_array_) {
max_vocabulary_size_ += slot_size;
}
}
MESSAGE_("max_vocabulary_size_per_gpu_=" + std::to_string(max_vocabulary_size_per_gpu_));
for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) {
int cur_device = Base::get_local_gpu(id).get_device_id();
context.set_device(cur_device);
size_t gid = Base::get_local_gpu(id).get_global_gpu_id();
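// Slots are distributed round-robin over global GPU ids (slot i lives on GPU i % total_gpu_count),
// so this GPU owns slot_num / total_gpu_count slots, plus one more when gid < slot_num % total_gpu_count.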
size_t slot_num_per_gpu =
Base::get_slot_num() / Base::get_resource_manager().get_global_gpu_count() +
((gid < Base::get_slot_num() % Base::get_resource_manager().get_global_gpu_count()) ? 1
: 0);
slot_num_per_gpu_.push_back(slot_num_per_gpu);
// construct HashTable object: used to store hash table <key, value_index>
hash_tables_.emplace_back(new NvHashTable(max_vocabulary_size_per_gpu_));
// new GeneralBuffer objects
const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &buf = Base::get_buffer(id);
// new hash table value vectors
if (slot_size_array_.empty()) {
Tensor2<float> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
hash_table_value_tensors_.push_back(tensor);
} else {
const std::shared_ptr<BufferBlock2<float>> &block = buf->create_block<float>();
Tensors2<float> tensors;
for (size_t i = 0; i < slot_size_array_.size(); i++) {
if ((i % Base::get_resource_manager().get_global_gpu_count()) == gid) {
Tensor2<float> tensor;
block->reserve({slot_size_array_[i], Base::get_embedding_vec_size()}, &tensor);
tensors.push_back(tensor);
}
}
value_table_tensors_.push_back(tensors);
hash_table_value_tensors_.push_back(block->as_tensor());
}
// new hash table value_index tensors (filled by HashTable get())
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_universal_batch_size() * Base::get_max_feature_num()}, &tensor);
hash_value_index_tensors_.push_back(tensor);
}
// new embedding features reduced by hash table values(results of forward)
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_universal_batch_size() * slot_num_per_gpu, Base::get_embedding_vec_size()},
&tensor);
embedding_feature_tensors_.push_back(tensor);
}
// new wgrad used by backward
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_batch_size(true) * slot_num_per_gpu, Base::get_embedding_vec_size()},
&tensor);
wgrad_tensors_.push_back(tensor);
}
// new optimizer params used by update_params
switch (Base::get_optimizer()) {
case Optimizer_t::Adam: // adam
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_m_tensors_.push_back(tensor);
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_v_tensors_.push_back(tensor);
break;
}
case Optimizer_t::MomentumSGD: // momentum_sgd
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_momentum_tensors_.push_back(tensor);
break;
}
case Optimizer_t::Nesterov: // nesterov
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_accm_tensors_.push_back(tensor);
break;
}
case Optimizer_t::SGD:
break;
default:
throw std::runtime_error(
std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
}
{
Tensor2<TypeHashKey> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
sample_id_tensors_.push_back(tensor);
}
{
Tensor2<TypeHashKey> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
sample_id_sort_tensors_.push_back(tensor);
}
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
hash_value_index_sort_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num() + 1}, &tensor);
hash_value_index_count_offset_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
new_hash_value_flag_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
hash_value_flag_sumed_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, 1}, &tensor);
hash_value_index_count_counter_tensors_.push_back(tensor);
}
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
deltaw_hash_value_index_tensors_.push_back(tensor);
}
{
Tensor2<float> tensor;
buf->reserve({Base::get_batch_size(true) * Base::get_max_feature_num(),
Base::get_embedding_vec_size()},
&tensor);
deltaw_tensors_.push_back(tensor);
}
{
// cal the temp storage bytes for CUB radix sort
size_t size = 0;
hipcub::DeviceRadixSort::SortPairs((void *)nullptr, size, (size_t *)nullptr, (size_t *)nullptr,
(TypeHashKey *)nullptr, (TypeHashKey *)nullptr,
Base::get_batch_size(true) * Base::get_max_feature_num());
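// Following the CUB convention, calling SortPairs (and InclusiveSum below) with a null
// temp-storage pointer only reports the required scratch size in `size`; the tensors
// reserved here provide that scratch when the real sort/scan runs later.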
// new temp storage tensors for CUB radix sort
Tensor2<void> tensor;
buf->reserve({size}, &tensor);
temp_storage_sort_tensors_.push_back(tensor);
}
{
size_t size = 0;
hipcub::DeviceScan::InclusiveSum((void *)nullptr, size, (uint32_t *)nullptr,
(uint32_t *)nullptr,
Base::get_batch_size(true) * Base::get_max_feature_num());
Tensor2<void> tensor;
buf->reserve({size}, &tensor);
temp_storage_scan_tensors_.push_back(tensor);
}
// the tensors for storing slot ids
// TODO: init to -1 ?
{
Tensor2<size_t> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, 1}, &tensor);
hash_table_slot_id_tensors_.push_back(tensor);
}
// temp tensors for all2all
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_universal_batch_size_per_gpu() * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
all2all_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_forward_temp_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_batch_size_per_gpu(true) * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_all2all_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_batch_size_per_gpu(true) * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_reorder_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_batch_size(true) * Base::get_slot_num(), Base::get_embedding_vec_size()},
&tensor);
utest_backward_temp_tensors_.push_back(tensor);
}
// init GeneralBuffers to do real allocation
#ifndef NDEBUG
std::cout << " max_feature_num_:" << Base::get_max_feature_num() << std::endl;
#endif
buf->allocate();
const OptParams<TypeEmbeddingComp> &source_opt_param = Base::get_opt_params();
OptParams<TypeEmbeddingComp> &target_opt_param = Base::get_opt_params(id);
switch (Base::get_optimizer()) {
case Optimizer_t::Adam: // adam
CK_CUDA_THROW_(hipMemsetAsync(opt_m_tensors_[id].get_ptr(), 0,
opt_m_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
CK_CUDA_THROW_(hipMemsetAsync(opt_v_tensors_[id].get_ptr(), 0,
opt_v_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.adam.times = 0;
target_opt_param.hyperparams.adam.beta1 = source_opt_param.hyperparams.adam.beta1;
target_opt_param.hyperparams.adam.beta2 = source_opt_param.hyperparams.adam.beta2;
target_opt_param.hyperparams.adam.epsilon = source_opt_param.hyperparams.adam.epsilon;
target_opt_param.hyperparams.adam.m_ptr = opt_m_tensors_[id].get_ptr();
target_opt_param.hyperparams.adam.v_ptr = opt_v_tensors_[id].get_ptr();
break;
case Optimizer_t::MomentumSGD: // momentum_sgd
CK_CUDA_THROW_(hipMemsetAsync(opt_momentum_tensors_[id].get_ptr(), 0,
opt_momentum_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.momentum.factor =
source_opt_param.hyperparams.momentum.factor;
target_opt_param.hyperparams.momentum.momentum_ptr = opt_momentum_tensors_[id].get_ptr();
break;
case Optimizer_t::Nesterov: // nesterov
CK_CUDA_THROW_(hipMemsetAsync(opt_accm_tensors_[id].get_ptr(), 0,
opt_accm_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.nesterov.mu = source_opt_param.hyperparams.nesterov.mu;
target_opt_param.hyperparams.nesterov.accm_ptr = opt_accm_tensors_[id].get_ptr();
break;
case Optimizer_t::SGD:
break;
default:
throw std::runtime_error(
std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
}
} // end of for(int id = 0; id < Base::get_local_gpu_count(); id++)
// sync
functors_.sync_all_gpus(Base::get_resource_manager());
#ifndef NCCL_A2A
// all2all init
#ifndef ENABLE_MPI // without MPI
functors_.all2all_init_forward(all2all_forward_, plan_file_, Base::get_batch_size_per_gpu(true),
slot_num_per_gpu_, Base::get_embedding_vec_size(),
embedding_feature_tensors_, all2all_tensors_,
Base::get_resource_manager());
functors_.all2all_init_backward(all2all_backward_, plan_file_,
Base::get_batch_size_per_gpu(true), slot_num_per_gpu_,
Base::get_embedding_vec_size(), all2all_tensors_,
embedding_feature_tensors_, Base::get_resource_manager());
functors_.all2all_init_forward(all2all_utest_, plan_file_, Base::get_batch_size_per_gpu(true),
slot_num_per_gpu_, Base::get_embedding_vec_size(),
wgrad_tensors_, utest_all2all_tensors_,
Base::get_resource_manager());
#else
functors_.all2all_init_forward(all2all_forward_, plan_file_, Base::get_batch_size_per_gpu(true),
Base::get_slot_num(), Base::get_embedding_vec_size(),
embedding_feature_tensors_, all2all_tensors_,
Base::get_resource_manager());
functors_.all2all_init_backward(all2all_backward_, plan_file_,
Base::get_batch_size_per_gpu(true), Base::get_slot_num(),
Base::get_embedding_vec_size(), all2all_tensors_,
embedding_feature_tensors_, Base::get_resource_manager());
functors_.all2all_init_forward(all2all_utest_, plan_file_, Base::get_batch_size_per_gpu(true),
Base::get_slot_num(), Base::get_embedding_vec_size(),
wgrad_tensors_, utest_all2all_tensors_,
Base::get_resource_manager());
#endif
#endif
// warm up for nccl all2all
#ifdef NCCL_A2A
MESSAGE_("All2All Warmup Start");
#ifndef ENABLE_MPI
if (Base::get_resource_manager().get_global_gpu_count() > 1) {
functors_.all2all_forward(Base::get_batch_size_per_gpu(true), slot_num_per_gpu_,
Base::get_embedding_vec_size(), embedding_feature_tensors_,
all2all_tensors_, Base::get_resource_manager());
}
#else
if (Base::get_resource_manager().get_global_gpu_count() > 1) {
functors_.all2all_forward(Base::get_batch_size_per_gpu(true), Base::get_slot_num(),
Base::get_embedding_vec_size(), embedding_feature_tensors_,
all2all_tensors_, Base::get_resource_manager());
}
#endif
MESSAGE_("All2All Warmup End");
#endif
} catch (const std::runtime_error &rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::upload_params_to_device(
std::ifstream &weight_stream, size_t vocabulary_size, size_t embedding_vec_size,
size_t max_vocabulary_size_per_gpu, Tensors2<float> &hash_table_value_tensors,
Tensors2<size_t> &hash_table_slot_id_tensors,
std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) {
// check file size and vocabulary_size (file size <=hash_table_size)
weight_stream.seekg(0, weight_stream.end);
size_t file_size_in_B = weight_stream.tellg();
weight_stream.seekg(0, weight_stream.beg);
int my_rank = 0;
#ifdef ENABLE_MPI
int n_ranks = 1;
CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank));
CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks));
#endif
// define size
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
size_t chunk_loop = 1000;
size_t tile_size = 1; // must be 1, because we need to compute (slot_id % total_gpu_count) to decide
// the gpu_id for each <key, slot_id, value> tile
size_t hash_table_key_tile_size = tile_size;
size_t hash_table_key_tile_size_in_B = hash_table_key_tile_size * sizeof(TypeHashKey);
size_t hash_table_key_chunk_size = hash_table_key_tile_size * chunk_loop;
size_t hash_table_key_chunk_size_in_B = hash_table_key_chunk_size * sizeof(TypeHashKey);
size_t hash_table_value_index_chunk_size_in_B = hash_table_key_chunk_size * sizeof(size_t);
size_t hash_table_value_tile_size = tile_size * embedding_vec_size;
size_t hash_table_value_tile_size_in_B = hash_table_value_tile_size * sizeof(float);
size_t hash_table_value_chunk_size = hash_table_value_tile_size * chunk_loop;
size_t hash_table_value_chunk_size_in_B = hash_table_value_chunk_size * sizeof(float);
size_t hash_table_slot_id_tile_size = tile_size;
size_t hash_table_slot_id_tile_size_in_B = hash_table_slot_id_tile_size * sizeof(size_t);
size_t hash_table_slot_id_chunk_size = hash_table_slot_id_tile_size * chunk_loop;
size_t hash_table_slot_id_chunk_size_in_B = hash_table_slot_id_chunk_size * sizeof(size_t);
size_t hash_table_tile_size_in_B = hash_table_key_tile_size_in_B +
hash_table_slot_id_tile_size_in_B +
hash_table_value_tile_size_in_B;
size_t hash_table_chunk_size_in_B = hash_table_tile_size_in_B * chunk_loop;
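// These sizes mirror the on-disk layout: each tile is packed as
// <key (TypeHashKey), slot_id (size_t), value (embedding_vec_size floats)>, and a chunk
// holds chunk_loop such tiles back to back.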
size_t total_gpu_count = Base::get_resource_manager().get_global_gpu_count();
// CAUTION: we cannot know in advance how many values land on each GPU, so enough memory
// is allocated on every GPU. Allocate GPU memory for hash_table_value_index.
std::unique_ptr<size_t[]> tile_counter_per_gpu(
new size_t[local_gpu_count]); // <= hash_table_value_index_per_gpu_size
memset(tile_counter_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count);
std::unique_ptr<size_t[]> tile_counter_in_chunk_per_gpu(new size_t[local_gpu_count]);
memset(tile_counter_in_chunk_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count);
std::unique_ptr<size_t *[]> d_hash_table_value_index_chunk_per_gpu(new size_t *[local_gpu_count]);
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(hipMalloc(&d_hash_table_value_index_chunk_per_gpu[id],
hash_table_value_index_chunk_size_in_B));
// initialize to zeros
CK_CUDA_THROW_(hipMemsetAsync(d_hash_table_value_index_chunk_per_gpu[id], 0,
hash_table_value_index_chunk_size_in_B,
Base::get_local_gpu(id).get_stream()));
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
// CAUTION: we cannot know in advance how many values land on each GPU, so enough memory
// is allocated on every GPU. Allocate CPU/GPU memory for the hash_table/key/value chunks.
char *hash_table_chunk;
CK_CUDA_THROW_(hipHostMalloc(&hash_table_chunk, hash_table_chunk_size_in_B));
std::unique_ptr<TypeHashKey *[]> h_hash_table_key_chunk_per_gpu(
new TypeHashKey *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(
hipHostMalloc(&h_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B));
}
std::unique_ptr<TypeHashKey *[]> d_hash_table_key_chunk_per_gpu(
new TypeHashKey *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(hipMalloc(&d_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B));
}
std::unique_ptr<size_t *[]> h_hash_table_slot_id_chunk_per_gpu(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(hipHostMalloc(&h_hash_table_slot_id_chunk_per_gpu[id],
hash_table_slot_id_chunk_size_in_B));
}
std::unique_ptr<size_t *[]> d_hash_table_slot_id_chunk_per_gpu(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(
hipMalloc(&d_hash_table_slot_id_chunk_per_gpu[id], hash_table_slot_id_chunk_size_in_B));
}
std::unique_ptr<float *[]> h_hash_table_value_chunk_per_gpu(new float *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(
hipHostMalloc(&h_hash_table_value_chunk_per_gpu[id], hash_table_value_chunk_size_in_B));
}
// do upload
size_t loop_num = file_size_in_B / hash_table_chunk_size_in_B;
MESSAGE_("Start to upload embedding table file to GPUs, file size: " +
std::to_string(file_size_in_B) + " Bytes, total loop_num: " + std::to_string(loop_num));
for (size_t i = 0; i < loop_num; i++) {
// read a chunk of data from file
// one pair in hash table file includes: <key, slot_id, value>
weight_stream.read(hash_table_chunk, hash_table_chunk_size_in_B);
// memcpy from CPU to CPU
char *src_buf = hash_table_chunk;
TypeHashKey *key_dst_buf;
size_t *slot_id_dst_buf;
float *value_dst_buf;
for (size_t k = 0; k < chunk_loop; k++) { // process a tile in each loop
size_t slot_id = *((size_t *)(src_buf + hash_table_key_tile_size_in_B));
size_t gid = slot_id % total_gpu_count; // global GPU ID
size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id(
gid); // local GPU ID (not gpu device id)
int dst_rank = Base::get_resource_manager().get_pid_from_gpu_global_id(gid); // node id
if (my_rank == dst_rank) {
// memcpy hash_table_key to corresponding GPU
key_dst_buf = h_hash_table_key_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_key_tile_size;
memcpy(key_dst_buf, src_buf, hash_table_key_tile_size_in_B);
src_buf += hash_table_key_tile_size_in_B;
// memcpy hash_table_slot_id to corresponding GPU
slot_id_dst_buf = h_hash_table_slot_id_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_slot_id_tile_size;
memcpy(slot_id_dst_buf, src_buf, hash_table_slot_id_tile_size_in_B);
src_buf += hash_table_slot_id_tile_size_in_B;
// memcpy hash_table_value to corresponding GPU
value_dst_buf = h_hash_table_value_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size;
memcpy(value_dst_buf, src_buf, hash_table_value_tile_size_in_B);
src_buf += hash_table_value_tile_size_in_B;
tile_counter_in_chunk_per_gpu[id] += tile_size;
} else {
src_buf += hash_table_key_tile_size_in_B;
src_buf += hash_table_slot_id_tile_size_in_B;
src_buf += hash_table_value_tile_size_in_B;
continue;
}
} // end of for (size_t k = 0; k < chunk_loop; k++)
// do HashTable insert <key,value_index>
for (size_t id = 0; id < local_gpu_count; id++) {
if (tile_counter_in_chunk_per_gpu[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
size_t tile_count = tile_counter_in_chunk_per_gpu[id];
// memcpy hash_table_key from CPU to GPU
CK_CUDA_THROW_(hipMemcpyAsync(d_hash_table_key_chunk_per_gpu[id],
h_hash_table_key_chunk_per_gpu[id],
tile_count * sizeof(TypeHashKey), hipMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
size_t value_index_offset = tile_counter_per_gpu[id];
size_t *value_index_buf = d_hash_table_value_index_chunk_per_gpu[id];
if (tile_count > 0) {
// set hash_table_value_index on GPU
functors_.memset_liner(value_index_buf, value_index_offset, 1ul, tile_count,
Base::get_local_gpu(id).get_stream());
}
// do hash table insert <key, value_index> on GPU
hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, tile_count,
Base::get_local_gpu(id).get_stream());
size_t value_head =
hash_tables[id]->get_and_add_value_head(tile_count, Base::get_local_gpu(id).get_stream());
}
// memcpy hash_table_slot_id and hash_table_value from CPU to GPU
for (size_t id = 0; id < local_gpu_count; id++) {
if (tile_counter_in_chunk_per_gpu[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
size_t slot_id_chunk_size = tile_counter_in_chunk_per_gpu[id] * hash_table_slot_id_tile_size;
size_t slot_id_offset = tile_counter_per_gpu[id] * hash_table_slot_id_tile_size;
if ((slot_id_offset + slot_id_chunk_size) > max_vocabulary_size_per_gpu) {
char msg[100]{0};
sprintf(msg, "The size of hash table on GPU%zu is out of range %zu\n", id,
max_vocabulary_size_per_gpu);
CK_THROW_(Error_t::OutOfBound, msg);
}
size_t *src_buf_sid = h_hash_table_slot_id_chunk_per_gpu[id];
size_t *dst_buf_sid = hash_table_slot_id_tensors[id].get_ptr() + slot_id_offset;
CK_CUDA_THROW_(hipMemcpyAsync(dst_buf_sid, src_buf_sid, slot_id_chunk_size * sizeof(size_t),
hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream()));
size_t value_chunk_size = tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size;
size_t value_chunk_offset = tile_counter_per_gpu[id] * hash_table_value_tile_size;
float *src_buf_value = h_hash_table_value_chunk_per_gpu[id];
float *dst_buf_value = hash_table_value_tensors[id].get_ptr() + value_chunk_offset;
CK_CUDA_THROW_(hipMemcpyAsync(dst_buf_value, src_buf_value, value_chunk_size * sizeof(float),
hipMemcpyHostToDevice, Base::get_local_gpu(id).get_stream()));
}
functors_.sync_all_gpus(Base::get_resource_manager());
// set counter value
for (size_t id = 0; id < local_gpu_count; id++) {
tile_counter_per_gpu[id] +=
tile_counter_in_chunk_per_gpu[id]; // accumulate total tile counter
tile_counter_in_chunk_per_gpu[id] = 0; // reset chunk counter to zero
if (tile_counter_per_gpu[id] > max_vocabulary_size_per_gpu) {
char msg[100];
sprintf(msg, "The size of hash table on GPU%zu is out of range %zu\n", id,
max_vocabulary_size_per_gpu);
CK_THROW_(Error_t::OutOfBound, msg);
}
}
/* std::cout << "\rUploading " << std::fixed << std::setprecision(2)
<< (float)(i) / loop_num * 100.0f << "%, loop " << i << " of " << loop_num
<< std::flush; */
} // end of for(int i = 0; i < loop_num; i++)
// std::cout << std::endl;
// process the remaining data(less than a chunk)
size_t remain_size_in_B = file_size_in_B - loop_num * hash_table_chunk_size_in_B;
size_t remain_loop_num = remain_size_in_B / hash_table_tile_size_in_B;
if (remain_loop_num != 0) {
MESSAGE_("Upload the remaining data");
// read all the remaining data
weight_stream.read((char *)hash_table_chunk, remain_size_in_B);
char *src_buf = hash_table_chunk;
TypeHashKey *key_dst_buf;
size_t *value_index_buf;
size_t *slot_id_dst_buf;
float *value_dst_buf;
for (size_t i = 0; i < remain_loop_num; i++) { // process one tile in each loop
size_t slot_id = *((size_t *)(src_buf + hash_table_key_tile_size_in_B));
size_t gid = slot_id % total_gpu_count; // global GPU ID
size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id(
gid); // local GPU ID (not gpu device id)
int dst_rank = Base::get_resource_manager().get_pid_from_gpu_global_id(gid); // node id
if (my_rank == dst_rank) {
context.set_device(Base::get_local_gpu(id).get_device_id());
// memcpy hash_table_key from CPU to GPU
key_dst_buf = d_hash_table_key_chunk_per_gpu[id];
CK_CUDA_THROW_(hipMemcpyAsync(key_dst_buf, src_buf, hash_table_key_tile_size_in_B,
hipMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_key_tile_size_in_B;
// set value_index
size_t value_index_offset = tile_counter_per_gpu[id];
value_index_buf = d_hash_table_value_index_chunk_per_gpu[id];
functors_.memset_liner(value_index_buf, value_index_offset, 1ul, 1ul,
Base::get_local_gpu(id).get_stream());
// do hash table insert <key, value_index> on GPU
hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf,
hash_table_key_tile_size, Base::get_local_gpu(id).get_stream());
size_t value_head = hash_tables[id]->get_and_add_value_head(
hash_table_key_tile_size, Base::get_local_gpu(id).get_stream());
// memcpy hash_table_slot_id to corresponding GPU
size_t slot_id_offset = tile_counter_per_gpu[id];
slot_id_dst_buf = hash_table_slot_id_tensors[id].get_ptr() + slot_id_offset;
CK_CUDA_THROW_(hipMemcpyAsync(slot_id_dst_buf, src_buf, hash_table_slot_id_tile_size_in_B,
hipMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_slot_id_tile_size_in_B;
// memcpy hash_table_value from CPU to GPU
size_t value_offset = tile_counter_per_gpu[id] * embedding_vec_size;
value_dst_buf = hash_table_value_tensors[id].get_ptr() + value_offset;
CK_CUDA_THROW_(hipMemcpyAsync(value_dst_buf, src_buf, hash_table_value_tile_size_in_B,
hipMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_value_tile_size_in_B;
// set counter
tile_counter_per_gpu[id] += tile_size;
} else {
src_buf += hash_table_key_tile_size_in_B;
src_buf += hash_table_slot_id_tile_size_in_B;
src_buf += hash_table_value_tile_size_in_B;
continue;
}
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
} // end of if(remain_loop_num)
MESSAGE_("Done");
// release resources
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(hipFree(d_hash_table_value_index_chunk_per_gpu[id]));
CK_CUDA_THROW_(hipFree(d_hash_table_key_chunk_per_gpu[id]));
}
CK_CUDA_THROW_(hipHostFree(hash_table_chunk));
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(hipHostFree(h_hash_table_key_chunk_per_gpu[id]));
CK_CUDA_THROW_(hipHostFree(h_hash_table_value_chunk_per_gpu[id]));
}
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::download_params_to_host(
std::ofstream &weight_stream, size_t vocabulary_size, size_t embedding_vec_size,
const Tensors2<float> &hash_table_value_tensors,
const Tensors2<size_t> &hash_table_slot_id_tensors,
const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const {
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
int my_rank = 0;
#ifdef ENABLE_MPI
int n_ranks = 1;
CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank));
CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks));
#endif
// memory allocation
std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]);
size_t max_count = 0;
size_t total_count = 0;
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
auto count_tmp = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream());
if (count_tmp != hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())) {
std::cout << "gpu" << id << ", get_size=" << count_tmp << ", get_value_head="
<< hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())
<< std::endl;
CK_THROW_(Error_t::WrongInput,
"Error: hash_table get_value_head() is not equal to get_size()");
}
count[id] = count_tmp;
max_count = max(max_count, count[id]);
total_count += count[id];
}
#ifdef ENABLE_MPI
CK_MPI_THROW_(
MPI_Allreduce(MPI_IN_PLACE, &max_count, sizeof(size_t), MPI_CHAR, MPI_MAX, MPI_COMM_WORLD));
#endif
if (total_count > (size_t)vocabulary_size) {
CK_THROW_(Error_t::WrongInput,
"Error: required download size is larger than hash table vocabulary_size");
}
std::unique_ptr<TypeHashKey *[]> h_hash_table_key(new TypeHashKey *[local_gpu_count]);
std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]);
std::unique_ptr<size_t *[]> h_hash_table_slot_id(new size_t *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_hash_table_slot_id(new size_t *[local_gpu_count]);
std::unique_ptr<float *[]> h_hash_table_value(new float *[local_gpu_count]);
std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
hipHostMalloc(&h_hash_table_key[id], count[id] * sizeof(TypeHashKey));
hipMalloc(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey));
hipMalloc(&d_hash_table_value_index[id], count[id] * sizeof(size_t));
hipHostMalloc(&h_hash_table_slot_id[id], count[id] * sizeof(size_t));
hipMalloc(&d_hash_table_slot_id[id], count[id] * sizeof(size_t));
hipHostMalloc(&h_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float));
hipMalloc(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float));
hipMalloc(&d_dump_counter[id], count[id] * sizeof(size_t));
}
// dump hash table on GPU
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
MESSAGE_("Rank" + std::to_string(my_rank) + ": Dump hash table from GPU" + std::to_string(id));
context.set_device(Base::get_local_gpu(id).get_device_id());
hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_key[id], d_hash_table_key[id],
count[id] * sizeof(TypeHashKey), hipMemcpyDeviceToHost,
Base::get_local_gpu(id).get_stream()));
functors_.get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id],
hash_table_value_tensors[id].get_ptr(), d_hash_table_value[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_value[id], d_hash_table_value[id],
count[id] * embedding_vec_size * sizeof(float),
hipMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream()));
get_hash_slot_id(count[id], d_hash_table_value_index[id],
hash_table_slot_id_tensors[id].get_ptr(), d_hash_table_slot_id[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(hipMemcpyAsync(h_hash_table_slot_id[id], d_hash_table_slot_id[id],
count[id] * sizeof(size_t), hipMemcpyDeviceToHost,
Base::get_local_gpu(id).get_stream()));
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
const int master_node = 0;
#ifdef ENABLE_MPI
const int base_tag = 0xed;
#endif
// TODO: could be optimized ???
// one pair in the file includes <key,slot_id,value>
size_t pair_size_in_B = sizeof(TypeHashKey) + sizeof(size_t) + sizeof(float) * embedding_vec_size;
size_t max_size_in_B = max_count * pair_size_in_B;
std::unique_ptr<char[]> file_buf(new char[max_size_in_B]);
size_t key_size = sizeof(TypeHashKey);
size_t slot_id_size = sizeof(size_t);
size_t value_size = sizeof(float) * embedding_vec_size;
for (size_t id = 0; id < local_gpu_count; id++) {
size_t size_in_B = count[id] * pair_size_in_B;
size_t offset = 0;
for (unsigned int k = 0; k < count[id]; k++) {
/* std::cout << "\rRank" << my_rank << ": Seperate keys, slot_ids and values on GPU"
<< id
<< ", finish " << k << " of total count " << count[id] << ", "
<< (float)k / count[id] * 100.0f << "%" << std::flush;
*/
memcpy(file_buf.get() + offset, h_hash_table_key[id] + k, key_size);
offset += key_size;
memcpy(file_buf.get() + offset, h_hash_table_slot_id[id] + k, slot_id_size);
offset += slot_id_size;
memcpy(file_buf.get() + offset, h_hash_table_value[id] + k * embedding_vec_size, value_size);
offset += value_size;
}
// std::cout << std::endl;
if (my_rank == master_node) {
MESSAGE_("Rank" + std::to_string(my_rank) + ": Write hash table <key,value> pairs to file");
weight_stream.write(file_buf.get(), size_in_B);
}
#ifdef ENABLE_MPI
else {
MESSAGE_("Rank" + std::to_string(my_rank) + ": Send hash table <key,value> pairs on GPU" +
std::to_string(id) + " to master node ");
int tag = (id << 8) | base_tag;
CK_MPI_THROW_(
MPI_Send(file_buf.get(), size_in_B, MPI_CHAR, master_node, tag, MPI_COMM_WORLD));
}
#endif
}
#ifdef ENABLE_MPI
if (my_rank == master_node) {
for (int r = 1; r < n_ranks; r++) {
for (size_t id = 0; id < local_gpu_count; id++) {
MESSAGE_("Rank" + std::to_string(my_rank) +
": Recv hash table <key,value> pairs from rank" + std::to_string(r) + " on GPU" +
std::to_string(id) + ", and write to file ");
int tag = (id << 8) | base_tag;
MPI_Status status;
CK_MPI_THROW_(MPI_Probe(r, tag, MPI_COMM_WORLD, &status));
int size_in_B;
CK_MPI_THROW_(MPI_Get_count(&status, MPI_CHAR, &size_in_B));
CK_MPI_THROW_(MPI_Recv(file_buf.get(), size_in_B, MPI_CHAR, r, tag, MPI_COMM_WORLD,
MPI_STATUS_IGNORE));
weight_stream.write(file_buf.get(), size_in_B);
}
}
}
#endif
MESSAGE_("Done");
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(hipHostFree(h_hash_table_key[id]));
CK_CUDA_THROW_(hipFree(d_hash_table_key[id]));
CK_CUDA_THROW_(hipFree(d_hash_table_value_index[id]));
CK_CUDA_THROW_(hipHostFree(h_hash_table_slot_id[id]));
CK_CUDA_THROW_(hipFree(d_hash_table_slot_id[id]));
CK_CUDA_THROW_(hipHostFree(h_hash_table_value[id]));
CK_CUDA_THROW_(hipFree(d_hash_table_value[id]));
CK_CUDA_THROW_(hipFree(d_dump_counter[id]));
}
return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding(
size_t max_vocabulary_size_per_gpu, size_t embedding_vec_size,
Tensors2<float> &hash_table_value_tensors) {
CudaDeviceContext context;
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
MESSAGE_("gpu" + std::to_string(id) + " start to init embedding");
HugeCTR::UniformGenerator::fill(hash_table_value_tensors[id], -0.05f, 0.05f,
Base::get_local_gpu(id));
}
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(hipStreamSynchronize(Base::get_local_gpu(id).get_stream()));
MESSAGE_("gpu" + std::to_string(id) + " init embedding done");
}
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding(
const std::vector<size_t> &slot_sizes, size_t embedding_vec_size,
std::vector<Tensors2<float>> &hash_table_value_tensors,
Tensors2<size_t> &hash_table_slot_id_tensors) {
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
size_t total_gpu_count = Base::get_resource_manager().get_global_gpu_count();
#ifndef NDEBUG
MESSAGE_("local_gpu_count=" + std::to_string(local_gpu_count) + ", total_gpu_count=" +
std::to_string(total_gpu_count));
#endif
for (size_t id = 0; id < local_gpu_count; id++) {
size_t device_id = Base::get_local_gpu(id).get_device_id();
size_t global_id = Base::get_local_gpu(id).get_global_gpu_id();
#ifndef NDEBUG
MESSAGE_("id=" + std::to_string(id) + ", device_id=" + std::to_string(device_id) +
", global_id=" + std::to_string(global_id));
#endif
functors_.init_embedding_per_gpu(global_id, total_gpu_count, slot_sizes, embedding_vec_size,
hash_table_value_tensors[id], hash_table_slot_id_tensors[id],
Base::get_local_gpu(id));
}
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(hipStreamSynchronize(Base::get_local_gpu(id).get_stream()));
MESSAGE_("gpu" + std::to_string(id) + " init embedding done");
}
return;
}
template class LocalizedSlotSparseEmbeddingHash<unsigned int, float>;
template class LocalizedSlotSparseEmbeddingHash<long long, float>;
template class LocalizedSlotSparseEmbeddingHash<unsigned int, __half>;
template class LocalizedSlotSparseEmbeddingHash<long long, __half>;
} // namespace HugeCTR | localized_slot_sparse_embedding_hash.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/localized_slot_sparse_embedding_hash.hpp"
#include "HugeCTR/include/utils.hpp"
#include "cub/cub/device/device_radix_sort.cuh"
#include "cub/cub/device/device_scan.cuh"
namespace HugeCTR {
namespace {
// get slot_id from hash_table_slot_id vector by value_index
__global__ void get_hash_slot_id_kernel(size_t count, const size_t *value_index,
const size_t *hash_table_slot_id, size_t *slot_id) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < count) {
size_t index = value_index[gid];
slot_id[gid] = hash_table_slot_id[index];
}
}
/**
* get hash table slot_id by value_index
* @param stream cuda stream.
* @param count total count of values to be retrieved from the hash table.
* @param value_index the pointer of value_index.
* @param hash_table_slot_id the pointer of hash table slot id.
* @param slot_id the pointer of the retrieved slot_id.
*/
void get_hash_slot_id(size_t count, const size_t *value_index, const size_t *hash_table_slot_id,
size_t *slot_id, cudaStream_t stream) {
const size_t block_size = 64;
const size_t grid_size = (count + block_size - 1) / block_size;
get_hash_slot_id_kernel<<<grid_size, block_size, 0, stream>>>(count, value_index,
hash_table_slot_id, slot_id);
}
} // namespace
template <typename TypeHashKey, typename TypeEmbeddingComp>
LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::LocalizedSlotSparseEmbeddingHash(
const Tensors2<TypeHashKey> &train_row_offsets_tensors,
const Tensors2<TypeHashKey> &train_value_tensors,
const std::vector<std::shared_ptr<size_t>> &train_nnz_array,
const Tensors2<TypeHashKey> &evaluate_row_offsets_tensors,
const Tensors2<TypeHashKey> &evaluate_value_tensors,
const std::vector<std::shared_ptr<size_t>> &evaluate_nnz_array,
const SparseEmbeddingHashParams<TypeEmbeddingComp> &embedding_params,
const std::string plan_file, const std::shared_ptr<ResourceManager> &resource_manager)
: Base(train_row_offsets_tensors, train_value_tensors, train_nnz_array,
evaluate_row_offsets_tensors, evaluate_value_tensors, evaluate_nnz_array,
embedding_params, resource_manager),
slot_size_array_(embedding_params.slot_size_array)
#ifndef NCCL_A2A
,
plan_file_(plan_file)
#endif
{
try {
CudaDeviceContext context;
if (slot_size_array_.empty()) {
max_vocabulary_size_per_gpu_ = Base::get_max_vocabulary_size_per_gpu();
max_vocabulary_size_ = Base::get_max_vocabulary_size_per_gpu() *
Base::get_resource_manager().get_global_gpu_count();
} else {
max_vocabulary_size_per_gpu_ =
cal_max_voc_size_per_gpu(slot_size_array_, Base::get_resource_manager());
max_vocabulary_size_ = 0;
for (size_t slot_size : slot_size_array_) {
max_vocabulary_size_ += slot_size;
}
}
MESSAGE_("max_vocabulary_size_per_gpu_=" + std::to_string(max_vocabulary_size_per_gpu_));
for (size_t id = 0; id < Base::get_resource_manager().get_local_gpu_count(); id++) {
int cur_device = Base::get_local_gpu(id).get_device_id();
context.set_device(cur_device);
size_t gid = Base::get_local_gpu(id).get_global_gpu_id();
size_t slot_num_per_gpu =
Base::get_slot_num() / Base::get_resource_manager().get_global_gpu_count() +
((gid < Base::get_slot_num() % Base::get_resource_manager().get_global_gpu_count()) ? 1
: 0);
slot_num_per_gpu_.push_back(slot_num_per_gpu);
// construct HashTable object: used to store hash table <key, value_index>
hash_tables_.emplace_back(new NvHashTable(max_vocabulary_size_per_gpu_));
// new GeneralBuffer objects
const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &buf = Base::get_buffer(id);
// new hash table value vectors
if (slot_size_array_.empty()) {
Tensor2<float> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
hash_table_value_tensors_.push_back(tensor);
} else {
const std::shared_ptr<BufferBlock2<float>> &block = buf->create_block<float>();
Tensors2<float> tensors;
for (size_t i = 0; i < slot_size_array_.size(); i++) {
if ((i % Base::get_resource_manager().get_global_gpu_count()) == gid) {
Tensor2<float> tensor;
block->reserve({slot_size_array_[i], Base::get_embedding_vec_size()}, &tensor);
tensors.push_back(tensor);
}
}
value_table_tensors_.push_back(tensors);
hash_table_value_tensors_.push_back(block->as_tensor());
}
// new hash table value_index tensors (filled by HashTable::get())
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_universal_batch_size() * Base::get_max_feature_num()}, &tensor);
hash_value_index_tensors_.push_back(tensor);
}
// new embedding feature tensors reduced from hash table values (results of forward)
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_universal_batch_size() * slot_num_per_gpu, Base::get_embedding_vec_size()},
&tensor);
embedding_feature_tensors_.push_back(tensor);
}
// new wgrad used by backward
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_batch_size(true) * slot_num_per_gpu, Base::get_embedding_vec_size()},
&tensor);
wgrad_tensors_.push_back(tensor);
}
// new optimizer params used by update_params
switch (Base::get_optimizer()) {
case Optimizer_t::Adam: // adam
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_m_tensors_.push_back(tensor);
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_v_tensors_.push_back(tensor);
break;
}
case Optimizer_t::MomentumSGD: // momentum_sgd
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_momentum_tensors_.push_back(tensor);
break;
}
case Optimizer_t::Nesterov: // nesterov
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, Base::get_embedding_vec_size()}, &tensor);
opt_accm_tensors_.push_back(tensor);
break;
}
case Optimizer_t::SGD:
break;
default:
throw std::runtime_error(
std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
}
{
Tensor2<TypeHashKey> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
sample_id_tensors_.push_back(tensor);
}
{
Tensor2<TypeHashKey> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
sample_id_sort_tensors_.push_back(tensor);
}
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
hash_value_index_sort_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num() + 1}, &tensor);
hash_value_index_count_offset_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
new_hash_value_flag_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
hash_value_flag_sumed_tensors_.push_back(tensor);
}
{
Tensor2<uint32_t> tensor;
buf->reserve({1, 1}, &tensor);
hash_value_index_count_counter_tensors_.push_back(tensor);
}
{
Tensor2<size_t> tensor;
buf->reserve({1, Base::get_batch_size(true) * Base::get_max_feature_num()}, &tensor);
deltaw_hash_value_index_tensors_.push_back(tensor);
}
{
Tensor2<float> tensor;
buf->reserve({Base::get_batch_size(true) * Base::get_max_feature_num(),
Base::get_embedding_vec_size()},
&tensor);
deltaw_tensors_.push_back(tensor);
}
{
// calculate the temp storage bytes for CUB radix sort
size_t size = 0;
cub::DeviceRadixSort::SortPairs((void *)nullptr, size, (size_t *)nullptr, (size_t *)nullptr,
(TypeHashKey *)nullptr, (TypeHashKey *)nullptr,
Base::get_batch_size(true) * Base::get_max_feature_num());
// new temp storage tensors for CUB radix sort
Tensor2<void> tensor;
buf->reserve({size}, &tensor);
temp_storage_sort_tensors_.push_back(tensor);
}
{
size_t size = 0;
cub::DeviceScan::InclusiveSum((void *)nullptr, size, (uint32_t *)nullptr,
(uint32_t *)nullptr,
Base::get_batch_size(true) * Base::get_max_feature_num());
Tensor2<void> tensor;
buf->reserve({size}, &tensor);
temp_storage_scan_tensors_.push_back(tensor);
}
// the tensors for storing slot ids
// TODO: init to -1 ?
{
Tensor2<size_t> tensor;
buf->reserve({max_vocabulary_size_per_gpu_, 1}, &tensor);
hash_table_slot_id_tensors_.push_back(tensor);
}
// temp tensors for all2all
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_universal_batch_size_per_gpu() * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
all2all_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_universal_batch_size() * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_forward_temp_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_batch_size_per_gpu(true) * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_all2all_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve({Base::get_batch_size_per_gpu(true) * Base::get_slot_num(),
Base::get_embedding_vec_size()},
&tensor);
utest_reorder_tensors_.push_back(tensor);
}
{
Tensor2<TypeEmbeddingComp> tensor;
buf->reserve(
{Base::get_batch_size(true) * Base::get_slot_num(), Base::get_embedding_vec_size()},
&tensor);
utest_backward_temp_tensors_.push_back(tensor);
}
// init GeneralBuffers to do real allocation
#ifndef NDEBUG
std::cout << " max_feature_num_:" << Base::get_max_feature_num() << std::endl;
#endif
buf->allocate();
const OptParams<TypeEmbeddingComp> &source_opt_param = Base::get_opt_params();
OptParams<TypeEmbeddingComp> &target_opt_param = Base::get_opt_params(id);
switch (Base::get_optimizer()) {
case Optimizer_t::Adam: // adam
CK_CUDA_THROW_(cudaMemsetAsync(opt_m_tensors_[id].get_ptr(), 0,
opt_m_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
CK_CUDA_THROW_(cudaMemsetAsync(opt_v_tensors_[id].get_ptr(), 0,
opt_v_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.adam.times = 0;
target_opt_param.hyperparams.adam.beta1 = source_opt_param.hyperparams.adam.beta1;
target_opt_param.hyperparams.adam.beta2 = source_opt_param.hyperparams.adam.beta2;
target_opt_param.hyperparams.adam.epsilon = source_opt_param.hyperparams.adam.epsilon;
target_opt_param.hyperparams.adam.m_ptr = opt_m_tensors_[id].get_ptr();
target_opt_param.hyperparams.adam.v_ptr = opt_v_tensors_[id].get_ptr();
break;
case Optimizer_t::MomentumSGD: // momentum_sgd
CK_CUDA_THROW_(cudaMemsetAsync(opt_momentum_tensors_[id].get_ptr(), 0,
opt_momentum_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.momentum.factor =
source_opt_param.hyperparams.momentum.factor;
target_opt_param.hyperparams.momentum.momentum_ptr = opt_momentum_tensors_[id].get_ptr();
break;
case Optimizer_t::Nesterov: // nesterov
CK_CUDA_THROW_(cudaMemsetAsync(opt_accm_tensors_[id].get_ptr(), 0,
opt_accm_tensors_[id].get_size_in_bytes(),
Base::get_local_gpu(id).get_stream()));
target_opt_param.hyperparams.nesterov.mu = source_opt_param.hyperparams.nesterov.mu;
target_opt_param.hyperparams.nesterov.accm_ptr = opt_accm_tensors_[id].get_ptr();
break;
case Optimizer_t::SGD:
break;
default:
throw std::runtime_error(
std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
}
} // end of for(int id = 0; id < Base::get_local_gpu_count(); id++)
// sync
functors_.sync_all_gpus(Base::get_resource_manager());
#ifndef NCCL_A2A
// all2all init
#ifndef ENABLE_MPI // without MPI
functors_.all2all_init_forward(all2all_forward_, plan_file_, Base::get_batch_size_per_gpu(true),
slot_num_per_gpu_, Base::get_embedding_vec_size(),
embedding_feature_tensors_, all2all_tensors_,
Base::get_resource_manager());
functors_.all2all_init_backward(all2all_backward_, plan_file_,
Base::get_batch_size_per_gpu(true), slot_num_per_gpu_,
Base::get_embedding_vec_size(), all2all_tensors_,
embedding_feature_tensors_, Base::get_resource_manager());
functors_.all2all_init_forward(all2all_utest_, plan_file_, Base::get_batch_size_per_gpu(true),
slot_num_per_gpu_, Base::get_embedding_vec_size(),
wgrad_tensors_, utest_all2all_tensors_,
Base::get_resource_manager());
#else
functors_.all2all_init_forward(all2all_forward_, plan_file_, Base::get_batch_size_per_gpu(true),
Base::get_slot_num(), Base::get_embedding_vec_size(),
embedding_feature_tensors_, all2all_tensors_,
Base::get_resource_manager());
functors_.all2all_init_backward(all2all_backward_, plan_file_,
Base::get_batch_size_per_gpu(true), Base::get_slot_num(),
Base::get_embedding_vec_size(), all2all_tensors_,
embedding_feature_tensors_, Base::get_resource_manager());
functors_.all2all_init_forward(all2all_utest_, plan_file_, Base::get_batch_size_per_gpu(true),
Base::get_slot_num(), Base::get_embedding_vec_size(),
wgrad_tensors_, utest_all2all_tensors_,
Base::get_resource_manager());
#endif
#endif
// warm up for nccl all2all
#ifdef NCCL_A2A
MESSAGE_("All2All Warmup Start");
#ifndef ENABLE_MPI
if (Base::get_resource_manager().get_global_gpu_count() > 1) {
functors_.all2all_forward(Base::get_batch_size_per_gpu(true), slot_num_per_gpu_,
Base::get_embedding_vec_size(), embedding_feature_tensors_,
all2all_tensors_, Base::get_resource_manager());
}
#else
if (Base::get_resource_manager().get_global_gpu_count() > 1) {
functors_.all2all_forward(Base::get_batch_size_per_gpu(true), Base::get_slot_num(),
Base::get_embedding_vec_size(), embedding_feature_tensors_,
all2all_tensors_, Base::get_resource_manager());
}
#endif
MESSAGE_("All2All Warmup End");
#endif
} catch (const std::runtime_error &rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
return;
}
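/**
 * upload_params_to_device: read <key, slot_id, value> tiles from the weight file in
 * chunks, route each tile to the GPU that owns slot_id % total_gpu_count (skipping
 * tiles that belong to other MPI ranks), insert <key, value_index> into that GPU's
 * hash table and copy the slot_id and embedding value into the device tensors.
 */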
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::upload_params_to_device(
std::ifstream &weight_stream, size_t vocabulary_size, size_t embedding_vec_size,
size_t max_vocabulary_size_per_gpu, Tensors2<float> &hash_table_value_tensors,
Tensors2<size_t> &hash_table_slot_id_tensors,
std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) {
// check file size and vocabulary_size (file size <= hash_table_size)
weight_stream.seekg(0, weight_stream.end);
size_t file_size_in_B = weight_stream.tellg();
weight_stream.seekg(0, weight_stream.beg);
int my_rank = 0;
#ifdef ENABLE_MPI
int n_ranks = 1;
CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank));
CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks));
#endif
// define size
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
size_t chunk_loop = 1000;
size_t tile_size = 1; // must be 1, because the destination GPU for each
// <key, slot_id, value> tile is decided by slot_id % total_gpu_count
size_t hash_table_key_tile_size = tile_size;
size_t hash_table_key_tile_size_in_B = hash_table_key_tile_size * sizeof(TypeHashKey);
size_t hash_table_key_chunk_size = hash_table_key_tile_size * chunk_loop;
size_t hash_table_key_chunk_size_in_B = hash_table_key_chunk_size * sizeof(TypeHashKey);
size_t hash_table_value_index_chunk_size_in_B = hash_table_key_chunk_size * sizeof(size_t);
size_t hash_table_value_tile_size = tile_size * embedding_vec_size;
size_t hash_table_value_tile_size_in_B = hash_table_value_tile_size * sizeof(float);
size_t hash_table_value_chunk_size = hash_table_value_tile_size * chunk_loop;
size_t hash_table_value_chunk_size_in_B = hash_table_value_chunk_size * sizeof(float);
size_t hash_table_slot_id_tile_size = tile_size;
size_t hash_table_slot_id_tile_size_in_B = hash_table_slot_id_tile_size * sizeof(size_t);
size_t hash_table_slot_id_chunk_size = hash_table_slot_id_tile_size * chunk_loop;
size_t hash_table_slot_id_chunk_size_in_B = hash_table_slot_id_chunk_size * sizeof(size_t);
size_t hash_table_tile_size_in_B = hash_table_key_tile_size_in_B +
hash_table_slot_id_tile_size_in_B +
hash_table_value_tile_size_in_B;
size_t hash_table_chunk_size_in_B = hash_table_tile_size_in_B * chunk_loop;
size_t total_gpu_count = Base::get_resource_manager().get_global_gpu_count();
// CAUTION: we cannot know in advance how many values each GPU will receive, so we need to
// allocate enough memory for each GPU; allocate GPU memory for hash_table_value_index
std::unique_ptr<size_t[]> tile_counter_per_gpu(
new size_t[local_gpu_count]); // <= hash_table_value_index_per_gpu_size
memset(tile_counter_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count);
std::unique_ptr<size_t[]> tile_counter_in_chunk_per_gpu(new size_t[local_gpu_count]);
memset(tile_counter_in_chunk_per_gpu.get(), 0, sizeof(size_t) * local_gpu_count);
std::unique_ptr<size_t *[]> d_hash_table_value_index_chunk_per_gpu(new size_t *[local_gpu_count]);
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(cudaMalloc(&d_hash_table_value_index_chunk_per_gpu[id],
hash_table_value_index_chunk_size_in_B));
// initialize to zeros
CK_CUDA_THROW_(cudaMemsetAsync(d_hash_table_value_index_chunk_per_gpu[id], 0,
hash_table_value_index_chunk_size_in_B,
Base::get_local_gpu(id).get_stream()));
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
// CAUTION: we cannot know in advance how many values each GPU will receive, so we need to
// allocate enough memory for each GPU; allocate CPU/GPU memory for the hash table key/value chunks
char *hash_table_chunk;
CK_CUDA_THROW_(cudaMallocHost(&hash_table_chunk, hash_table_chunk_size_in_B));
std::unique_ptr<TypeHashKey *[]> h_hash_table_key_chunk_per_gpu(
new TypeHashKey *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(
cudaMallocHost(&h_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B));
}
std::unique_ptr<TypeHashKey *[]> d_hash_table_key_chunk_per_gpu(
new TypeHashKey *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(cudaMalloc(&d_hash_table_key_chunk_per_gpu[id], hash_table_key_chunk_size_in_B));
}
std::unique_ptr<size_t *[]> h_hash_table_slot_id_chunk_per_gpu(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(cudaMallocHost(&h_hash_table_slot_id_chunk_per_gpu[id],
hash_table_slot_id_chunk_size_in_B));
}
std::unique_ptr<size_t *[]> d_hash_table_slot_id_chunk_per_gpu(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(
cudaMalloc(&d_hash_table_slot_id_chunk_per_gpu[id], hash_table_slot_id_chunk_size_in_B));
}
std::unique_ptr<float *[]> h_hash_table_value_chunk_per_gpu(new float *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(
cudaMallocHost(&h_hash_table_value_chunk_per_gpu[id], hash_table_value_chunk_size_in_B));
}
// do upload
size_t loop_num = file_size_in_B / hash_table_chunk_size_in_B;
MESSAGE_("Start to upload embedding table file to GPUs, file size: " +
std::to_string(file_size_in_B) + " Bytes, total loop_num: " + std::to_string(loop_num));
for (size_t i = 0; i < loop_num; i++) {
// read a chunk of data from file
// one pair in hash table file includes: <key, slot_id, value>
weight_stream.read(hash_table_chunk, hash_table_chunk_size_in_B);
// memcpy from CPU to CPU
char *src_buf = hash_table_chunk;
TypeHashKey *key_dst_buf;
size_t *slot_id_dst_buf;
float *value_dst_buf;
for (size_t k = 0; k < chunk_loop; k++) { // process a tile in each loop
size_t slot_id = *((size_t *)(src_buf + hash_table_key_tile_size_in_B));
size_t gid = slot_id % total_gpu_count; // global GPU ID
size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id(
gid); // local GPU ID (not gpu device id)
int dst_rank = Base::get_resource_manager().get_pid_from_gpu_global_id(gid); // node id
if (my_rank == dst_rank) {
// memcpy hash_table_key to corresponding GPU
key_dst_buf = h_hash_table_key_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_key_tile_size;
memcpy(key_dst_buf, src_buf, hash_table_key_tile_size_in_B);
src_buf += hash_table_key_tile_size_in_B;
// memcpy hash_table_slot_id to corresponding GPU
slot_id_dst_buf = h_hash_table_slot_id_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_slot_id_tile_size;
memcpy(slot_id_dst_buf, src_buf, hash_table_slot_id_tile_size_in_B);
src_buf += hash_table_slot_id_tile_size_in_B;
// memcpy hash_table_value to corresponding GPU
value_dst_buf = h_hash_table_value_chunk_per_gpu[id] +
tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size;
memcpy(value_dst_buf, src_buf, hash_table_value_tile_size_in_B);
src_buf += hash_table_value_tile_size_in_B;
tile_counter_in_chunk_per_gpu[id] += tile_size;
} else {
src_buf += hash_table_key_tile_size_in_B;
src_buf += hash_table_slot_id_tile_size_in_B;
src_buf += hash_table_value_tile_size_in_B;
continue;
}
} // end of for(size_t k = 0; k < chunk_loop; k++)
// do HashTable insert <key,value_index>
for (size_t id = 0; id < local_gpu_count; id++) {
if (tile_counter_in_chunk_per_gpu[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
size_t tile_count = tile_counter_in_chunk_per_gpu[id];
// memcpy hash_table_key from CPU to GPU
CK_CUDA_THROW_(cudaMemcpyAsync(d_hash_table_key_chunk_per_gpu[id],
h_hash_table_key_chunk_per_gpu[id],
tile_count * sizeof(TypeHashKey), cudaMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
size_t value_index_offset = tile_counter_per_gpu[id];
size_t *value_index_buf = d_hash_table_value_index_chunk_per_gpu[id];
if (tile_count > 0) {
// set hash_table_value_index on GPU
functors_.memset_liner(value_index_buf, value_index_offset, 1ul, tile_count,
Base::get_local_gpu(id).get_stream());
}
// do hash table insert <key, value_index> on GPU
hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf, tile_count,
Base::get_local_gpu(id).get_stream());
size_t value_head =
hash_tables[id]->get_and_add_value_head(tile_count, Base::get_local_gpu(id).get_stream());
}
// memcpy hash_table_slot_id and hash_table_value from CPU to GPU
for (size_t id = 0; id < local_gpu_count; id++) {
if (tile_counter_in_chunk_per_gpu[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
size_t slot_id_chunk_size = tile_counter_in_chunk_per_gpu[id] * hash_table_slot_id_tile_size;
size_t slot_id_offset = tile_counter_per_gpu[id] * hash_table_slot_id_tile_size;
if ((slot_id_offset + slot_id_chunk_size) > max_vocabulary_size_per_gpu) {
char msg[100]{0};
sprintf(msg, "The size of hash table on GPU%zu is out of range %zu\n", id,
max_vocabulary_size_per_gpu);
CK_THROW_(Error_t::OutOfBound, msg);
}
size_t *src_buf_sid = h_hash_table_slot_id_chunk_per_gpu[id];
size_t *dst_buf_sid = hash_table_slot_id_tensors[id].get_ptr() + slot_id_offset;
CK_CUDA_THROW_(cudaMemcpyAsync(dst_buf_sid, src_buf_sid, slot_id_chunk_size * sizeof(size_t),
cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream()));
size_t value_chunk_size = tile_counter_in_chunk_per_gpu[id] * hash_table_value_tile_size;
size_t value_chunk_offset = tile_counter_per_gpu[id] * hash_table_value_tile_size;
float *src_buf_value = h_hash_table_value_chunk_per_gpu[id];
float *dst_buf_value = hash_table_value_tensors[id].get_ptr() + value_chunk_offset;
CK_CUDA_THROW_(cudaMemcpyAsync(dst_buf_value, src_buf_value, value_chunk_size * sizeof(float),
cudaMemcpyHostToDevice, Base::get_local_gpu(id).get_stream()));
}
functors_.sync_all_gpus(Base::get_resource_manager());
// set counter value
for (size_t id = 0; id < local_gpu_count; id++) {
tile_counter_per_gpu[id] +=
tile_counter_in_chunk_per_gpu[id]; // accumulate total tile counter
tile_counter_in_chunk_per_gpu[id] = 0; // reset chunk counter to zero
if (tile_counter_per_gpu[id] > max_vocabulary_size_per_gpu) {
char msg[100];
sprintf(msg, "The size of hash table on GPU%zu is out of range %zu\n", id,
max_vocabulary_size_per_gpu);
CK_THROW_(Error_t::OutOfBound, msg);
}
}
/* std::cout << "\rUploading " << std::fixed << std::setprecision(2)
<< (float)(i) / loop_num * 100.0f << "%, loop " << i << " of " << loop_num
<< std::flush; */
} // end of for(int i = 0; i < loop_num; i++)
// std::cout << std::endl;
// process the remaining data(less than a chunk)
size_t remain_size_in_B = file_size_in_B - loop_num * hash_table_chunk_size_in_B;
size_t remain_loop_num = remain_size_in_B / hash_table_tile_size_in_B;
if (remain_loop_num != 0) {
MESSAGE_("Upload the remaining data");
// read all the remaining data
weight_stream.read((char *)hash_table_chunk, remain_size_in_B);
char *src_buf = hash_table_chunk;
TypeHashKey *key_dst_buf;
size_t *value_index_buf;
size_t *slot_id_dst_buf;
float *value_dst_buf;
for (size_t i = 0; i < remain_loop_num; i++) { // process one tile in each loop
size_t slot_id = *((size_t *)(src_buf + hash_table_key_tile_size_in_B));
size_t gid = slot_id % total_gpu_count; // global GPU ID
size_t id = Base::get_resource_manager().get_gpu_local_id_from_global_id(
gid); // local GPU ID (not gpu device id)
int dst_rank = Base::get_resource_manager().get_pid_from_gpu_global_id(gid); // node id
if (my_rank == dst_rank) {
context.set_device(Base::get_local_gpu(id).get_device_id());
// memcpy hash_table_key from CPU to GPU
key_dst_buf = d_hash_table_key_chunk_per_gpu[id];
CK_CUDA_THROW_(cudaMemcpyAsync(key_dst_buf, src_buf, hash_table_key_tile_size_in_B,
cudaMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_key_tile_size_in_B;
// set value_index
size_t value_index_offset = tile_counter_per_gpu[id];
value_index_buf = d_hash_table_value_index_chunk_per_gpu[id];
functors_.memset_liner(value_index_buf, value_index_offset, 1ul, 1ul,
Base::get_local_gpu(id).get_stream());
// do hash table insert <key, value_index> on GPU
hash_tables[id]->insert(d_hash_table_key_chunk_per_gpu[id], value_index_buf,
hash_table_key_tile_size, Base::get_local_gpu(id).get_stream());
size_t value_head = hash_tables[id]->get_and_add_value_head(
hash_table_key_tile_size, Base::get_local_gpu(id).get_stream());
// memcpy hash_table_slot_id to corresponding GPU
size_t slot_id_offset = tile_counter_per_gpu[id];
slot_id_dst_buf = hash_table_slot_id_tensors[id].get_ptr() + slot_id_offset;
CK_CUDA_THROW_(cudaMemcpyAsync(slot_id_dst_buf, src_buf, hash_table_slot_id_tile_size_in_B,
cudaMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_slot_id_tile_size_in_B;
// memcpy hash_table_value from CPU to GPU
size_t value_offset = tile_counter_per_gpu[id] * embedding_vec_size;
value_dst_buf = hash_table_value_tensors[id].get_ptr() + value_offset;
CK_CUDA_THROW_(cudaMemcpyAsync(value_dst_buf, src_buf, hash_table_value_tile_size_in_B,
cudaMemcpyHostToDevice,
Base::get_local_gpu(id).get_stream()));
src_buf += hash_table_value_tile_size_in_B;
// set counter
tile_counter_per_gpu[id] += tile_size;
} else {
src_buf += hash_table_key_tile_size_in_B;
src_buf += hash_table_slot_id_tile_size_in_B;
src_buf += hash_table_value_tile_size_in_B;
continue;
}
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
} // end of if(remain_loop_num)
MESSAGE_("Done");
// release resources
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(cudaFree(d_hash_table_value_index_chunk_per_gpu[id]));
CK_CUDA_THROW_(cudaFree(d_hash_table_key_chunk_per_gpu[id]));
}
CK_CUDA_THROW_(cudaFreeHost(hash_table_chunk));
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(cudaFreeHost(h_hash_table_key_chunk_per_gpu[id]));
CK_CUDA_THROW_(cudaFreeHost(h_hash_table_value_chunk_per_gpu[id]));
}
}
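/**
 * download_params_to_host: dump each local GPU's hash table, gather the corresponding
 * embedding values and slot ids back to the host, and write <key, slot_id, value>
 * records to the weight file on the master rank (other ranks send their buffers to
 * the master via MPI).
 */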
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::download_params_to_host(
std::ofstream &weight_stream, size_t vocabulary_size, size_t embedding_vec_size,
const Tensors2<float> &hash_table_value_tensors,
const Tensors2<size_t> &hash_table_slot_id_tensors,
const std::vector<std::shared_ptr<HashTable<TypeHashKey, size_t>>> &hash_tables) const {
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
int my_rank = 0;
#ifdef ENABLE_MPI
int n_ranks = 1;
CK_MPI_THROW_(MPI_Comm_rank(MPI_COMM_WORLD, &my_rank));
CK_MPI_THROW_(MPI_Comm_size(MPI_COMM_WORLD, &n_ranks));
#endif
// memory allocation
std::unique_ptr<size_t[]> count(new size_t[local_gpu_count]);
size_t max_count = 0;
size_t total_count = 0;
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
auto count_tmp = hash_tables[id]->get_size(Base::get_local_gpu(id).get_stream());
if (count_tmp != hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())) {
std::cout << "gpu" << id << ", get_size=" << count_tmp << ", get_value_head="
<< hash_tables[id]->get_value_head(Base::get_local_gpu(id).get_stream())
<< std::endl;
CK_THROW_(Error_t::WrongInput,
"Error: hash_table get_value_head() is not equal to get_size()");
}
count[id] = count_tmp;
max_count = max(max_count, count[id]);
total_count += count[id];
}
#ifdef ENABLE_MPI
CK_MPI_THROW_(
MPI_Allreduce(MPI_IN_PLACE, &max_count, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD));
#endif
if (total_count > (size_t)vocabulary_size) {
CK_THROW_(Error_t::WrongInput,
"Error: required download size is larger than hash table vocabulary_size");
}
std::unique_ptr<TypeHashKey *[]> h_hash_table_key(new TypeHashKey *[local_gpu_count]);
std::unique_ptr<TypeHashKey *[]> d_hash_table_key(new TypeHashKey *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_hash_table_value_index(new size_t *[local_gpu_count]);
std::unique_ptr<size_t *[]> h_hash_table_slot_id(new size_t *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_hash_table_slot_id(new size_t *[local_gpu_count]);
std::unique_ptr<float *[]> h_hash_table_value(new float *[local_gpu_count]);
std::unique_ptr<float *[]> d_hash_table_value(new float *[local_gpu_count]);
std::unique_ptr<size_t *[]> d_dump_counter(new size_t *[local_gpu_count]);
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
cudaMallocHost(&h_hash_table_key[id], count[id] * sizeof(TypeHashKey));
cudaMalloc(&d_hash_table_key[id], count[id] * sizeof(TypeHashKey));
cudaMalloc(&d_hash_table_value_index[id], count[id] * sizeof(size_t));
cudaMallocHost(&h_hash_table_slot_id[id], count[id] * sizeof(size_t));
cudaMalloc(&d_hash_table_slot_id[id], count[id] * sizeof(size_t));
cudaMallocHost(&h_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float));
cudaMalloc(&d_hash_table_value[id], count[id] * embedding_vec_size * sizeof(float));
cudaMalloc(&d_dump_counter[id], count[id] * sizeof(size_t));
}
// dump hash table on GPU
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
MESSAGE_("Rank" + std::to_string(my_rank) + ": Dump hash table from GPU" + std::to_string(id));
context.set_device(Base::get_local_gpu(id).get_device_id());
hash_tables[id]->dump(d_hash_table_key[id], d_hash_table_value_index[id], d_dump_counter[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_key[id], d_hash_table_key[id],
count[id] * sizeof(TypeHashKey), cudaMemcpyDeviceToHost,
Base::get_local_gpu(id).get_stream()));
functors_.get_hash_value(count[id], embedding_vec_size, d_hash_table_value_index[id],
hash_table_value_tensors[id].get_ptr(), d_hash_table_value[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_value[id], d_hash_table_value[id],
count[id] * embedding_vec_size * sizeof(float),
cudaMemcpyDeviceToHost, Base::get_local_gpu(id).get_stream()));
get_hash_slot_id(count[id], d_hash_table_value_index[id],
hash_table_slot_id_tensors[id].get_ptr(), d_hash_table_slot_id[id],
Base::get_local_gpu(id).get_stream());
CK_CUDA_THROW_(cudaMemcpyAsync(h_hash_table_slot_id[id], d_hash_table_slot_id[id],
count[id] * sizeof(size_t), cudaMemcpyDeviceToHost,
Base::get_local_gpu(id).get_stream()));
}
// sync wait
functors_.sync_all_gpus(Base::get_resource_manager());
const int master_node = 0;
#ifdef ENABLE_MPI
const int base_tag = 0xed;
#endif
// TODO: could be optimized ???
// one pair in the file includes <key,slot_id,value>
size_t pair_size_in_B = sizeof(TypeHashKey) + sizeof(size_t) + sizeof(float) * embedding_vec_size;
size_t max_size_in_B = max_count * pair_size_in_B;
std::unique_ptr<char[]> file_buf(new char[max_size_in_B]);
size_t key_size = sizeof(TypeHashKey);
size_t slot_id_size = sizeof(size_t);
size_t value_size = sizeof(float) * embedding_vec_size;
for (size_t id = 0; id < local_gpu_count; id++) {
size_t size_in_B = count[id] * pair_size_in_B;
size_t offset = 0;
for (unsigned int k = 0; k < count[id]; k++) {
/* std::cout << "\rRank" << my_rank << ": Separate keys, slot_ids and values on GPU"
<< id
<< ", finish " << k << " of total count " << count[id] << ", "
<< (float)k / count[id] * 100.0f << "%" << std::flush;
*/
memcpy(file_buf.get() + offset, h_hash_table_key[id] + k, key_size);
offset += key_size;
memcpy(file_buf.get() + offset, h_hash_table_slot_id[id] + k, slot_id_size);
offset += slot_id_size;
memcpy(file_buf.get() + offset, h_hash_table_value[id] + k * embedding_vec_size, value_size);
offset += value_size;
}
// std::cout << std::endl;
if (my_rank == master_node) {
MESSAGE_("Rank" + std::to_string(my_rank) + ": Write hash table <key,value> pairs to file");
weight_stream.write(file_buf.get(), size_in_B);
}
#ifdef ENABLE_MPI
else {
MESSAGE_("Rank" + std::to_string(my_rank) + ": Send hash table <key,value> pairs on GPU" +
std::to_string(id) + " to master node ");
int tag = (id << 8) | base_tag;
CK_MPI_THROW_(
MPI_Send(file_buf.get(), size_in_B, MPI_CHAR, master_node, tag, MPI_COMM_WORLD));
}
#endif
}
#ifdef ENABLE_MPI
if (my_rank == master_node) {
for (int r = 1; r < n_ranks; r++) {
for (size_t id = 0; id < local_gpu_count; id++) {
MESSAGE_("Rank" + std::to_string(my_rank) +
": Recv hash table <key,value> pairs from rank" + std::to_string(r) + " on GPU" +
std::to_string(id) + ", and write to file ");
int tag = (id << 8) | base_tag;
MPI_Status status;
CK_MPI_THROW_(MPI_Probe(r, tag, MPI_COMM_WORLD, &status));
int size_in_B;
CK_MPI_THROW_(MPI_Get_count(&status, MPI_CHAR, &size_in_B));
CK_MPI_THROW_(MPI_Recv(file_buf.get(), size_in_B, MPI_CHAR, r, tag, MPI_COMM_WORLD,
MPI_STATUS_IGNORE));
weight_stream.write(file_buf.get(), size_in_B);
}
}
}
#endif
MESSAGE_("Done");
for (size_t id = 0; id < local_gpu_count; id++) {
if (count[id] == 0) {
continue;
}
context.set_device(Base::get_local_gpu(id).get_device_id());
CK_CUDA_THROW_(cudaFreeHost(h_hash_table_key[id]));
CK_CUDA_THROW_(cudaFree(d_hash_table_key[id]));
CK_CUDA_THROW_(cudaFree(d_hash_table_value_index[id]));
CK_CUDA_THROW_(cudaFreeHost(h_hash_table_slot_id[id]));
CK_CUDA_THROW_(cudaFree(d_hash_table_slot_id[id]));
CK_CUDA_THROW_(cudaFreeHost(h_hash_table_value[id]));
CK_CUDA_THROW_(cudaFree(d_hash_table_value[id]));
CK_CUDA_THROW_(cudaFree(d_dump_counter[id]));
}
return;
}
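/**
 * init_embedding (no slot_size_array): fill the whole embedding table on each local GPU
 * with values drawn uniformly from [-0.05, 0.05].
 */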
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding(
size_t max_vocabulary_size_per_gpu, size_t embedding_vec_size,
Tensors2<float> &hash_table_value_tensors) {
CudaDeviceContext context;
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
context.set_device(Base::get_local_gpu(id).get_device_id());
MESSAGE_("gpu" + std::to_string(id) + " start to init embedding");
HugeCTR::UniformGenerator::fill(hash_table_value_tensors[id], -0.05f, 0.05f,
Base::get_local_gpu(id));
}
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(cudaStreamSynchronize(Base::get_local_gpu(id).get_stream()));
MESSAGE_("gpu" + std::to_string(id) + " init embedding done");
}
}
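/**
 * init_embedding (with slot_size_array): each GPU initializes the embedding vectors for
 * the slots assigned to it (the constructor assigns slot i to global GPU
 * i % total_gpu_count) and fills hash_table_slot_id_tensors with the slot id of each row.
 */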
template <typename TypeHashKey, typename TypeEmbeddingComp>
void LocalizedSlotSparseEmbeddingHash<TypeHashKey, TypeEmbeddingComp>::init_embedding(
const std::vector<size_t> &slot_sizes, size_t embedding_vec_size,
std::vector<Tensors2<float>> &hash_table_value_tensors,
Tensors2<size_t> &hash_table_slot_id_tensors) {
size_t local_gpu_count = Base::get_resource_manager().get_local_gpu_count();
size_t total_gpu_count = Base::get_resource_manager().get_global_gpu_count();
#ifndef NDEBUG
MESSAGE_("local_gpu_count=" + std::to_string(local_gpu_count) + ", total_gpu_count=" +
std::to_string(total_gpu_count));
#endif
for (size_t id = 0; id < local_gpu_count; id++) {
size_t device_id = Base::get_local_gpu(id).get_device_id();
size_t global_id = Base::get_local_gpu(id).get_global_gpu_id();
#ifndef NDEBUG
MESSAGE_("id=" + std::to_string(id) + ", device_id=" + std::to_string(device_id) +
", global_id=" + std::to_string(global_id));
#endif
functors_.init_embedding_per_gpu(global_id, total_gpu_count, slot_sizes, embedding_vec_size,
hash_table_value_tensors[id], hash_table_slot_id_tensors[id],
Base::get_local_gpu(id));
}
for (size_t id = 0; id < local_gpu_count; id++) {
CK_CUDA_THROW_(cudaStreamSynchronize(Base::get_local_gpu(id).get_stream()));
MESSAGE_("gpu" + std::to_string(id) + " init embedding done");
}
return;
}
template class LocalizedSlotSparseEmbeddingHash<unsigned int, float>;
template class LocalizedSlotSparseEmbeddingHash<long long, float>;
template class LocalizedSlotSparseEmbeddingHash<unsigned int, __half>;
template class LocalizedSlotSparseEmbeddingHash<long long, __half>;
} // namespace HugeCTR |
97d904c4f33aa65ef2b793f15b4c635826fe5223.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <mkl.h>
#include <math.h>
#include <omp.h>
#include <assert.h>
#include <iostream>
#include <hip/hip_runtime.h>
//#define VERBOSE
using std::cout;
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
#define nullptr NULL
#define safeCall(err) __safeCall(err, __FILE__, __LINE__)
inline void __safeCall(hipError_t err, const char * file, const int line)
{
if(hipSuccess != err) {
fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, hipGetErrorString(err));
exit(-1);
}
}
class TimerGPU {
public:
hipEvent_t start, stop;
hipStream_t stream;
TimerGPU(hipStream_t stream_ = 0) : stream(stream_) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, stream);
}
~TimerGPU() {
hipEventDestroy(start);
hipEventDestroy(stop);
}
float read() {
hipEventRecord(stop, stream);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
return time;
}
};
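// CPU timer based on the time-stamp counter: getTSC() drops the low `bits` bits, and
// read() converts the elapsed cycles back to milliseconds using `freq` (core clock in MHz).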
class TimerCPU {
static const int bits = 10;
public:
long long beg_clock;
float freq;
TimerCPU(float freq_) : freq(freq_) {
beg_clock = getTSC(bits);
}
long long getTSC(int bits) {
#ifdef WIN32
return __rdtsc();
#else
unsigned int low, high;
__asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high));
return ((long long)high<<(32 - bits)) | ((long long)low >> bits);
#endif
}
float read() {
long long end_clock = getTSC(bits);
long long Kcycles = end_clock - beg_clock;
float time = (float)(1 << bits) * Kcycles / freq / 1e3f;
return time;
}
};
int iDivUp(int a, int b);
int iDivDown(int a, int b);
int iAlignUp(int a, int b);
int iAlignDown(int a, int b);
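// Row-major matrix wrapper: pads M and N up to multiples of the block sizes BY/BX, owns
// (optionally) a host buffer and a pitched device buffer, and copies data between them
// with download() (host -> device) and readback() (device -> host).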
template<size_t BX, size_t BY>
class CudaMatrix {
public:
CudaMatrix();
~CudaMatrix();
void allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem);
double download();
double readback();
public:
int M, N;
int padM, padN;
float * h_data;
float * d_data;
bool h_internalAlloc;
bool d_internalAlloc;
};
int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); }
int iDivDown(int a, int b) { return a / b; }
int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); }
int iAlignDown(int a, int b) { return a - a % b; }
template<size_t BX, size_t BY>
void CudaMatrix<BX, BY>::allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem)
{
M = M_;
N = N_;
padM = iAlignUp(M, BY);
padN = iAlignUp(N, BX);
h_data = hostmem;
d_data = devmem;
if(d_data == nullptr) {
long int nbts = sizeof(float) * (long)padM * padN;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
d_data = nullptr;
exit(EXIT_FAILURE);
}
safeCall(hipMalloc((void**)&d_data, nbts));
safeCall(hipMemset(d_data, 0, nbts));
if(d_data == nullptr) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
}
d_internalAlloc = true;
}
if(host && h_data == nullptr) {
long int nbts = sizeof(float) * (long)M * N;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from host memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
h_data = nullptr;
exit(EXIT_FAILURE);
}
h_data = (float*)malloc(nbts);
memset(h_data, 0, nbts);
h_internalAlloc = true;
}
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::CudaMatrix() : M(0), N(0), h_data(nullptr), d_data(nullptr), h_internalAlloc(false), d_internalAlloc(false)
{
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::~CudaMatrix()
{
if(h_internalAlloc && h_data != nullptr) free(h_data);
h_data = nullptr;
if(d_internalAlloc && d_data != nullptr) safeCall(hipFree(d_data));
d_data = nullptr;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::download()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
if(h_data != nullptr && d_data != nullptr) {
safeCall(hipMemcpy2D(d_data, p, h_data, sizeof(float) * N, sizeof(float) * N, M, hipMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSE
fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::readback()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
// cout << sizeof(float) * N << "\t" << p << "\n";
// if(h_data == nullptr) cout << "1\n";
// if(d_data == nullptr) cout << "2\n";
safeCall(hipMemcpy2D(h_data, sizeof(float) * N, d_data, p, sizeof(float) * N, M, hipMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
fprintf(stdout, "INFO: readback time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
// cache A and cache B
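// Each thread block computes a BMxBN tile of C: per iteration over K it stages a BMxBK
// tile of A and a BKxBN tile of B in shared memory, while every thread accumulates a
// (BM/TY)x(BN/TX) sub-tile of C in registers and writes it back once at the end.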
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
// float * dcptr_ = dcptr + tidy * ldc;
// if(ik == 0) {
// for(int im = tidy; im < BM; im += TY, dcptr_ += TY * ldc) {
// for(int in = tidx; in < BN; in += TX) {
// dcptr_[in] = beta * dcptr_[in];
// }
// }
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
// for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// C_reg[ii][ij] = beta * dcptr_[in];
// }
// }
// }
// __syncthreads();
// dcptr_ = dcptr + tidy * ldc;
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// float ret = 0.f;
// const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++) {
// ret += smem[im][kk] * dbptr_[in];
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache B
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// // load block of A to shared memory
// const float * daptr_ = daptr + tidy * lda;
// for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
// for(int ij = tidx; ij < BK; ij += TX) {
// A_smem[ii][ij] = daptr_[ij];
// }
// }
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
const float * daptr_ = daptr + tidy * lda;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// float ret = 0.f;
// const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++) {
// ret += smem[im][kk] * dbptr_[in];
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_A(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
// __shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
// const float * dbptr_ = dbptr + tidy * ldb;
// for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
// for(int ij = tidx; ij < BN; ij += TX) {
// B_smem[ii][ij] = dbptr_[ij];
// }
// }
__syncthreads();
// dcptr_ = dcptr + tidy * ldc;
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++, dbptr_ += ldb) {
// ret += smem[im][kk] * dbptr_[in];
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
C_reg[ii][ij] += A_smem[im][kk] * dbptr_[in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
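// Host-side wrapper: pads A, B and C to block-size multiples, copies them to the device,
// launches the cache-B kernel variant, times it and copies C back to the host.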
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
void mygemm_wrapper(const int M, const int K, const int N, const float alpha, const float * A, const int lda, const float * B, const int ldb, const float beta, float * C, const int ldc)
{
CudaMatrix<BK, BM> wrapperA;
wrapperA.allocate(M, lda, false, nullptr, const_cast<float*>(A));
wrapperA.download();
CudaMatrix<BN, BK> wrapperB;
wrapperB.allocate(K, ldb, false, nullptr, const_cast<float*>(B));
wrapperB.download();
CudaMatrix<BN, BM> wrapperC;
wrapperC.allocate(M, ldc, false, nullptr, C);
wrapperC.download();
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix A, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperA.padM, wrapperA.padN);
fprintf(stdout, "INFO: matrix B, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperB.padM, wrapperB.padN);
fprintf(stdout, "INFO: matrix C, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperC.padM, wrapperC.padN);
#endif
dim3 grid( wrapperC.padN / BN, wrapperA.padM / BM, 1 );
dim3 threads( TX, TY, 1 );
TimerGPU timer(0);
hipLaunchKernelGGL(( mysgemm_cache_B<BM, BK, BN, TX, TY>), dim3(grid), dim3(threads), 0, 0, alpha, wrapperA.d_data, wrapperA.padN, wrapperB.d_data, wrapperB.padN, beta, wrapperC.d_data, wrapperC.padN);
double gpuTime = timer.read();
// wrapperA.readback();
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// fprintf(stdout, "%02.2f\t", A[i * N + j]);
// }
// fprintf(stdout, "\n");
// }
// fflush(stdout);
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", gpuTime);
#ifdef VERBOSE
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (gpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
wrapperC.readback();
}
void constantInit(float * data, long int size, float val)
{
for(long int i = 0; i < size; i++) {
data[i] = val;
}
}
int main(int argc, char * argv[])
{
if(argc != 4) {
fprintf(stderr, "USAGE: M K N\n");
return -1;
}
int M = atoi(argv[1]);
int K = atoi(argv[2]);
int N = atoi(argv[3]);
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix A (MxK) multiply matrix B (KxN), result matrix C (MxN).\n");
fprintf(stdout, "INFO: M = %d, K = %d, N = %d\n", M, K, N);
fflush(stdout);
#endif
float * h_A = (float*)malloc(sizeof(float) * M * K);
float * h_B = (float*)malloc(sizeof(float) * K * N);
float * h_C = (float*)malloc(sizeof(float) * M * N);
float * h_D = (float*)malloc(sizeof(float) * M * N);
const float valB = 0.01f;
long int size_A = M * K;
long int size_B = K * N;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
long int size_C = M * N;
long int size_D = size_C;
memset(h_C, 0, sizeof(float) * size_C);
memset(h_D, 0, sizeof(float) * size_D);
// warm up
mygemm_wrapper<128, 32, 128, 16, 16>(
M, K, N, 1.f,
h_A, K, h_B, N, 0.f, h_C, N);
// mygemm_wrapper<128, 32, 64, 32, 8>(
// M, K, N, 1.f,
// h_A, K, h_B, N, 0.f, h_C, N);
// double t0 = omp_get_wtime();
TimerCPU timer(3.07 * 1000);
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, h_A, K, h_B, N, 0.0f, h_D, N);
double cpuTime = timer.read();
// t0 = omp_get_wtime() - t0;
// cout << t0 << "\n";
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", cpuTime);
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (cpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
// test relative error
bool correct = true;
double eps = 1.e-6;
for(long int i = 0; i < size_C; i++) {
double abs_err = fabs(h_C[i] - h_D[i]);
double dot_length = K;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
// fprintf(stderr, "ERROR: Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_D[i], eps);
correct = false;
}
}
fprintf(stdout, "%s\n", correct ? "Result = PASS" : "Result = FAIL");
fflush(stdout);
free(h_A); h_A = nullptr;
free(h_B); h_B = nullptr;
free(h_C); h_C = nullptr;
free(h_D); h_D = nullptr;
if (!correct) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| 97d904c4f33aa65ef2b793f15b4c635826fe5223.cu | #include <stdio.h>
#include <mkl.h>
#include <math.h>
#include <omp.h>
#include <assert.h>
#include <iostream>
#include <cuda_runtime.h>
//#define VERBOSE
using std::cout;
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
#define nullptr NULL
#define safeCall(err) __safeCall(err, __FILE__, __LINE__)
inline void __safeCall(cudaError err, const char * file, const int line)
{
if(cudaSuccess != err) {
fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, cudaGetErrorString(err));
exit(-1);
}
}
class TimerGPU {
public:
cudaEvent_t start, stop;
cudaStream_t stream;
TimerGPU(cudaStream_t stream_ = 0) : stream(stream_) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, stream);
}
~TimerGPU() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
float read() {
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
return time;
}
};
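// CPU timer based on the time-stamp counter: getTSC() drops the low `bits` bits, and
// read() converts the elapsed cycles back to milliseconds using `freq` (core clock in MHz).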
class TimerCPU {
static const int bits = 10;
public:
long long beg_clock;
float freq;
TimerCPU(float freq_) : freq(freq_) {
beg_clock = getTSC(bits);
}
long long getTSC(int bits) {
#ifdef WIN32
return __rdtsc();
#else
unsigned int low, high;
__asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high));
return ((long long)high<<(32 - bits)) | ((long long)low >> bits);
#endif
}
float read() {
long long end_clock = getTSC(bits);
long long Kcycles = end_clock - beg_clock;
float time = (float)(1 << bits) * Kcycles / freq / 1e3f;
return time;
}
};
int iDivUp(int a, int b);
int iDivDown(int a, int b);
int iAlignUp(int a, int b);
int iAlignDown(int a, int b);
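// Row-major matrix wrapper: pads M and N up to multiples of the block sizes BY/BX, owns
// (optionally) a host buffer and a pitched device buffer, and copies data between them
// with download() (host -> device) and readback() (device -> host).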
template<size_t BX, size_t BY>
class CudaMatrix {
public:
CudaMatrix();
~CudaMatrix();
void allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem);
double download();
double readback();
public:
int M, N;
int padM, padN;
float * h_data;
float * d_data;
bool h_internalAlloc;
bool d_internalAlloc;
};
int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); }
int iDivDown(int a, int b) { return a / b; }
int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); }
int iAlignDown(int a, int b) { return a - a % b; }
template<size_t BX, size_t BY>
void CudaMatrix<BX, BY>::allocate(const int M_, const int N_, bool host, float * devmem, float * hostmem)
{
M = M_;
N = N_;
padM = iAlignUp(M, BY);
padN = iAlignUp(N, BX);
h_data = hostmem;
d_data = devmem;
if(d_data == nullptr) {
long int nbts = sizeof(float) * (long)padM * padN;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
d_data = nullptr;
exit(EXIT_FAILURE);
}
safeCall(cudaMalloc((void**)&d_data, nbts));
safeCall(cudaMemset(d_data, 0, nbts));
if(d_data == nullptr) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
}
d_internalAlloc = true;
}
if(host && h_data == nullptr) {
long int nbts = sizeof(float) * (long)M * N;
if(nbts < 0) {
fprintf(stderr, "ERROR: cannot allocate %lld bytes from host memory, file: %s, line: %d\n", nbts, __FILE__, __LINE__);
h_data = nullptr;
exit(EXIT_FAILURE);
}
h_data = (float*)malloc(nbts);
memset(h_data, 0, nbts);
h_internalAlloc = true;
}
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::CudaMatrix() : M(0), N(0), h_data(nullptr), d_data(nullptr), h_internalAlloc(false), d_internalAlloc(false)
{
}
template<size_t BX, size_t BY>
CudaMatrix<BX, BY>::~CudaMatrix()
{
if(h_internalAlloc && h_data != nullptr) free(h_data);
h_data = nullptr;
if(d_internalAlloc && d_data != nullptr) safeCall(cudaFree(d_data));
d_data = nullptr;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::download()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
if(h_data != nullptr && d_data != nullptr) {
safeCall(cudaMemcpy2D(d_data, p, h_data, sizeof(float) * N, sizeof(float) * N, M, cudaMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSE
fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
template<size_t BX, size_t BY>
double CudaMatrix<BX, BY>::readback()
{
TimerGPU timer(0);
int p = sizeof(float) * padN;
// cout << sizeof(float) * N << "\t" << p << "\n";
// if(h_data == nullptr) cout << "1\n";
// if(d_data == nullptr) cout << "2\n";
safeCall(cudaMemcpy2D(h_data, sizeof(float) * N, d_data, p, sizeof(float) * N, M, cudaMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
fprintf(stdout, "INFO: readback time = %.2fms\n", gpuTime);
fflush(stdout);
#endif
return gpuTime;
}
// cache A and cache B
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_AB(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
// float * dcptr_ = dcptr + tidy * ldc;
// if(ik == 0) {
// for(int im = tidy; im < BM; im += TY, dcptr_ += TY * ldc) {
// for(int in = tidx; in < BN; in += TX) {
// dcptr_[in] = beta * dcptr_[in];
// }
// }
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
// for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// C_reg[ii][ij] = beta * dcptr_[in];
// }
// }
// }
// __syncthreads();
// dcptr_ = dcptr + tidy * ldc;
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// float ret = 0.f;
// const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++) {
// ret += smem[im][kk] * dbptr_[in];
C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache B
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_B(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// // load block of A to shared memory
// const float * daptr_ = daptr + tidy * lda;
// for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
// for(int ij = tidx; ij < BK; ij += TX) {
// A_smem[ii][ij] = daptr_[ij];
// }
// }
const float * dbptr_ = dbptr + tidy * ldb;
for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
for(int ij = tidx; ij < BN; ij += TX) {
B_smem[ii][ij] = dbptr_[ij];
}
}
__syncthreads();
const float * daptr_ = daptr + tidy * lda;
for(int im = tidy, ii = 0; im < BM; im += TY, ii++, daptr_ += TY * lda) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
// float ret = 0.f;
// const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++) {
// ret += smem[im][kk] * dbptr_[in];
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
C_reg[ii][ij] += daptr_[kk] * B_smem[kk][in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
// cache A
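// Variant that stages only the A tile in shared memory; columns of B are streamed straight
// from global memory (dbptr_) inside the inner product loop.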
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
__global__ void mysgemm_cache_A(const float alpha, const float * __restrict__ dA, const int lda, const float * __restrict__ dB, const int ldb, const float beta, float * __restrict__ dC, const int ldc)
{
__shared__ float A_smem[BM][BK];
// __shared__ float B_smem[BK][BN];
float C_reg[BM / TY][BN / TX];
const int gy = blockIdx.y * BM;
const int gx = blockIdx.x * BN;
const int tidy = threadIdx.y;
const int tidx = threadIdx.x;
const float * daptr = dA + gy * lda;
const float * dbptr = dB + gx;
float * dcptr = dC + gy * ldc + gx;
const int stride_b = BK * ldb;
for(int ii = 0; ii < BM / TY; ii++) {
for(int ij = 0; ij < BN / TX; ij++) {
C_reg[ii][ij] = 0.f;
}
}
for(int ik = 0; ik < lda; ik += BK, daptr += BK, dbptr += stride_b) {
// load block of A to shared memory
const float * daptr_ = daptr + tidy * lda;
for(int ii = tidy; ii < BM; ii += TY, daptr_ += TY * lda) {
for(int ij = tidx; ij < BK; ij += TX) {
A_smem[ii][ij] = daptr_[ij];
}
}
// const float * dbptr_ = dbptr + tidy * ldb;
// for(int ii = tidy; ii < BK; ii += TY, dbptr_ += TY * ldb) {
// for(int ij = tidx; ij < BN; ij += TX) {
// B_smem[ii][ij] = dbptr_[ij];
// }
// }
__syncthreads();
// dcptr_ = dcptr + tidy * ldc;
// for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int im = tidy, ii = 0; im < BM; im += TY, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
const float * dbptr_ = dbptr;
for(int kk = 0; kk < BK; kk++, dbptr_ += ldb) {
// ret += smem[im][kk] * dbptr_[in];
// C_reg[ii][ij] += A_smem[im][kk] * B_smem[kk][in];
C_reg[ii][ij] += A_smem[im][kk] * dbptr_[in];
}
// dcptr_[in] += alpha * ret;
}
}
__syncthreads();
}
float * dcptr_ = dcptr + tidy * ldc;
for(int im = tidy, ii = 0; im < BM; im += TY, dcptr_ += TY * ldc, ii++) {
for(int in = tidx, ij = 0; in < BN; in += TX, ij++) {
dcptr_[in] = beta * dcptr_[in] + alpha * C_reg[ii][ij];
}
}
}
template<size_t BM, size_t BK, size_t BN, size_t TX, size_t TY>
void mygemm_wrapper(const int M, const int K, const int N, const float alpha, const float * A, const int lda, const float * B, const int ldb, const float beta, float * C, const int ldc)
{
CudaMatrix<BK, BM> wrapperA;
wrapperA.allocate(M, lda, false, nullptr, const_cast<float*>(A));
wrapperA.download();
CudaMatrix<BN, BK> wrapperB;
wrapperB.allocate(K, ldb, false, nullptr, const_cast<float*>(B));
wrapperB.download();
CudaMatrix<BN, BM> wrapperC;
wrapperC.allocate(M, ldc, false, nullptr, C);
wrapperC.download();
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix A, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperA.padM, wrapperA.padN);
fprintf(stdout, "INFO: matrix B, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperB.padM, wrapperB.padN);
fprintf(stdout, "INFO: matrix C, size = (%dx%d), padding size = (%dx%d)\n", M, K, wrapperC.padM, wrapperC.padN);
#endif
dim3 grid( wrapperC.padN / BN, wrapperA.padM / BM, 1 );
dim3 threads( TX, TY, 1 );
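// One TX x TY thread block per BM x BN tile of C; the padded extents (padM, padN) are
// presumably rounded up to multiples of the tile sizes, so the integer division loses nothing.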
TimerGPU timer(0);
mysgemm_cache_B<BM, BK, BN, TX, TY><<<grid, threads>>>(alpha, wrapperA.d_data, wrapperA.padN, wrapperB.d_data, wrapperB.padN, beta, wrapperC.d_data, wrapperC.padN);
double gpuTime = timer.read();
// wrapperA.readback();
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// fprintf(stdout, "%02.2f\t", A[i * N + j]);
// }
// fprintf(stdout, "\n");
// }
// fflush(stdout);
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", gpuTime);
#ifdef VERBOSE
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (gpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
wrapperC.readback();
}
void constantInit(float * data, long int size, float val)
{
for(long int i = 0; i < size; i++) {
data[i] = val;
}
}
int main(int argc, char * argv[])
{
if(argc != 4) {
fprintf(stderr, "USAGE: M K N\n");
return -1;
}
int M = atoi(argv[1]);
int K = atoi(argv[2]);
int N = atoi(argv[3]);
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix A (MxK) multiply matrix B (KxN), result matrix C (MxN).\n");
fprintf(stdout, "INFO: M = %d, K = %d, N = %d\n", M, K, N);
fflush(stdout);
#endif
float * h_A = (float*)malloc(sizeof(float) * M * K);
float * h_B = (float*)malloc(sizeof(float) * K * N);
float * h_C = (float*)malloc(sizeof(float) * M * N);
float * h_D = (float*)malloc(sizeof(float) * M * N);
const float valB = 0.01f;
long int size_A = M * K;
long int size_B = K * N;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
long int size_C = M * N;
long int size_D = size_C;
memset(h_C, 0, sizeof(float) * size_C);
memset(h_D, 0, sizeof(float) * size_D);
// warm up
mygemm_wrapper<128, 32, 128, 16, 16>(
M, K, N, 1.f,
h_A, K, h_B, N, 0.f, h_C, N);
// mygemm_wrapper<128, 32, 64, 32, 8>(
// M, K, N, 1.f,
// h_A, K, h_B, N, 0.f, h_C, N);
// double t0 = omp_get_wtime();
TimerCPU timer(3.07 * 1000);
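// NOTE: the constructor argument (3.07 * 1000) is presumably the CPU clock rate in MHz,
// used by TimerCPU to convert cycle counts into milliseconds; adjust it for the host CPU.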
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, h_A, K, h_B, N, 0.0f, h_D, N);
double cpuTime = timer.read();
// t0 = omp_get_wtime() - t0;
// cout << t0 << "\n";
#ifdef VERBOSE
fprintf(stdout, "INFO: matrix multiply time = %.2f ms.\n", cpuTime);
fprintf(stdout, "INFO: performance = %f GFLOPS\n", (2.0 * M * N * K) / (cpuTime / 1000.0 * 1e9));
#endif
fflush(stdout);
// test relative error
bool correct = true;
double eps = 1.e-6;
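// Per-element relative error check: |h_C[i] - h_D[i]| / (|h_C[i]| * K) must stay below eps.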
for(long int i = 0; i < size_C; i++) {
double abs_err = fabs(h_C[i] - h_D[i]);
double dot_length = K;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
// fprintf(stderr, "ERROR: Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], h_D[i], eps);
correct = false;
}
}
fprintf(stdout, "%s\n", correct ? "Result = PASS" : "Result = FAIL");
fflush(stdout);
free(h_A); h_A = nullptr;
free(h_B); h_B = nullptr;
free(h_C); h_C = nullptr;
free(h_D); h_D = nullptr;
if (!correct) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
824666a9e16062270e88b105d7b058ac134f3a2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// y_load.cu
//
// Ghost loading based on threadIdx.y. Requires NX_TILE = NY_TILE.
//
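// Ghost cells are fetched cooperatively: threads with threadIdx.y < 2 * NGHOST copy the
// rows above/below the tile; threads with 2 * NGHOST <= threadIdx.y < 4 * NGHOST swap their
// x/y roles and copy the columns to the left/right, which is why the tile must be square.
//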
__global__ void
grad_kernel_y_load(const real * __restrict f, real * __restrict u, const real xfactor, const real yfactor,
const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
const int ghostMul[] = { 0, 0, 0, 1, 1, 1 };
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Ghost zone loading indices
int2 gli = make_int2(-1, -1), gi = make_int2(-1, -1);
if (threadIdx.y < 2 * NGHOST) {
int off = -3 + ghostMul[threadIdx.y] * NY_TILE;
gli.x = xli;
gli.y = yli + off;
gi.x = xi;
gi.y = yi + off;
}
else if (threadIdx.y < 4 * NGHOST) {
int adjidx = threadIdx.y - 2 * NGHOST;
int off = -3 + ghostMul[adjidx] * NY_TILE - 2 * NGHOST;
gli.x = yli + off;
gli.y = xli;
gi.x = blockIdx.x * blockDim.x + yli + off;
gi.y = blockIdx.y * blockDim.y + xli;
}
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (gli.x >= 0)
fs[gli.y][gli.x] = f[vfidx(gi.x, gi.y, zi)];
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_y_load(vf3dgpu &f, vf3dgpu &u)
{
hipLaunchKernelGGL(( grad_kernel_y_load), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
| 824666a9e16062270e88b105d7b058ac134f3a2e.cu | // y_load.cu
//
// Ghost loading based on threadIdx.y. Requires NX_TILE = NY_TILE.
//
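// Ghost cells are fetched cooperatively: threads with threadIdx.y < 2 * NGHOST copy the
// rows above/below the tile; threads with 2 * NGHOST <= threadIdx.y < 4 * NGHOST swap their
// x/y roles and copy the columns to the left/right, which is why the tile must be square.
//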
__global__ void
grad_kernel_y_load(const real * __restrict f, real * __restrict u, const real xfactor, const real yfactor,
const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
const int ghostMul[] = { 0, 0, 0, 1, 1, 1 };
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Ghost zone loading indices
int2 gli = make_int2(-1, -1), gi = make_int2(-1, -1);
if (threadIdx.y < 2 * NGHOST) {
int off = -3 + ghostMul[threadIdx.y] * NY_TILE;
gli.x = xli;
gli.y = yli + off;
gi.x = xi;
gi.y = yi + off;
}
else if (threadIdx.y < 4 * NGHOST) {
int adjidx = threadIdx.y - 2 * NGHOST;
int off = -3 + ghostMul[adjidx] * NY_TILE - 2 * NGHOST;
gli.x = yli + off;
gli.y = xli;
gi.x = blockIdx.x * blockDim.x + yli + off;
gi.y = blockIdx.y * blockDim.y + xli;
}
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (gli.x >= 0)
fs[gli.y][gli.x] = f[vfidx(gi.x, gi.y, zi)];
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_y_load(vf3dgpu &f, vf3dgpu &u)
{
grad_kernel_y_load<<<xy_tile.nblocks, xy_tile.nthreads>>>(f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
|
684de1301655a6dfa365372f4fb02b0c5236adf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* Y) {
const int C = G * D;
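// i_mu picks the per-(n, g) statistic: for NCHW, i / (D * HxW) equals n * G + g directly;
// for NHWC, i / (C * HxW) recovers n and i / D % G recovers g. i_gamma is the channel index.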
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu) +
__ldg(beta + i_gamma);
#else
Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma];
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void ComputeInternalGradientsCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* gamma,
T* ds,
T* db) {
const int outer_size = N * G;
const int inner_size = D * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int i_gamma = i % G * D + j / HxW;
const int index = kOrder == StorageOrder::NCHW
? i * inner_size + j
: (i / G * HxW + j % HxW) * G * D + i_gamma;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(gamma + i_gamma) * __ldg(dY + index);
#else
ds_val += gamma[i_gamma] * dY[index] * X[index];
db_val += gamma[i_gamma] * dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, hipcub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum());
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
__syncthreads();
}
}
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - mu * rsig
// Y = s * X + b
// let n = D * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -u * drsig/dX - rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
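// Putting it together, the kernel below evaluates
// dL/dX = gamma * dY * rsig + ((db * mu - ds) * (X - mu) * rsig^3 - db * rsig) / n
// with ds = Sum(gamma * dY * X) and db = Sum(gamma * dY) taken over each (n, g) group.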
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* ds,
const T* db,
T* dX) {
const int C = G * D;
const T denom = T(1) / static_cast<T>(D * HxW);
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) *
(__ldg(X + i) - __ldg(mu + i_mu)) *
math::utils::Cube<T>(__ldg(rsig + i_mu));
const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu);
dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) +
(u - v) * denom;
#else
const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) *
math::utils::Cube<T>(rsig[i_mu]);
const T v = db[i_mu] * rsig[i_mu];
dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom;
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta) {
const int outer_size = G * D;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage dg_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T dg_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int n = j / HxW;
const int index = kOrder == StorageOrder::NCHW
? (n * outer_size + i) * HxW + j % HxW
: j * outer_size + i;
const int i_mu = n * G + i / D;
#if __CUDA_ARCH__ >= 350
dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu);
db_val += __ldg(dY + index);
#else
dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu];
db_val += dY[index];
#endif
}
dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, hipcub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum());
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
__syncthreads();
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int G,
const int D,
const int HxW,
const float* X,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* Y) {
const int size = N * G * D * HxW;
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size, G, D, HxW, X, mu, rsig, gamma, beta, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int G,
const int D,
const int HxW,
const float* X,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* Y) {
const int size = N * G * D * HxW;
hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size, G, D, HxW, X, mu, rsig, gamma, beta, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl(
const int N,
const int G,
const int D,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int size = N * G * D * HxW;
const int C = G * D;
ds_.Resize(N, G);
db_.Resize(N, G);
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
if (order_ == StorageOrder::NCHW) {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
} else {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
}
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
| 684de1301655a6dfa365372f4fb02b0c5236adf5.cu | // ------------------------------------------------------------------
// GroupNorm op in Caffe2 for GPU
// Written by Kaiming He
// Improved by Xiaomeng Yang
// see https://arxiv.org/abs/1803.08494
// This is a stand-alone op: Y = gamma * (X - mu) / sig + beta
// ------------------------------------------------------------------
#include "caffe2/operators/group_norm_op.h"
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, StorageOrder kOrder>
__global__ void GroupNormForwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* Y) {
const int C = G * D;
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu) +
__ldg(beta + i_gamma);
#else
Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma];
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void ComputeInternalGradientsCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* gamma,
T* ds,
T* db) {
const int outer_size = N * G;
const int inner_size = D * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int i_gamma = i % G * D + j / HxW;
const int index = kOrder == StorageOrder::NCHW
? i * inner_size + j
: (i / G * HxW + j % HxW) * G * D + i_gamma;
#if __CUDA_ARCH__ >= 350
ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index);
db_val += __ldg(gamma + i_gamma) * __ldg(dY + index);
#else
ds_val += gamma[i_gamma] * dY[index] * X[index];
db_val += gamma[i_gamma] * dY[index];
#endif
}
ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, cub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum());
if (threadIdx.x == 0) {
ds[i] = ds_val;
db[i] = db_val;
}
__syncthreads();
}
}
// Math:
// Y = gamma * (X - mu) * rsig + beta
// let s = gamma * rsig
// let b = beta - mu * rsig
// Y = s * X + b
// let n = D * HxW
// dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX)
// d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX
// db/dX = -u * drsig/dX - rsig * dmu/dX
// drsig/dX = -rsig^3 * (X - mu) / n
// dmu/dX = 1 / n
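// Putting it together, the kernel below evaluates
// dL/dX = gamma * dY * rsig + ((db * mu - ds) * (X - mu) * rsig^3 - db * rsig) / n
// with ds = Sum(gamma * dY * X) and db = Sum(gamma * dY) taken over each (n, g) group.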
template <typename T, StorageOrder kOrder>
__global__ void GroupNormBackwardCUDAKernel(
const int size,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
const T* gamma,
const T* ds,
const T* db,
T* dX) {
const int C = G * D;
const T denom = T(1) / static_cast<T>(D * HxW);
CUDA_1D_KERNEL_LOOP(i, size) {
const int i_mu = kOrder == StorageOrder::NCHW
? i / (D * HxW)
: i / (C * HxW) * G + (i / D % G);
const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C;
#if __CUDA_ARCH__ >= 350
const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) *
(__ldg(X + i) - __ldg(mu + i_mu)) *
math::utils::Cube<T>(__ldg(rsig + i_mu));
const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu);
dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) +
(u - v) * denom;
#else
const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) *
math::utils::Cube<T>(rsig[i_mu]);
const T v = db[i_mu] * rsig[i_mu];
dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom;
#endif
}
}
template <typename T, StorageOrder kOrder>
__global__ void GammaBetaBackwardCUDAKernel(
const int N,
const int G,
const int D,
const int HxW,
const T* dY,
const T* X,
const T* mu,
const T* rsig,
T* dgamma,
T* dbeta) {
const int outer_size = G * D;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage dg_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T dg_val = 0;
T db_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int n = j / HxW;
const int index = kOrder == StorageOrder::NCHW
? (n * outer_size + i) * HxW + j % HxW
: j * outer_size + i;
const int i_mu = n * G + i / D;
#if __CUDA_ARCH__ >= 350
dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) *
__ldg(rsig + i_mu);
db_val += __ldg(dY + index);
#else
dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu];
db_val += dY[index];
#endif
}
dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, cub::Sum());
db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum());
if (threadIdx.x == 0) {
dgamma[i] = dg_val;
dbeta[i] = db_val;
}
__syncthreads();
}
}
} // namespace
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW(
const int N,
const int G,
const int D,
const int HxW,
const float* X,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* Y) {
const int size = N * G * D * HxW;
GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size, G, D, HxW, X, mu, rsig, gamma, beta, Y);
}
template <>
void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC(
const int N,
const int G,
const int D,
const int HxW,
const float* X,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* Y) {
const int size = N * G * D * HxW;
GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size, G, D, HxW, X, mu, rsig, gamma, beta, Y);
}
// Math:
// let: s = gamma * rsig
// let: b = beta - mu * gamma * rsig
// then: Y = s * X + b
template <>
bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl(
const int N,
const int G,
const int D,
const int HxW,
const float* dY_data,
const float* X_data,
const float* mu_data,
const float* rsig_data,
const float* gamma_data,
float* dX_data,
float* dgamma_data,
float* dbeta_data) {
const int size = N * G * D * HxW;
const int C = G * D;
ds_.Resize(N, G);
db_.Resize(N, G);
float* ds_data = ds_.mutable_data<float>();
float* db_data = db_.mutable_data<float>();
if (order_ == StorageOrder::NCHW) {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
} else {
// Computes dL/ds and dL/db.
// dL/ds = Sum(dL/dY * gamma * X)
// dL/db = Sum(dL/dY * gamma)
ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data);
// Computes dL/dX.
GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
gamma_data,
ds_data,
db_data,
dX_data);
// Computes dL/dgamma and dL/dbeta.
GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
G,
D,
HxW,
dY_data,
X_data,
mu_data,
rsig_data,
dgamma_data,
dbeta_data);
}
return true;
}
REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GroupNormGradient,
GroupNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
972911af9b23bc749f3e0030b2ab3660d74f3fd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cmath>
#include "kernels.h"
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
// Check that all values of array (which contains `length` float elements) are
// close to `expectedValue`
bool checkHostArray(float *array, float expectedValue, size_t length){
float maxError = 0.0f;
for (int i = 0; i < length; i++)
maxError = fmax(maxError, fabs(array[i]-expectedValue));
std::cout << "Max error: " << maxError << std::endl;
return (maxError < 0.0001f);
}
/*
Getting GPU Data.
There is 1 device supporting CUDA
Device 0 name: NVIDIA GeForce RTX 3060 Ti
Computational Capabilities: 8.6
Maximum global memory size: 7979
Maximum constant memory size: 64
Maximum shared memory size per block: 48
Maximum block dimensions: 1024 x 1024 x 64
Maximum grid dimensions: 2147483647 x 65535 x 65535
Warp size: 32
End of GPU data gathering.
*/
int main(void)
{
int N = 1<<20; //< Number of elements in arrays (1M, you may want to lower this to begin)
float *d_x; //< Pointer to the 1D buffer we will manipulate
printf("%d\n", N);
// Initialize grid and block sizes for later kernel launches.
// Use as many threads as possible.
//@@ Choose some values here, stick to 1D
int threadsPerBlock = 256; // FIXME
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; // FIXME
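// Ceiling division: enough blocks so that blocksPerGrid * threadsPerBlock >= N and every
// element is covered (any excess threads are expected to be bounds-checked in the kernels).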
// Array allocation on device
//@@ Use hipMalloc to perform the allocation.
hipMalloc(&d_x, N * sizeof(float)); // FIXME
cudaCheckError();
// Initialize the x and y arrays on the device
const float firstValue = 1.f;
//@@ Call the fill1D kernel to fill d_x with `firstValue`, see kernels.h for the API
hipLaunchKernelGGL(( fill1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, firstValue, N); // FIXME
// Wait for GPU to finish and check for errors
hipDeviceSynchronize();
cudaCheckError();
// Check for errors on device
//@@ Call the check1D kernel to control device memory content, see kernels.h for API
hipLaunchKernelGGL(( check1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, firstValue, N); // FIXME
// Wait for GPU to finish and check for errors
//@@ call CUDA device synchronisation function
hipDeviceSynchronize(); //@@ ???
cudaCheckError();
// Copy back the buffer to the host for inspection:
//@@ Allocate a buffer on the host
float *h_x = (float*) std::malloc(N * sizeof(float)); //FIXME
//@@ Copy the buffer content from device to host
//@@ use hipMemcpy
hipMemcpy(h_x, d_x, N * sizeof(float), hipMemcpyDeviceToHost); // FIXME
cudaCheckError();
// Check for errors (all values should be close to `firstValue`)
std::cout << "First control..." << std::endl;
bool noerror = checkHostArray(h_x, firstValue, N);
// Now increment the array values by some other value
const float otherValue = 10.f;
//@@ Call the inc1D kernel to add `otherValue` to all values of our buffer, see kernels.h for API
hipLaunchKernelGGL(( inc1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, otherValue, N);
// Wait for GPU to finish
//@@ call CUDA device synchronisation function
hipDeviceSynchronize(); //@@ ???
cudaCheckError();
// Check for errors on device
//@@ Call the check1D kernel to control device memory content, see kernels.h for API
hipLaunchKernelGGL(( check1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, firstValue + otherValue, N); // FIXME
// Wait for GPU to finish and check for errors
//@@ call CUDA device synchronisation function
//@@ ???
hipDeviceSynchronize();
cudaCheckError();
// Copy back the buffer to the host for inspection:
//@@ Copy the buffer content from device to host (reuse previous buffer)
hipMemcpy(h_x, d_x, N * sizeof(float), hipMemcpyDeviceToHost); // FIXME
cudaCheckError();
// Check for errors (all values should be close to `firstValue+otherValue`)
std::cout << "Second control..." << std::endl;
noerror &= checkHostArray(h_x, firstValue+otherValue, N);
// Free memory
//@@ free d_x using CUDA primitives
hipFree(d_x);
cudaCheckError();
std::free(h_x);
if (noerror) {
printf("Test completed successfully.\n");
return 0;
} else {
printf("WARNING there were some errors.\n");
return 1;
}
}
| 972911af9b23bc749f3e0030b2ab3660d74f3fd5.cu | #include <iostream>
#include <cstdlib>
#include <cmath>
#include "kernels.h"
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
// Check that all values of array (which contains `length` float elements) are
// close to `expectedValue`
bool checkHostArray(float *array, float expectedValue, size_t length){
float maxError = 0.0f;
for (int i = 0; i < length; i++)
maxError = fmax(maxError, fabs(array[i]-expectedValue));
std::cout << "Max error: " << maxError << std::endl;
return (maxError < 0.0001f);
}
/*
Getting GPU Data.
There is 1 device supporting CUDA
Device 0 name: NVIDIA GeForce RTX 3060 Ti
Computational Capabilities: 8.6
Maximum global memory size: 7979
Maximum constant memory size: 64
Maximum shared memory size per block: 48
Maximum block dimensions: 1024 x 1024 x 64
Maximum grid dimensions: 2147483647 x 65535 x 65535
Warp size: 32
End of GPU data gathering.
*/
int main(void)
{
int N = 1<<20; //< Number of elements in arrays (1M, you may want to lower this to begin)
float *d_x; //< Pointer to the 1D buffer we will manipulate
printf("%d\n", N);
// Initialize grid and block sizes for later kernel launches.
// Use as many threads as possible.
//@@ Choose some values here, stick to 1D
int threadsPerBlock = 256; // FIXME
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; // FIXME
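// Ceiling division: enough blocks so that blocksPerGrid * threadsPerBlock >= N and every
// element is covered (any excess threads are expected to be bounds-checked in the kernels).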
// Array allocation on device
//@@ Use cudaMalloc to perform the allocation.
cudaMalloc(&d_x, N * sizeof(float)); // FIXME
cudaCheckError();
// Initialize the x and y arrays on the device
const float firstValue = 1.f;
//@@ Call the fill1D kernel to fill d_x with `firstValue`, see kernels.h for the API
fill1D<<<blocksPerGrid, threadsPerBlock>>>(d_x, firstValue, N); // FIXME
// Wait for GPU to finish and check for errors
cudaDeviceSynchronize();
cudaCheckError();
// Check for errors on device
//@@ Call the check1D kernel to control device memory content, see kernels.h for API
check1D<<<blocksPerGrid, threadsPerBlock>>>(d_x, firstValue, N); // FIXME
// Wait for GPU to finish and check for errors
//@@ call CUDA device synchronisation function
cudaDeviceSynchronize(); //@@ ???
cudaCheckError();
// Copy back the buffer to the host for inspection:
//@@ Allocate a buffer on the host
float *h_x = (float*) std::malloc(N * sizeof(float)); //FIXME
//@@ Copy the buffer content from device to host
//@@ use cudaMemcpy
cudaMemcpy(h_x, d_x, N * sizeof(float), cudaMemcpyDeviceToHost); // FIXME
cudaCheckError();
// Check for errors (all values should be close to `firstValue`)
std::cout << "First control..." << std::endl;
bool noerror = checkHostArray(h_x, firstValue, N);
// Now increment the array values by some other value
const float otherValue = 10.f;
//@@ Call the inc1D kernel to add `otherValue` to all values of our buffer, see kernels.h for API
inc1D<<<blocksPerGrid, threadsPerBlock>>>(d_x, otherValue, N);
// Wait for GPU to finish
//@@ call CUDA device synchronisation function
cudaDeviceSynchronize(); //@@ ???
cudaCheckError();
// Check for errors on device
//@@ Call the check1D kernel to control device memory content, see kernels.h for API
check1D<<<blocksPerGrid, threadsPerBlock>>>(d_x, firstValue + otherValue, N); // FIXME
// Wait for GPU to finish and check for errors
//@@ call CUDA device synchronisation function
//@@ ???
cudaDeviceSynchronize();
cudaCheckError();
// Copy back the buffer to the host for inspection:
//@@ Copy the buffer content from device to host (reuse previous buffer)
cudaMemcpy(h_x, d_x, N * sizeof(float), cudaMemcpyDeviceToHost); // FIXME
cudaCheckError();
// Check for errors (all values should be close to `firstValue+otherValue`)
std::cout << "Second control..." << std::endl;
noerror &= checkHostArray(h_x, firstValue+otherValue, N);
// Free memory
//@@ free d_x using CUDA primitives
cudaFree(d_x);
cudaCheckError();
std::free(h_x);
if (noerror) {
printf("Test completed successfully.\n");
return 0;
} else {
printf("WARNING there were some errors.\n");
return 1;
}
}
|
batch_norm_add_relu.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHNumerics.cuh>
#include "THH/THH.h"
#include "batch_norm_add_relu.h"
#include <hip/hip_runtime.h>
//FIXME move the common stuff to common h file
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
static size_t round_up_to_multiple(size_t x, int multiple) {
return ((x + multiple - 1) / multiple) * multiple;
}
// TODO: Stop manually allocating CUDA memory; allocate an ATen byte
// tensor instead.
struct Workspace {
Workspace(size_t size) : size(size), data(NULL) {
data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size);
}
Workspace(const Workspace&) = delete;
Workspace(Workspace&&) = default;
Workspace& operator=(Workspace&&) = default;
~Workspace() {
if (data) {
THCudaFree(at::globalContext().lazyInitCUDA(), data);
}
}
size_t size;
void* data;
};
// Return {y}
at::Tensor nhwc_bn_addrelu_fwd_train(
const at::Tensor& x,
const at::Tensor& z,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& bitmask,
const float momentum,
const float epsilon,
void * my_data,
void * pair_data,
void * pair_data2,
const int bn_group,
const at::Tensor& magic_tensor,
const int max_cta_per_sm,
const int cta_launch_margin) {
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
int* magic = magic_tensor.data<int>();
*magic = (*magic + 1) & 0xff;
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
nullptr,
y.data<at::Half>(),
nullptr,
z.data<at::Half>(),
nullptr);
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
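// Workspace layout expected by the wrapper: slots 0-2 are the minibatch mean, inverse
// variance and ReLU bitmask tensors, slot 3 is the retired-CTA counter (zeroed below),
// and the remaining entries are carved out of one allocation with 512-byte alignment.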
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.data<float>());
workspace.push_back(minibatch_inv_var.data<float>());
workspace.push_back(bitmask.data<int32_t>());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
hipMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
int device_id;
hipGetDevice(&device_id);
// Don't fuse in ReLU for now at least
bn->fwd(stream, device_id, my_data, pair_data, pair_data2, bn_group, *magic, max_cta_per_sm, cta_launch_margin);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return y;
}
at::Tensor nhwc_bn_addrelu_fwd_eval(
const at::Tensor& x,
const at::Tensor& z,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const int bn_group,
const float momentum,
const float epsilon) {
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
nullptr,
y.data<at::Half>(),
nullptr,
z.data<at::Half>(),
nullptr);
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(nullptr);
workspace.push_back(nullptr);
workspace.push_back(nullptr);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
hipMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwdInference(stream);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return y;
}
std::vector<at::Tensor> nhwc_bn_addrelu_bwd(
const at::Tensor& x,
const at::Tensor& dy,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& bitmask,
const float momentum,
const float epsilon,
void * my_data,
void * pair_data,
void * pair_data2,
const int bn_group,
const at::Tensor& magic_tensor,
const int max_cta_per_sm,
const int cta_launch_margin) {
// shape
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
int* magic = magic_tensor.data<int>();
*magic = (*magic + 1) & 0xff;
// outputs
at::Tensor x_grad, z_grad, scale_grad, bias_grad;
// Allocate outputs
x_grad = at::empty_like(x);
z_grad = at::empty_like(x);
scale_grad = at::empty_like(scale);
bias_grad = at::empty_like(bias);
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
x_grad.data<at::Half>(),
nullptr,
dy.data<at::Half>(),
nullptr,
z_grad.data<at::Half>());
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {scale_grad.data<float>(), bias_grad.data<float>()});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.data<float>());
workspace.push_back(minibatch_inv_var.data<float>());
workspace.push_back(bitmask.data<int32_t>());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
hipMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
int device_id;
hipGetDevice(&device_id);
bn->dgrad(stream, device_id, my_data, pair_data, pair_data2, bn_group, *magic, max_cta_per_sm, cta_launch_margin);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return std::vector<at::Tensor>{x_grad, z_grad, scale_grad, bias_grad};
}
| batch_norm_add_relu.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCNumerics.cuh>
#include "THC/THC.h"
#include "batch_norm_add_relu.h"
#include <cuda.h>
//FIXME move the common stuff to common h file
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
static size_t round_up_to_multiple(size_t x, int multiple) {
return ((x + multiple - 1) / multiple) * multiple;
}
// TODO: Stop manually allocating CUDA memory; allocate an ATen byte
// tensor instead.
struct Workspace {
Workspace(size_t size) : size(size), data(NULL) {
data = THCudaMalloc(at::globalContext().lazyInitCUDA(), size);
}
Workspace(const Workspace&) = delete;
Workspace(Workspace&&) = default;
Workspace& operator=(Workspace&&) = default;
~Workspace() {
if (data) {
THCudaFree(at::globalContext().lazyInitCUDA(), data);
}
}
size_t size;
void* data;
};
// Return {y}
at::Tensor nhwc_bn_addrelu_fwd_train(
const at::Tensor& x,
const at::Tensor& z,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& bitmask,
const float momentum,
const float epsilon,
void * my_data,
void * pair_data,
void * pair_data2,
const int bn_group,
const at::Tensor& magic_tensor,
const int max_cta_per_sm,
const int cta_launch_margin) {
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
int* magic = magic_tensor.data<int>();
*magic = (*magic + 1) & 0xff;
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
nullptr,
y.data<at::Half>(),
nullptr,
z.data<at::Half>(),
nullptr);
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
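// Workspace layout expected by the wrapper: slots 0-2 are the minibatch mean, inverse
// variance and ReLU bitmask tensors, slot 3 is the retired-CTA counter (zeroed below),
// and the remaining entries are carved out of one allocation with 512-byte alignment.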
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.data<float>());
workspace.push_back(minibatch_inv_var.data<float>());
workspace.push_back(bitmask.data<int32_t>());
auto stream = at::cuda::getCurrentCUDAStream().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
cudaMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
int device_id;
cudaGetDevice(&device_id);
// Don't fuse in ReLU for now at least
bn->fwd(stream, device_id, my_data, pair_data, pair_data2, bn_group, *magic, max_cta_per_sm, cta_launch_margin);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return y;
}
at::Tensor nhwc_bn_addrelu_fwd_eval(
const at::Tensor& x,
const at::Tensor& z,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const int bn_group,
const float momentum,
const float epsilon) {
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// Allocate output tensor
at::Tensor y = at::empty({N, H, W, C}, x.options());
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
nullptr,
y.data<at::Half>(),
nullptr,
z.data<at::Half>(),
nullptr);
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {nullptr, nullptr});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(nullptr);
workspace.push_back(nullptr);
workspace.push_back(nullptr);
auto stream = at::cuda::getCurrentCUDAStream().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
cudaMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
// Don't fuse in ReLU for now at least
bn->fwdInference(stream);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return y;
}
std::vector<at::Tensor> nhwc_bn_addrelu_bwd(
const at::Tensor& x,
const at::Tensor& dy,
const at::Tensor& scale,
const at::Tensor& bias,
const at::Tensor& running_mean,
const at::Tensor& running_inv_var,
const at::Tensor& minibatch_mean,
const at::Tensor& minibatch_inv_var,
const at::Tensor& bitmask,
const float momentum,
const float epsilon,
void * my_data,
void * pair_data,
void * pair_data2,
const int bn_group,
const at::Tensor& magic_tensor,
const int max_cta_per_sm,
const int cta_launch_margin) {
// shape
const int N = x.size(0);
const int H = x.size(1);
const int W = x.size(2);
const int C = x.size(3);
// generating new magic number and use that for sync
int* magic = magic_tensor.data<int>();
*magic = (*magic + 1) & 0xff;
// outputs
at::Tensor x_grad, z_grad, scale_grad, bias_grad;
// Allocate outputs
x_grad = at::empty_like(x);
z_grad = at::empty_like(x);
scale_grad = at::empty_like(scale);
bias_grad = at::empty_like(bias);
// Create wrapper
NhwcBatchNormAddRelu *bn = new NhwcBatchNormAddRelu();
bn->setInputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W, bn_group);
bn->setOutputDescriptor(CUDNN_TENSOR_NHWC, CUDNN_DATA_HALF, N, C, H, W);
bn->setConstants(momentum, epsilon);
// set pointers within the wrapper
bn->setInputOutputPointers(x.data<at::Half>(),
x_grad.data<at::Half>(),
nullptr,
dy.data<at::Half>(),
nullptr,
z_grad.data<at::Half>());
bn->setWeightPointers({scale.data<float>(), bias.data<float>()}, {scale_grad.data<float>(), bias_grad.data<float>()});
bn->setParameterPointers({running_mean.data<float>(), running_inv_var.data<float>()});
// deal with workspace(s)
auto workspace_bytes = bn->numWorkspaceBytes();
// We'll create explicit tensors for the first 2 workspace ptrs, then allocate & offset
// an allocated workspace for the others
size_t total_workspace_bytes = 0;
std::vector<size_t> workspace_offsets;
for (auto index = 4; index < workspace_bytes.size(); ++index) {
total_workspace_bytes = round_up_to_multiple(total_workspace_bytes, 512);
workspace_offsets.push_back(total_workspace_bytes);
auto alloc_bytes = workspace_bytes[index];
total_workspace_bytes += alloc_bytes;
}
// Allocate the workspace
Workspace ws(total_workspace_bytes);
std::vector<void *> workspace;
workspace.push_back(minibatch_mean.data<float>());
workspace.push_back(minibatch_inv_var.data<float>());
workspace.push_back(bitmask.data<int32_t>());
auto stream = at::cuda::getCurrentCUDAStream().stream();
const int retired_cta_bytes = workspace_bytes[3];
void* retired_ctas = THCudaMalloc(at::globalContext().lazyInitCUDA(), retired_cta_bytes);
cudaMemsetAsync(retired_ctas, 0, retired_cta_bytes, stream); //FIXME: is this legit?
workspace.push_back(retired_ctas);
for (auto index = 4; index < workspace_bytes.size(); ++index) {
void *ptr = reinterpret_cast<uint8_t*>(ws.data) + workspace_offsets[index-4];
workspace.push_back(ptr);
}
bn->setWorkspacePointers(workspace, workspace_bytes);
int device_id;
cudaGetDevice(&device_id);
bn->dgrad(stream, device_id, my_data, pair_data, pair_data2, bn_group, *magic, max_cta_per_sm, cta_launch_margin);
THCudaFree(at::globalContext().lazyInitCUDA(), retired_ctas);
return std::vector<at::Tensor>{x_grad, z_grad, scale_grad, bias_grad};
}
|
ff3eaf4202427dc6e7260315527686e532be8aba.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include "zone_map.h"
using namespace mgpu;
vector<void*> alloced_mem;
template<typename T>
struct distinct : public binary_function<T,T,T>
{
__host__ __device__ T operator()(const T &lhs, const T &rhs) const {
return lhs != rhs;
}
};
struct gpu_getyear
{
const int_type *source;
int_type *dest;
gpu_getyear(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
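// 11644473600 = seconds from 1601-01-01 to the Unix epoch (1970-01-01); rebasing to 1601
// lets the 400/100/4-year cycle arithmetic below follow the Gregorian calendar directly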
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year;
}
};
struct gpu_getmonth
{
const int_type *source;
int_type *dest;
gpu_getmonth(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year*100+month;
}
};
struct gpu_getday
{
const int_type *source;
int_type *dest;
gpu_getday(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year*10000+month*100+mday;
}
};
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a,
CudaSet* b, vector<thrust::device_vector<int_type> >& distinct_tmp, bool& one_liner)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
size_t res_size = 0;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<int_type> exe_nums1;
stack<unsigned int> exe_precision;
stack<unsigned int> exe_precision1;
bool ts;
stack<bool> exe_ts;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line;
unsigned int dist_processed = 0;
bool prep = 0;
one_line = 0;
thrust::device_ptr<bool> d_di(thrust::raw_pointer_cast(a->grp.data()));
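// d_di wraps a->grp, the per-row group head flags; the grouped aggregates below treat a set
// flag as the start of a new group (see head_flag_predicate and the copy_if stencils)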
std::auto_ptr<ReduceByKeyPreprocessData> ppData;
if (a->grp_count && (a->mRecCount != 0))
res_size = a->grp_count;
std::clock_t start1 = std::clock();
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
//cout << ss << endl;
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0 || ss.compare("DISTINCT") == 0 || ss.compare("YEAR") == 0 || ss.compare("MONTH") == 0 || ss.compare("DAY") == 0) {
if(!prep && a->grp_count) {
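// build the reduce-by-key plan from the group head flags once; ppData is then reused by
// every grouped aggregate (COUNT/SUM/MAX/AVG) below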
mgpu::ReduceByKeyPreprocess<float_type>((int)a->mRecCount, thrust::raw_pointer_cast(d_di),
(bool*)0, head_flag_predicate<bool>(), (int*)0, (int*)0,
&ppData, *context);
prep = 1;
};
if(!a->grp_count && ss.compare("YEAR") && ss.compare("MONTH") && ss.compare("DAY")) {
one_line = 1;
};
if (ss.compare("YEAR") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
if(a->ts_cols[s1_val]) {
thrust::counting_iterator<unsigned int> begin(0);
gpu_getyear ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
}
else {
thrust::transform(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount, thrust::make_constant_iterator(10000), res, thrust::divides<int_type>());
exe_precision.push(a->decimal_zeroes[s1_val]);
};
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("MONTH") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::counting_iterator<unsigned int> begin(0);
gpu_getmonth ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("DAY") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::counting_iterator<unsigned int> begin(0);
gpu_getday ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("DISTINCT") == 0) {
s1_val = exe_value.top();
exe_type.pop();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::copy(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount,
distinct_tmp[dist_processed].begin());
dist_processed++;
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(res_size);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 2) {
//will add a DISTINCT on strings if anyone needs it
cout << "DISTINCT on strings is not supported yet" << endl;
exit(0);
}
else {
cout << "DISTINCT on float is not supported yet" << endl;
exit(0);
};
}
else if (ss.compare("COUNT") == 0) {
s1 = exe_type.top();
if(s1.compare("VECTOR") != 0) { // non distinct
grp_type = "COUNT";
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<int_type> const_seq((int_type*)alloced_mem.back());
thrust::fill(const_seq, const_seq+a->mRecCount, (int_type)1);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(const_seq), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::device_free(const_seq);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
};
}
else
grp_type = "COUNTD";
exe_precision.push(0);
}
else if (ss.compare("SUM") == 0) {
/*if(op_case) {
cout << "found case " << endl;
op_case = 0;
while(!exe_type.empty())
{
cout << "CASE type " << exe_type.top() << endl;
exe_type.pop();
exit(0);
}
};
*/
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (a->grp_count) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, s3, (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di + a->mRecCount, source,
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
a->mRecCount = 1;
};
hipFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(source), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
hipFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> dest;
int_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> dest;
float_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
exe_precision.push(a->decimal_zeroes[s1_val]);
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<unsigned int> d_di1((unsigned int*)alloced_mem.back());
thrust::copy(d_di, d_di+a->mRecCount,d_di1);
thrust::exclusive_scan(d_di1, d_di1+a->mRecCount, d_di1);
thrust::equal_to<unsigned int> binary_pred;
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
// mgpu::minimum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_int[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
// mgpu::minimum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_float[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("MAX") == 0) {
grp_type = "MAX";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::maximum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::maximum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), a->d_columns_float[s1_val].begin(),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(), thrust::maximum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
};
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
ts = a->ts_cols[op_value.front()];
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1, p2, p1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0, p2, p1));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
auto p1 = a->decimal_zeroes[s1_val];
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,t1,ss,0,p2,p1));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,0,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,1,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,1, p1, p2));
//hipFree(s3);
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,0, p2, p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0));
alloced_mem.push_back(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("FLOAT") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3, s4,ss,0,p1,p2));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
}
}
} //
else {
// end of one output column: record its name, aggregate type and where its value lives
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_precision1.push(a->decimal_zeroes[exe_value.top()]);
exe_value1.push(exe_value.top());
exe_ts.push(ts);
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
for(unsigned int j=0; j < colCount; j++) {
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[col_val.top()] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[col_val.top()] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[col_val.top()] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[col_val.top()] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[col_val.top()] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[col_val.top()] = 5;
else if ((grp_type1.top()).compare("COUNTD") == 0 ) {
b->grp_type[col_val.top()] = 6;
};
if(col_type.top() == 0) {
// create a vector
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::copy_if(thrust::make_constant_iterator((int)exe_nums1.top()), thrust::make_constant_iterator((int)exe_nums1.top()) + a->mRecCount, d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else {
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
b->addDeviceColumn(thrust::raw_pointer_cast(s), col_val.top(), a->mRecCount);
}
exe_nums1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 1) {
if(a->type[exe_value1.top()] == 0 || a->type[exe_value1.top()] == 2) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_int[exe_value1.top()].begin(),a->d_columns_int[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_int[exe_value1.top()].data()) , col_val.top(), a->mRecCount);
if(a->type[exe_value1.top()] == 0) {
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
b->ts_cols[col_val.top()] = exe_ts.top();
};
if(a->type[exe_value1.top()] == 2 || (a->type[exe_value1.top()] == 0 && a->string_map.find(exe_value1.top()) != a->string_map.end())) {
b->string_map[col_val.top()] = a->string_map[exe_value1.top()];
};
exe_precision1.pop();
exe_ts.pop();
}
else if(a->type[exe_value1.top()] == 1) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_float[exe_value1.top()].begin(), a->d_columns_float[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size, a->decimal[exe_value1.top()]);
thrust::device_free(count_diff);
}
else {
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_float[exe_value1.top()].data()), col_val.top(), a->mRecCount, a->decimal[exe_value1.top()]);
};
}
exe_value1.pop();
}
else if(col_type.top() == 2) { // int
if (a->grp_count)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), res_size);
else {
if(!one_line)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), a->mRecCount);
else
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), 1);
};
hipFree(exe_vectors1.top());
exe_vectors1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 3) { //float
if (a->grp_count) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), res_size, 1);
}
else {
if(!one_line) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), a->mRecCount, 1);
}
else {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), 1, 1);
};
};
hipFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if (!a->grp_count) {
if(!one_line)
b->mRecCount = a->mRecCount;
else
b->mRecCount = 1;
one_liner = one_line;
}
else {
b->mRecCount = res_size;
one_liner = 0;
};
}
| ff3eaf4202427dc6e7260315527686e532be8aba.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cm.h"
#include "zone_map.h"
using namespace mgpu;
vector<void*> alloced_mem;
template<typename T>
struct distinct : public binary_function<T,T,T>
{
__host__ __device__ T operator()(const T &lhs, const T &rhs) const {
return lhs != rhs;
}
};
struct gpu_getyear
{
const int_type *source;
int_type *dest;
gpu_getyear(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
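// 11644473600 = seconds from 1601-01-01 to the Unix epoch (1970-01-01); rebasing to 1601
// lets the 400/100/4-year cycle arithmetic below follow the Gregorian calendar directly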
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year;
}
};
struct gpu_getmonth
{
const int_type *source;
int_type *dest;
gpu_getmonth(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year*100+month;
}
};
struct gpu_getday
{
const int_type *source;
int_type *dest;
gpu_getday(const int_type *_source, int_type *_dest):
source(_source), dest(_dest) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
uint64 sec;
uint quadricentennials, centennials, quadrennials, annuals/*1-ennial?*/;
uint year, leap;
uint yday, hour, min;
uint month, mday, wday;
const uint daysSinceJan1st[2][13]=
{
{0,31,59,90,120,151,181,212,243,273,304,334,365}, // 365 days, non-leap
{0,31,60,91,121,152,182,213,244,274,305,335,366} // 366 days, leap
};
uint64 SecondsSinceEpoch = source[i]/1000;
sec = SecondsSinceEpoch + 11644473600;
wday = (uint)((sec / 86400 + 1) % 7); // day of week
quadricentennials = (uint)(sec / 12622780800ULL); // 400*365.2425*24*3600
sec %= 12622780800ULL;
centennials = (uint)(sec / 3155673600ULL); // 100*(365+24/100)*24*3600
if (centennials > 3)
{
centennials = 3;
}
sec -= centennials * 3155673600ULL;
quadrennials = (uint)(sec / 126230400); // 4*(365+1/4)*24*3600
if (quadrennials > 24)
{
quadrennials = 24;
}
sec -= quadrennials * 126230400ULL;
annuals = (uint)(sec / 31536000); // 365*24*3600
if (annuals > 3)
{
annuals = 3;
}
sec -= annuals * 31536000ULL;
year = 1601 + quadricentennials * 400 + centennials * 100 + quadrennials * 4 + annuals;
leap = !(year % 4) && (year % 100 || !(year % 400));
// Calculate the day of the year and the time
yday = sec / 86400;
sec %= 86400;
hour = sec / 3600;
sec %= 3600;
min = sec / 60;
sec %= 60;
// Calculate the month
for (mday = month = 1; month < 13; month++)
{
if (yday < daysSinceJan1st[leap][month])
{
mday += yday - daysSinceJan1st[leap][month - 1];
break;
}
}
dest[i] = year*10000+month*100+mday;
}
};
void select(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums, queue<float_type> op_nums_f, queue<unsigned int> op_nums_precision, CudaSet* a,
CudaSet* b, vector<thrust::device_vector<int_type> >& distinct_tmp, bool& one_liner)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<int_type> exe_nums;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
unsigned int colCount = 0;
stack<int> col_type;
string grp_type;
stack<string> grp_type1;
stack<string> col_val;
size_t res_size = 0;
stack<string> exe_value1;
stack<int_type*> exe_vectors1;
stack<float_type*> exe_vectors1_d;
stack<int_type> exe_nums1;
stack<unsigned int> exe_precision;
stack<unsigned int> exe_precision1;
bool ts;
stack<bool> exe_ts;
stack<float_type*> exe_vectors_f;
stack<float_type> exe_nums_f;
float_type n1_f, n2_f, res_f;
bool one_line;
unsigned int dist_processed = 0;
bool prep = 0;
one_line = 0;
thrust::device_ptr<bool> d_di(thrust::raw_pointer_cast(a->grp.data()));
std::auto_ptr<ReduceByKeyPreprocessData> ppData;
if (a->grp_count && (a->mRecCount != 0))
res_size = a->grp_count;
std::clock_t start1 = std::clock();
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
//cout << ss << endl;
if(ss.compare("emit sel_name") != 0) {
grp_type = "NULL";
if (ss.compare("COUNT") == 0 || ss.compare("SUM") == 0 || ss.compare("AVG") == 0 || ss.compare("MIN") == 0 || ss.compare("MAX") == 0 || ss.compare("DISTINCT") == 0 || ss.compare("YEAR") == 0 || ss.compare("MONTH") == 0 || ss.compare("DAY") == 0) {
if(!prep && a->grp_count) {
mgpu::ReduceByKeyPreprocess<float_type>((int)a->mRecCount, thrust::raw_pointer_cast(d_di),
(bool*)0, head_flag_predicate<bool>(), (int*)0, (int*)0,
&ppData, *context);
prep = 1;
};
if(!a->grp_count && ss.compare("YEAR") && ss.compare("MONTH") && ss.compare("DAY")) {
one_line = 1;
};
if (ss.compare("YEAR") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
if(a->ts_cols[s1_val]) {
thrust::counting_iterator<unsigned int> begin(0);
gpu_getyear ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
}
else {
thrust::transform(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount, thrust::make_constant_iterator(10000), res, thrust::divides<int_type>());
exe_precision.push(a->decimal_zeroes[s1_val]);
};
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("MONTH") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::counting_iterator<unsigned int> begin(0);
gpu_getmonth ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("DAY") == 0) {
s1_val = exe_value.top();
exe_value.pop();
exe_type.pop();
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(a->mRecCount);
thrust::counting_iterator<unsigned int> begin(0);
gpu_getday ff((const int_type*)thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), thrust::raw_pointer_cast(res));
thrust::for_each(begin, begin + a->mRecCount, ff);
exe_precision.push(0);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if (ss.compare("DISTINCT") == 0) {
s1_val = exe_value.top();
exe_type.pop();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::copy(a->d_columns_int[s1_val].begin(), a->d_columns_int[s1_val].begin() + a->mRecCount,
distinct_tmp[dist_processed].begin());
dist_processed++;
thrust::device_ptr<int_type> res = thrust::device_malloc<int_type>(res_size);
exe_vectors.push(thrust::raw_pointer_cast(res));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 2) {
//will add a DISTINCT on strings if anyone needs it
cout << "DISTINCT on strings is not supported yet" << endl;
exit(0);
}
else {
cout << "DISTINCT on float is not supported yet" << endl;
exit(0);
};
}
else if (ss.compare("COUNT") == 0) {
s1 = exe_type.top();
if(s1.compare("VECTOR") != 0) { // non distinct
grp_type = "COUNT";
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<int_type> const_seq((int_type*)alloced_mem.back());
thrust::fill(const_seq, const_seq+a->mRecCount, (int_type)1);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(const_seq), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::device_free(const_seq);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), thrust::constant_iterator<int_type>(1),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> dest = thrust::device_malloc<int_type>(1);
dest[0] = a->mRecCount;
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
};
}
else
grp_type = "COUNTD";
exe_precision.push(0);
}
else if (ss.compare("SUM") == 0) {
/*if(op_case) {
cout << "found case " << endl;
op_case = 0;
while(!exe_type.empty())
{
cout << "CASE type " << exe_type.top() << endl;
exe_type.pop();
exit(0);
}
};
*/
grp_type = "SUM";
s1 = exe_type.top();
exe_type.pop();
if (s1.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
if (a->grp_count) {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, s3, (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di + a->mRecCount, source,
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(),thrust::plus<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
else {
thrust::device_ptr<float_type> source((float_type*)(s3));
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
a->mRecCount = 1;
};
cudaFree(s3);
}
if (s1.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
if (a->grp_count) {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(source), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else {
thrust::device_ptr<int_type> source((int_type*)(s3));
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(1);
count_diff[0] = mgpu::Reduce(thrust::raw_pointer_cast(source), a->mRecCount, *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
};
cudaFree(s3);
}
else if (s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->grp_count) {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
}
else {
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> dest;
int_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<int_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(dest, dest+(a->mRecCount), cc, (int_type)0);
};
exe_vectors.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> dest;
float_type cc = mgpu::Reduce(thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), a->mRecCount, *context);
if (one_line) {
dest = thrust::device_malloc<float_type>(1);
dest[0] = cc;
}
else {
dest = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(dest, dest+a->mRecCount, cc, (float_type)0);
};
exe_vectors_f.push(thrust::raw_pointer_cast(dest));
exe_type.push("VECTOR F");
};
};
exe_precision.push(a->decimal_zeroes[s1_val]);
}
}
else if (ss.compare("MIN") == 0) {
grp_type = "MIN";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(alloced_mem.empty()) {
alloc_pool(a->maxRecs);
};
thrust::device_ptr<unsigned int> d_di1((unsigned int*)alloced_mem.back());
thrust::copy(d_di, d_di+a->mRecCount,d_di1);
thrust::exclusive_scan(d_di1, d_di1+a->mRecCount, d_di1);
thrust::equal_to<unsigned int> binary_pred;
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
// mgpu::minimum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_int[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<int_type>());
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
// mgpu::minimum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
thrust::reduce_by_key(d_di1, d_di1+a->mRecCount, a->d_columns_float[s1_val].begin(),
thrust::make_discard_iterator(), count_diff,
binary_pred, thrust::minimum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("MAX") == 0) {
grp_type = "MAX";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::maximum<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::maximum<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
//thrust::reduce_by_key(d_di, d_di+(a->mRecCount), a->d_columns_float[s1_val].begin(),
// thrust::make_discard_iterator(), count_diff,
// head_flag_predicate<bool>(), thrust::maximum<float_type>());
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
}
else if (ss.compare("AVG") == 0) {
grp_type = "AVG";
s1 = exe_type.top();
exe_type.pop();
s1_val = exe_value.top();
exe_value.pop();
if(a->type[s1_val] == 0) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_int[s1_val].data()), (int_type)0,
mgpu::plus<int_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR");
}
else if(a->type[s1_val] == 1) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
ReduceByKeyApply(*ppData, thrust::raw_pointer_cast(a->d_columns_float[s1_val].data()), (float_type)0,
mgpu::plus<float_type>(), thrust::raw_pointer_cast(count_diff), *context);
exe_vectors_f.push(thrust::raw_pointer_cast(count_diff));
exe_type.push("VECTOR F");
}
exe_precision.push(a->decimal_zeroes[s1_val]);
};
};
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("FLOAT") == 0 || ss.compare("VECTOR") == 0 || ss.compare("VECTOR F") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
exe_precision.push(op_nums_precision.front());
op_nums_precision.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
else if (ss.compare("NAME") == 0) {
exe_value.push(op_value.front());
ts = a->ts_cols[op_value.front()];
op_value.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
if(p1)
n1 = n1*(unsigned int)pow(10,p1);
if(p2)
n2 = n2*(unsigned int)pow(10,p2);
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
thrust::device_ptr<int_type> p = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res,(int_type)0);
exe_type.push("VECTOR");
exe_vectors.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
thrust::device_ptr<float_type> p = thrust::device_malloc<float_type>(a->mRecCount);
thrust::sequence(p, p+(a->mRecCount),res_f,(float_type)0);
exe_type.push("VECTOR F");
exe_vectors_f.push(thrust::raw_pointer_cast(p));
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
exe_vectors_f.push(a->op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
exe_vectors_f.push(a->op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
if (a->type[s1_val] == 1) {
float_type* t = a->get_float_type_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_int_by_name(s1_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,1, p2, p1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
if (a->type[s2_val] == 1) {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_int_by_name(s2_val);
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,n1,ss,0, p2, p1));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t1 = a->get_int_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
auto p1 = a->decimal_zeroes[s1_val];
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_type.push("VECTOR");
exe_vectors.push(a->op(t,t1,ss,0,p2,p1));
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (a->type[s2_val] == 0) {
int_type* t1 = a->get_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_float_type_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t1,t,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[s2_val] == 0) {
int_type* t = a->get_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s2_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,0,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,0));
alloced_mem.push_back(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[s1_val] == 0) {
int_type* t = a->get_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = a->decimal_zeroes[s1_val];
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(t,s3,ss,1,p2,p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
}
else {
float_type* t = a->get_float_type_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,t,ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(t,s3,ss,1));
alloced_mem.push_back(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,1, p1, p2));
//cudaFree(s3);
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,1));
alloced_mem.push_back(s3);
}
}
                    else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p2, p1, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3,n1, ss,0, p2, p1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,(float_type)n1, ss,0));
alloced_mem.push_back(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,1));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("FLOAT") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3,n1_f, ss,0));
alloced_mem.push_back(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
auto p1 = exe_precision.top();
exe_precision.pop();
auto p2 = exe_precision.top();
exe_precision.pop();
auto pres = precision_func(p1, p2, ss);
exe_precision.push(pres);
exe_vectors.push(a->op(s3, s4,ss,0,p1,p2));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,0));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(a->op(s3, s4,ss,1));
alloced_mem.push_back(s3);
alloced_mem.push_back(s4);
}
}
}
    }
else {
// here we need to save what is where
col_val.push(op_value.front());
op_value.pop();
grp_type1.push(grp_type);
if(!exe_nums.empty()) { //number
col_type.push(0);
exe_nums1.push(exe_nums.top());
exe_nums.pop();
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_value.empty()) { //field name
col_type.push(1);
exe_precision1.push(a->decimal_zeroes[exe_value.top()]);
exe_value1.push(exe_value.top());
exe_ts.push(ts);
exe_value.pop();
};
if(!exe_vectors.empty()) { //vector int
exe_vectors1.push(exe_vectors.top());
exe_vectors.pop();
col_type.push(2);
exe_precision1.push(exe_precision.top());
exe_precision.pop();
};
if(!exe_vectors_f.empty()) { //vector float
exe_vectors1_d.push(exe_vectors_f.top());
exe_vectors_f.pop();
col_type.push(3);
};
colCount++;
};
};
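    // Second pass: move each computed column into the result set b, mapping aggregate names to grp_type codes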
for(unsigned int j=0; j < colCount; j++) {
if ((grp_type1.top()).compare("COUNT") == 0 )
b->grp_type[col_val.top()] = 0;
else if ((grp_type1.top()).compare("AVG") == 0 )
b->grp_type[col_val.top()] = 1;
else if ((grp_type1.top()).compare("SUM") == 0 )
b->grp_type[col_val.top()] = 2;
else if ((grp_type1.top()).compare("NULL") == 0 )
b->grp_type[col_val.top()] = 3;
else if ((grp_type1.top()).compare("MIN") == 0 )
b->grp_type[col_val.top()] = 4;
else if ((grp_type1.top()).compare("MAX") == 0 )
b->grp_type[col_val.top()] = 5;
else if ((grp_type1.top()).compare("COUNTD") == 0 ) {
b->grp_type[col_val.top()] = 6;
};
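        // col_type codes: 0 = numeric constant, 1 = column name, 2 = int vector, 3 = float vector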
if(col_type.top() == 0) {
// create a vector
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
thrust::copy_if(thrust::make_constant_iterator((int)exe_nums1.top()), thrust::make_constant_iterator((int)exe_nums1.top()) + a->mRecCount, d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else {
thrust::device_ptr<int_type> s = thrust::device_malloc<int_type>(a->mRecCount);
thrust::sequence(s, s+(a->mRecCount), (int)exe_nums1.top(), 0);
b->addDeviceColumn(thrust::raw_pointer_cast(s), col_val.top(), a->mRecCount);
}
exe_nums1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 1) {
if(a->type[exe_value1.top()] == 0 || a->type[exe_value1.top()] == 2) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<int_type> count_diff = thrust::device_malloc<int_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_int[exe_value1.top()].begin(),a->d_columns_int[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size);
thrust::device_free(count_diff);
}
else
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_int[exe_value1.top()].data()) , col_val.top(), a->mRecCount);
if(a->type[exe_value1.top()] == 0) {
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
b->ts_cols[col_val.top()] = exe_ts.top();
};
if(a->type[exe_value1.top()] == 2 || (a->type[exe_value1.top()] == 0 && a->string_map.find(exe_value1.top()) != a->string_map.end())) {
b->string_map[col_val.top()] = a->string_map[exe_value1.top()];
};
exe_precision1.pop();
exe_ts.pop();
}
else if(a->type[exe_value1.top()] == 1) {
//modify what we push there in case of a grouping
if (a->grp_count) {
thrust::device_ptr<float_type> count_diff = thrust::device_malloc<float_type>(res_size);
//thrust::device_ptr<bool> d_grp(a->grp);
thrust::copy_if(a->d_columns_float[exe_value1.top()].begin(), a->d_columns_float[exe_value1.top()].begin() + a->mRecCount,
d_di, count_diff, thrust::identity<bool>());
b->addDeviceColumn(thrust::raw_pointer_cast(count_diff) , col_val.top(), res_size, a->decimal[exe_value1.top()]);
thrust::device_free(count_diff);
}
else {
b->addDeviceColumn(thrust::raw_pointer_cast(a->d_columns_float[exe_value1.top()].data()), col_val.top(), a->mRecCount, a->decimal[exe_value1.top()]);
};
}
exe_value1.pop();
}
else if(col_type.top() == 2) { // int
if (a->grp_count)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), res_size);
else {
if(!one_line)
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), a->mRecCount);
else
b->addDeviceColumn(exe_vectors1.top() , col_val.top(), 1);
};
cudaFree(exe_vectors1.top());
exe_vectors1.pop();
b->decimal_zeroes[col_val.top()] = exe_precision1.top();
exe_precision1.pop();
}
else if(col_type.top() == 3) { //float
if (a->grp_count) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), res_size, 1);
}
else {
if(!one_line) {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), a->mRecCount, 1);
}
else {
b->addDeviceColumn(exe_vectors1_d.top() , col_val.top(), 1, 1);
};
};
cudaFree(exe_vectors1_d.top());
exe_vectors1_d.pop();
};
col_type.pop();
col_val.pop();
grp_type1.pop();
};
if (!a->grp_count) {
if(!one_line)
b->mRecCount = a->mRecCount;
else
b->mRecCount = 1;
one_liner = one_line;
}
else {
b->mRecCount = res_size;
one_liner = 0;
};
}
|
c1f7a66b53f81a2c8703de74387ee578816d6626.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "anchor_target_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
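// IoU of anchor a and gt box b; for ignored/background boxes (cls <= 0) the overlap is measured against the anchor area only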
__device__ inline float devIoU(float const * const a, float const * const b, const int cls) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
if (cls <= 0){
return interS / Sa;
}
return interS / (Sa + Sb - interS);
}
__device__ inline void bbox_encoding(float const * const a, float const * const b, float* c) {
// a: given box
    // b: ground truth
// c: output deltas
const float bw = b[2] - b[0] + 1.0;
const float bh = b[3] - b[1] + 1.0;
const float bx = b[0] + bw * 0.5;
const float by = b[1] + bh * 0.5;
const float aw = a[2] - a[0] + 1.0;
const float ah = a[3] - a[1] + 1.0;
const float ax = a[0] + aw * 0.5;
const float ay = a[1] + ah * 0.5;
c[0] = (bx - ax) / aw / 0.1;
c[1] = (by - ay) / ah / 0.1;
c[2] = log(bw / aw) / 0.2;
c[3] = log(bh / ah) / 0.2;
}
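// For every anchor: find its best-overlapping ground truth box, assign a classification label and encode box regression targets for positives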
__global__ void AnchorTargetForward(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
const float bg_overlap, const float fg_overlap, const float ignored_overlap,
long* labels, float* deltas, float* bbwght, float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, num_anchors) {
int n = index * 4;
float best_iou = 0.;
int best_ind = -1;
int match_cls = -1;
for (int j = 0; j < num_gts; j ++){
int m = j * 5;
int cls = int(gt_boxes[m+4]);
float iou = devIoU(anchors + n, gt_boxes + m, cls);
overlaps[index * num_gts + j] = iou;
if (best_iou <= iou)
{
best_iou = iou;
best_ind = j;
match_cls = cls;
}
if (cls <= 0 && iou >= ignored_overlap){
best_iou = iou;
match_cls = cls;
best_ind = j;
break;
}
}
if(match_cls <= 0 && best_iou >= ignored_overlap){
labels[index] = -1;
}else if(match_cls > 0 && best_iou >= fg_overlap){
labels[index] = match_cls;
}else if(best_iou < fg_overlap && best_iou >= bg_overlap){
labels[index] = -1;
}else{
labels[index] = 0;
}
if (best_iou >= fminf(0.5, fg_overlap) && match_cls > 0 && best_ind >= 0){
bbox_encoding(anchors + n, gt_boxes + best_ind * 5, deltas + n);
bbwght[n] = 1.0;
bbwght[n + 1] = 1.0;
bbwght[n + 2] = 1.0;
bbwght[n + 3] = 1.0;
}
}
}
__global__ void AssignBestMatchKernel(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
long* labels, float* deltas, float* bbwght, float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, num_gts) {
int n = index * 5;
float best_iou = 0.;
int best_ind = -1;
int match_cls = gt_boxes[n + 4];
if(match_cls > 0){
for (int j = 0; j < num_anchors; j ++){
if (best_iou < overlaps[j * num_gts + index]){
best_iou = overlaps[j * num_gts + index];
best_ind = j;
}
}
labels[best_ind] = match_cls;
int m = best_ind * 4;
bbox_encoding(anchors + m, gt_boxes + n, deltas + m);
bbwght[m] = 1.0;
bbwght[m+1] = 1.0;
bbwght[m+2] = 1.0;
bbwght[m+3] = 1.0;
}
}
}
int AnchorTargetForwardLaucher(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
const float bg_overlap, const float fg_overlap, const float ignored_overlap,
long* labels, float* deltas, float* bbwght, float* overlaps, hipStream_t stream)
{
const int kThreadsPerBlock = 1024;
hipError_t err;
hipLaunchKernelGGL(( AnchorTargetForward), dim3((num_anchors + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
anchors, gt_boxes, num_anchors, num_gts,
bg_overlap, fg_overlap, ignored_overlap,
labels, deltas, bbwght, overlaps
);
// AssignBestMatchKernel<<<(num_gts + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
// anchors, gt_boxes, num_anchors, num_gts,
// labels, deltas, bbwght, overlaps
// );
err = hipGetLastError();
if(hipSuccess != err) {
fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
exit(-1);
}
return 1;
}
#ifdef __cplusplus
}
#endif
| c1f7a66b53f81a2c8703de74387ee578816d6626.cu | #ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "anchor_target_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
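// IoU of anchor a and gt box b; for ignored/background boxes (cls <= 0) the overlap is measured against the anchor area only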
__device__ inline float devIoU(float const * const a, float const * const b, const int cls) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
if (cls <= 0){
return interS / Sa;
}
return interS / (Sa + Sb - interS);
}
__device__ inline void bbox_encoding(float const * const a, float const * const b, float* c) {
// a: given box
    // b: ground truth
// c: output deltas
const float bw = b[2] - b[0] + 1.0;
const float bh = b[3] - b[1] + 1.0;
const float bx = b[0] + bw * 0.5;
const float by = b[1] + bh * 0.5;
const float aw = a[2] - a[0] + 1.0;
const float ah = a[3] - a[1] + 1.0;
const float ax = a[0] + aw * 0.5;
const float ay = a[1] + ah * 0.5;
c[0] = (bx - ax) / aw / 0.1;
c[1] = (by - ay) / ah / 0.1;
c[2] = log(bw / aw) / 0.2;
c[3] = log(bh / ah) / 0.2;
}
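// For every anchor: find its best-overlapping ground truth box, assign a classification label and encode box regression targets for positives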
__global__ void AnchorTargetForward(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
const float bg_overlap, const float fg_overlap, const float ignored_overlap,
long* labels, float* deltas, float* bbwght, float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, num_anchors) {
int n = index * 4;
float best_iou = 0.;
int best_ind = -1;
int match_cls = -1;
for (int j = 0; j < num_gts; j ++){
int m = j * 5;
int cls = int(gt_boxes[m+4]);
float iou = devIoU(anchors + n, gt_boxes + m, cls);
overlaps[index * num_gts + j] = iou;
if (best_iou <= iou)
{
best_iou = iou;
best_ind = j;
match_cls = cls;
}
if (cls <= 0 && iou >= ignored_overlap){
best_iou = iou;
match_cls = cls;
best_ind = j;
break;
}
}
if(match_cls <= 0 && best_iou >= ignored_overlap){
labels[index] = -1;
}else if(match_cls > 0 && best_iou >= fg_overlap){
labels[index] = match_cls;
}else if(best_iou < fg_overlap && best_iou >= bg_overlap){
labels[index] = -1;
}else{
labels[index] = 0;
}
if (best_iou >= fminf(0.5, fg_overlap) && match_cls > 0 && best_ind >= 0){
bbox_encoding(anchors + n, gt_boxes + best_ind * 5, deltas + n);
bbwght[n] = 1.0;
bbwght[n + 1] = 1.0;
bbwght[n + 2] = 1.0;
bbwght[n + 3] = 1.0;
}
}
}
__global__ void AssignBestMatchKernel(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
long* labels, float* deltas, float* bbwght, float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, num_gts) {
int n = index * 5;
float best_iou = 0.;
int best_ind = -1;
int match_cls = gt_boxes[n + 4];
if(match_cls > 0){
for (int j = 0; j < num_anchors; j ++){
if (best_iou < overlaps[j * num_gts + index]){
best_iou = overlaps[j * num_gts + index];
best_ind = j;
}
}
labels[best_ind] = match_cls;
int m = best_ind * 4;
bbox_encoding(anchors + m, gt_boxes + n, deltas + m);
bbwght[m] = 1.0;
bbwght[m+1] = 1.0;
bbwght[m+2] = 1.0;
bbwght[m+3] = 1.0;
}
}
}
int AnchorTargetForwardLaucher(
const float* anchors, const float* gt_boxes,
const int num_anchors, const int num_gts,
const float bg_overlap, const float fg_overlap, const float ignored_overlap,
long* labels, float* deltas, float* bbwght, float* overlaps, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
cudaError_t err;
AnchorTargetForward<<<(num_anchors + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
anchors, gt_boxes, num_anchors, num_gts,
bg_overlap, fg_overlap, ignored_overlap,
labels, deltas, bbwght, overlaps
);
// AssignBestMatchKernel<<<(num_gts + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
// anchors, gt_boxes, num_anchors, num_gts,
// labels, deltas, bbwght, overlaps
// );
err = cudaGetLastError();
if(cudaSuccess != err) {
fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
f0d95226826d7447caef39c83749508a051bece0.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -arch=sm_30 -lcufft fft_batched.cu
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <stdio.h>
#include <math.h>
#define DATASIZE 8
#define BATCH 3
#define GRID_DIMENSION 3
#define BLOCK_DIMENSION 3
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
      fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void conjugate(long int nelem, hipfftComplex *conj);
/********/
/* MAIN */
/********/
int main ()
{
// --- Host side input data allocation and initialization
hipfftReal *hostInputData = (hipfftReal*)malloc(DATASIZE*BATCH*sizeof(hipfftReal));
int grid_size = GRID_DIMENSION;
int block_size = BLOCK_DIMENSION;
dim3 DimGrid(grid_size, grid_size, grid_size);
dim3 DimBlock(block_size, block_size, block_size);
for (int i=0; i<BATCH; i++)
for (int j=0; j<DATASIZE; j++){
hostInputData[i*DATASIZE + j] = (hipfftReal)((i + 1) + j);
printf("hostInputData[%d]=%f\n",i*DATASIZE + j,hostInputData[i*DATASIZE + j]);
}
// --- Device side input data allocation and initialization
hipfftReal *deviceInputData;
gpuErrchk(hipMalloc((void**)&deviceInputData, DATASIZE * BATCH * sizeof(hipfftReal)));
hipMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(hipfftReal), hipMemcpyHostToDevice);
// --- Host side output data allocation
hipfftComplex *hostOutputData = (hipfftComplex*)malloc((DATASIZE / 2 + 1) * BATCH * sizeof(hipfftComplex));
// --- Device side output data allocation
hipfftComplex *deviceOutputData;
hipfftComplex *fft_conj;
gpuErrchk(hipMalloc((void**)&deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(hipfftComplex)));
gpuErrchk(hipMalloc((void**)&fft_conj, (DATASIZE / 2 + 1) * BATCH * sizeof(hipfftComplex)));
// --- Batched 1D FFTs
hipfftHandle handle;
int rank = 1; // --- 1D FFTs
int n[] = { DATASIZE }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = DATASIZE, odist = (DATASIZE / 2 + 1); // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = BATCH; // --- Number of batched executions
printf("idist = %d\n", idist);
printf("odist = %d\n", odist);
printf("n = %d\n", n[0]);
hipfftPlanMany(&handle, rank, n,
inembed, istride, idist,
onembed, ostride, odist, HIPFFT_R2C, batch);
//hipfftPlan1d(&handle, DATASIZE, HIPFFT_R2C, BATCH);
hipfftExecR2C(handle, deviceInputData, deviceOutputData);
gpuErrchk(hipMemcpy(fft_conj, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(hipfftComplex), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( conjugate) , dim3(DimGrid), dim3(DimBlock) , 0, 0, (DATASIZE / 2 + 1) * BATCH, fft_conj );
// --- Device->Host copy of the results
gpuErrchk(hipMemcpy(hostOutputData, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(hipfftComplex), hipMemcpyDeviceToHost));
for (int i=0; i<BATCH; i++)
for (int j=0; j<(DATASIZE / 2 + 1); j++)
printf("Batch = %i j= %i real %f imag %f\n", i, j, hostOutputData[i*(DATASIZE / 2 + 1) + j].x, hostOutputData[i*(DATASIZE / 2 + 1) + j].y);
hipfftDestroy(handle);
gpuErrchk(hipFree(deviceOutputData));
gpuErrchk(hipFree(deviceInputData));
gpuErrchk(hipFree(fft_conj));
hipDeviceSynchronize();
hipDeviceReset();
return EXIT_SUCCESS;
}
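// Prints the negated imaginary part (the conjugate) of each element; the buffer itself is left unmodified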
__global__ void conjugate(long int nelem, hipfftComplex *conj)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int thx = threadIdx.x;
int thy = threadIdx.y;
int thz = threadIdx.z;
int NumThread = blockDim.x*blockDim.y*blockDim.z;
int idThread = (thx + thy*blockDim.x) + thz*(blockDim.x*blockDim.y);
int BlockId = (bx + by*gridDim.x) + bz*(gridDim.x*gridDim.y);
int uniqueid = idThread + NumThread*BlockId;
if (uniqueid < nelem){
printf("Unique ID = %d - conj = %f\n", uniqueid, conj[uniqueid].y*-1);
}
//__syncthreads();
}
| f0d95226826d7447caef39c83749508a051bece0.cu | //nvcc -arch=sm_30 -lcufft fft_batched.cu
#include <cuda.h>
#include <cufft.h>
#include <stdio.h>
#include <math.h>
#define DATASIZE 8
#define BATCH 3
#define GRID_DIMENSION 3
#define BLOCK_DIMENSION 3
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void conjugate(long int nelem, cufftComplex *conj);
/********/
/* MAIN */
/********/
int main ()
{
// --- Host side input data allocation and initialization
cufftReal *hostInputData = (cufftReal*)malloc(DATASIZE*BATCH*sizeof(cufftReal));
int grid_size = GRID_DIMENSION;
int block_size = BLOCK_DIMENSION;
dim3 DimGrid(grid_size, grid_size, grid_size);
dim3 DimBlock(block_size, block_size, block_size);
for (int i=0; i<BATCH; i++)
for (int j=0; j<DATASIZE; j++){
hostInputData[i*DATASIZE + j] = (cufftReal)((i + 1) + j);
printf("hostInputData[%d]=%f\n",i*DATASIZE + j,hostInputData[i*DATASIZE + j]);
}
// --- Device side input data allocation and initialization
cufftReal *deviceInputData;
gpuErrchk(cudaMalloc((void**)&deviceInputData, DATASIZE * BATCH * sizeof(cufftReal)));
cudaMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(cufftReal), cudaMemcpyHostToDevice);
// --- Host side output data allocation
cufftComplex *hostOutputData = (cufftComplex*)malloc((DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex));
// --- Device side output data allocation
cufftComplex *deviceOutputData;
cufftComplex *fft_conj;
gpuErrchk(cudaMalloc((void**)&deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex)));
gpuErrchk(cudaMalloc((void**)&fft_conj, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex)));
// --- Batched 1D FFTs
cufftHandle handle;
int rank = 1; // --- 1D FFTs
int n[] = { DATASIZE }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = DATASIZE, odist = (DATASIZE / 2 + 1); // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = BATCH; // --- Number of batched executions
printf("idist = %d\n", idist);
printf("odist = %d\n", odist);
printf("n = %d\n", n[0]);
cufftPlanMany(&handle, rank, n,
inembed, istride, idist,
onembed, ostride, odist, CUFFT_R2C, batch);
//cufftPlan1d(&handle, DATASIZE, CUFFT_R2C, BATCH);
cufftExecR2C(handle, deviceInputData, deviceOutputData);
gpuErrchk(cudaMemcpy(fft_conj, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToDevice));
conjugate <<< DimGrid, DimBlock >>> ((DATASIZE / 2 + 1) * BATCH, fft_conj );
// --- Device->Host copy of the results
gpuErrchk(cudaMemcpy(hostOutputData, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
for (int i=0; i<BATCH; i++)
for (int j=0; j<(DATASIZE / 2 + 1); j++)
printf("Batch = %i j= %i real %f imag %f\n", i, j, hostOutputData[i*(DATASIZE / 2 + 1) + j].x, hostOutputData[i*(DATASIZE / 2 + 1) + j].y);
cufftDestroy(handle);
gpuErrchk(cudaFree(deviceOutputData));
gpuErrchk(cudaFree(deviceInputData));
gpuErrchk(cudaFree(fft_conj));
cudaDeviceSynchronize();
cudaDeviceReset();
return EXIT_SUCCESS;
}
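// Prints the negated imaginary part (the conjugate) of each element; the buffer itself is left unmodified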
__global__ void conjugate(long int nelem, cufftComplex *conj)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int thx = threadIdx.x;
int thy = threadIdx.y;
int thz = threadIdx.z;
int NumThread = blockDim.x*blockDim.y*blockDim.z;
int idThread = (thx + thy*blockDim.x) + thz*(blockDim.x*blockDim.y);
int BlockId = (bx + by*gridDim.x) + bz*(gridDim.x*gridDim.y);
int uniqueid = idThread + NumThread*BlockId;
if (uniqueid < nelem){
printf("Unique ID = %d - conj = %f\n", uniqueid, conj[uniqueid].y*-1);
}
//__syncthreads();
}
|
d429c5cfd198edff652491ac9f505e5b050c320e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cum_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(
const T* idata, T* odata, int src_base, int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
T src_data = static_cast<T>(0);
int src_offset = BLOCK_SIZE - offset - 1;
if (src_offset < valid_item) {
src_data = idata[src_base + src_offset];
}
sh_mem[offset] = src_data;
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data,
T* reverse_data,
int reverse_size,
int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(
matrix_data, reverse_data, src_offset, dst_offset, valid_item);
}
}
template <typename T, typename Op>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total_;
Op op_;
__device__ BlockPrefixCallbackOp(T running_total, Op op)
: running_total_(running_total), op_(op) {}
// Callback operator to be entered by the first warp of threads in the block.
// tid 0 is responsible for returning a value for seeding the block-wide scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total_;
running_total_ = op_(old_prefix, block_aggregate);
return old_prefix;
}
};
// No bank-conflict transpose
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata,
const T* idata,
size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
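// LogAddExp computes log(exp(a) + exp(b)) in a numerically stable way by factoring out max(a, b)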
struct LogAddExp {
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return ::log(1 + ::exp(::min(a, b) - ::max(a, b))) +
::max(a, b);
}
};
template <typename T, typename op>
struct Identity;
template <typename T>
struct Identity<T, hipcub::Sum> {
static constexpr T value = 0;
};
template <typename T>
struct Identity<T, LogAddExp> {
static constexpr T value = std::numeric_limits<T>::lowest();
};
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD, typename Op>
__global__ void BlockScanKernel(T* d_out,
const T* d_in,
int inner_size,
int outer_size,
int scan_size,
bool exclusive,
Op op) {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
// Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
typedef hipcub::
BlockLoad<MT, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef cub::BlockStore<MT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef hipcub::BlockScan<MT, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
BlockPrefixCallbackOp<MT, Op> prefix_op(Identity<MT, Op>::value, op);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = block_offset + bx * scan_size;
MT thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, op, prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, op, prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
template <typename Context, typename T>
typename std::enable_if<!std::is_same<T, phi::dtype::float16>::value &&
!std::is_same<T, phi::dtype::bfloat16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const T* in_data,
T* out_data,
int64_t size,
bool reverse,
bool exclusive) {
#ifdef __HIPCC__
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
#else
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
#endif
if (reverse) {
thrust::reverse_iterator<thrust::device_ptr<const T>> reversed_in(
thrust::device_pointer_cast(in_data) + size);
thrust::reverse_iterator<thrust::device_ptr<T>> reversed_out(
thrust::device_pointer_cast(out_data) + size);
if (exclusive) {
thrust::exclusive_scan(
policy, reversed_in, reversed_in + size, reversed_out);
} else {
thrust::inclusive_scan(
policy, reversed_in, reversed_in + size, reversed_out);
}
} else {
if (exclusive) {
thrust::exclusive_scan(policy, in_data, in_data + size, out_data);
} else {
thrust::inclusive_scan(policy, in_data, in_data + size, out_data);
}
}
return;
}
template <typename Context, typename T>
typename std::enable_if<std::is_same<T, phi::dtype::float16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const phi::dtype::float16* in_data,
phi::dtype::float16* out_data,
int64_t size,
bool reverse,
bool exclusive) {}
template <typename Context, typename T>
typename std::enable_if<std::is_same<T, phi::dtype::bfloat16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const phi::dtype::bfloat16* in_data,
phi::dtype::bfloat16* out_data,
int64_t size,
bool reverse,
bool exclusive) {}
template <typename T, typename Context, typename Op>
void ScanKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
Op op,
DenseTensor* out) {
T* out_data = dev_ctx.template Alloc<T>(out);
// For 0D Tensor
if (out->numel() == 1) {
auto raw_dims = out->dims();
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
out->Resize(raw_dims);
return;
}
auto out_dims = out->dims();
auto size = x.numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()),
true,
phi::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(),
out_dims.size() - 1,
axis));
if (axis < 0) {
axis += out_dims.size();
}
const T* in_data = x.data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the axis dimension.
if (!std::is_same<T, phi::dtype::float16>::value &&
!std::is_same<T, phi::dtype::bfloat16>::value &&
std::is_same<Op, hipcub::Sum>::value && size == out_dims[axis]) {
ThrustCumsumKernel<Context, T>(
dev_ctx, in_data, out_data, size, reverse, exclusive);
return;
}
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
DenseTensor tmp_tensor;
tmp_tensor.Resize(out_dims);
auto* tmp_data = dev_ctx.template Alloc<T>(&tmp_tensor);
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
if (!transpose) next_in_data = tmp_data;
swap_ptr(next_in_data, next_out_data);
} else {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
in_data, out_data, scan_size, outer_size, inner_size);
}
}
int64_t grid_size = outer_size * inner_size;
if (!transpose && !reverse) {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4, Op>), dim3(grid_size), dim3(128), 0, dev_ctx.stream(),
out_data, in_data, outer_size, inner_size, scan_size, exclusive, op);
} else {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4, Op>)
, dim3(grid_size), dim3(128), 0, dev_ctx.stream(), next_out_data,
next_in_data,
outer_size,
inner_size,
scan_size,
exclusive,
op);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
next_out_data, next_in_data, width, height);
}
}
template <typename T, typename Context>
void CumsumKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
using Op = hipcub::Sum;
auto op = Op();
ScanKernel<T, Context, Op>(
dev_ctx, x, axis.to<int>(), flatten, exclusive, reverse, op, out);
}
template <typename T, typename Context>
void LogcumsumexpKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
using Op = LogAddExp;
auto op = Op();
ScanKernel<T, Context, Op>(
dev_ctx, x, axis, flatten, exclusive, reverse, op, out);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
phi::dtype::float16,
double,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(
logcumsumexp, GPU, ALL_LAYOUT, phi::LogcumsumexpKernel, float, double) {}
#else
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
double,
int16_t,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(logcumsumexp,
GPU,
ALL_LAYOUT,
phi::LogcumsumexpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif
| d429c5cfd198edff652491ac9f505e5b050c320e.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cum_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(
const T* idata, T* odata, int src_base, int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
T src_data = static_cast<T>(0);
int src_offset = BLOCK_SIZE - offset - 1;
if (src_offset < valid_item) {
src_data = idata[src_base + src_offset];
}
sh_mem[offset] = src_data;
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data,
T* reverse_data,
int reverse_size,
int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(
matrix_data, reverse_data, src_offset, dst_offset, valid_item);
}
}
template <typename T, typename Op>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total_;
Op op_;
__device__ BlockPrefixCallbackOp(T running_total, Op op)
: running_total_(running_total), op_(op) {}
// Callback operator to be entered by the first warp of threads in the block.
// tid 0 is responsible for returning a value for seeding the block-wide scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total_;
running_total_ = op_(old_prefix, block_aggregate);
return old_prefix;
}
};
// No bank-conflict transpose
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata,
const T* idata,
size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
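// LogAddExp computes log(exp(a) + exp(b)) in a numerically stable way by factoring out max(a, b)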
struct LogAddExp {
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return std::log(1 + std::exp(std::min(a, b) - std::max(a, b))) +
std::max(a, b);
}
};
template <typename T, typename op>
struct Identity;
template <typename T>
struct Identity<T, cub::Sum> {
static constexpr T value = 0;
};
template <typename T>
struct Identity<T, LogAddExp> {
static constexpr T value = std::numeric_limits<T>::lowest();
};
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD, typename Op>
__global__ void BlockScanKernel(T* d_out,
const T* d_in,
int inner_size,
int outer_size,
int scan_size,
bool exclusive,
Op op) {
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
// Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
typedef cub::
BlockLoad<MT, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef cub::BlockStore<MT,
BLOCK_THREADS,
ITEMS_PER_THREAD,
cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef cub::BlockScan<MT, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
BlockPrefixCallbackOp<MT, Op> prefix_op(Identity<MT, Op>::value, op);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = block_offset + bx * scan_size;
MT thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, op, prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, op, prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
template <typename Context, typename T>
typename std::enable_if<!std::is_same<T, phi::dtype::float16>::value &&
!std::is_same<T, phi::dtype::bfloat16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const T* in_data,
T* out_data,
int64_t size,
bool reverse,
bool exclusive) {
#ifdef __HIPCC__
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
#else
const auto& policy = thrust::cuda::par.on(dev_ctx.stream());
#endif
if (reverse) {
thrust::reverse_iterator<thrust::device_ptr<const T>> reversed_in(
thrust::device_pointer_cast(in_data) + size);
thrust::reverse_iterator<thrust::device_ptr<T>> reversed_out(
thrust::device_pointer_cast(out_data) + size);
if (exclusive) {
thrust::exclusive_scan(
policy, reversed_in, reversed_in + size, reversed_out);
} else {
thrust::inclusive_scan(
policy, reversed_in, reversed_in + size, reversed_out);
}
} else {
if (exclusive) {
thrust::exclusive_scan(policy, in_data, in_data + size, out_data);
} else {
thrust::inclusive_scan(policy, in_data, in_data + size, out_data);
}
}
return;
}
template <typename Context, typename T>
typename std::enable_if<std::is_same<T, phi::dtype::float16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const phi::dtype::float16* in_data,
phi::dtype::float16* out_data,
int64_t size,
bool reverse,
bool exclusive) {}
template <typename Context, typename T>
typename std::enable_if<std::is_same<T, phi::dtype::bfloat16>::value>::type
ThrustCumsumKernel(const Context& dev_ctx,
const phi::dtype::bfloat16* in_data,
phi::dtype::bfloat16* out_data,
int64_t size,
bool reverse,
bool exclusive) {}
template <typename T, typename Context, typename Op>
void ScanKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
Op op,
DenseTensor* out) {
T* out_data = dev_ctx.template Alloc<T>(out);
// For 0D Tensor
if (out->numel() == 1) {
auto raw_dims = out->dims();
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
out->Resize(raw_dims);
return;
}
auto out_dims = out->dims();
auto size = x.numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()),
true,
phi::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(),
out_dims.size() - 1,
axis));
if (axis < 0) {
axis += out_dims.size();
}
const T* in_data = x.data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the ‘axis’ dimension.
if (!std::is_same<T, phi::dtype::float16>::value &&
!std::is_same<T, phi::dtype::bfloat16>::value &&
std::is_same<Op, cub::Sum>::value && size == out_dims[axis]) {
ThrustCumsumKernel<Context, T>(
dev_ctx, in_data, out_data, size, reverse, exclusive);
return;
}
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
DenseTensor tmp_tensor;
tmp_tensor.Resize(out_dims);
auto* tmp_data = dev_ctx.template Alloc<T>(&tmp_tensor);
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
if (!transpose) next_in_data = tmp_data;
swap_ptr(next_in_data, next_out_data);
} else {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
in_data, out_data, scan_size, outer_size, inner_size);
}
}
int64_t grid_size = outer_size * inner_size;
if (!transpose && !reverse) {
BlockScanKernel<T, 128, 4, Op><<<grid_size, 128, 0, dev_ctx.stream()>>>(
out_data, in_data, outer_size, inner_size, scan_size, exclusive, op);
} else {
BlockScanKernel<T, 128, 4, Op>
<<<grid_size, 128, 0, dev_ctx.stream()>>>(next_out_data,
next_in_data,
outer_size,
inner_size,
scan_size,
exclusive,
op);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
next_out_data, next_in_data, width, height);
}
}
template <typename T, typename Context>
void CumsumKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
using Op = cub::Sum;
auto op = Op();
ScanKernel<T, Context, Op>(
dev_ctx, x, axis.to<int>(), flatten, exclusive, reverse, op, out);
}
template <typename T, typename Context>
void LogcumsumexpKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
using Op = LogAddExp;
auto op = Op();
ScanKernel<T, Context, Op>(
dev_ctx, x, axis, flatten, exclusive, reverse, op, out);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
phi::dtype::float16,
double,
int16_t,
int,
int64_t) {}
PD_REGISTER_KERNEL(
logcumsumexp, GPU, ALL_LAYOUT, phi::LogcumsumexpKernel, float, double) {}
#else
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
double,
int16_t,
int,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(logcumsumexp,
GPU,
ALL_LAYOUT,
phi::LogcumsumexpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#endif
|
49ad9075aed88603308bceeef4650ebb6b9b7a4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rd_kernel.h"
#include "uint_util.hcu"
#include <stdlib.h>
#include <stdio.h>
/*
* Utility function to initialize U and V
*/
__host__
void initializeConcentrations(unsigned int width, unsigned int height, float *U, float *V) {
float *_U = new float[width*height];
float *_V = new float[width*height];
int k = 0;
int i, j;
for (i = 0; i < width * height; ++i) {
_U[k] = 1.0f;
_V[k++] = 0.0f;
}
for (i = (0.48f)*height; i < (0.52f)*height; ++i) {
for (j = (0.48f)*width; j < (0.52f)*width; ++j) {
_U[ (i * width + j) ] = 0.5f;
_V[ (i * width + j) ] = 0.25f;
}
}
// Now perturb the entire grid. Bound the values by [0,1]
for (k = 0; k < width * height; ++k) {
if ( _U[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_U[k] += rRand * _U[k];
}
if ( _V[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_V[k] += rRand * _V[k];
}
}
// Upload initial state U and V to the GPU
hipMemcpy( U, _U, width*height*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( V, _V, width*height*sizeof(float), hipMemcpyHostToDevice );
delete[] _U;
delete[] _V;
}
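// 5-point Laplacian with periodic (wrap-around) boundaries, scaled by 1/dx^2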
__device__ void calculateLaplacian(float dx, float* array, const uint2& coordinate, float value, unsigned int width, unsigned int height, float& result) {
unsigned int up = ((coordinate.y-1 + height) % height)*width + coordinate.x;
unsigned int down = ((coordinate.y+1) % height) * width + coordinate.x;
unsigned int right = (coordinate.y)*width + (coordinate.x + 1) % width;
unsigned int left = (coordinate.y)*width + (coordinate.x - 1 + width) % width;
result = (array[right] + array[left] + array[up] + array[down] - 4*value)/(dx*dx);
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
__global__
void rd_kernel(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
  // Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// THIS IS WHERE YOU NEED TO IMPLEMENT THE REACTION-DIFFUSION KERNEL
//
// dU/dt = Du * lap U - U*V^2 + F * (1 - U)
// dV/dt = Dv * lap V + U*V^2 - (F + k) * V
float laplace;
float vU = U[idx], vV = V[idx];
calculateLaplacian(dx, U, co, vU, width, height, laplace);
float step = Du * laplace - vU*vV*vV + F * (1 - vU);
U[idx] = vU + dt*step;
calculateLaplacian(dx, V, co, vV, width, height, laplace);
step = Dv * laplace + vU*vV*vV - (F + k) * vV;
V[idx] = vV + dt*step;
}
/*
* Wrapper for the reaction-diffusion kernel.
* Called every frame by 'display'
* 'result_devPtr' is a floating buffer used for visualization.
* Make sure whatever needs visualization goes there.
*/
extern "C" __host__
void rd(unsigned int width, unsigned int height, float *result_devPtr) {
// Create buffers for 'U' and 'V' at first pass
static float *U, *V;
static bool first_pass = true;
if (first_pass){
// Allocate device memory for U and V
hipMalloc((void**)&U, width*height*sizeof(float));
hipMalloc((void**)&V, width*height*sizeof(float));
// Check for Cuda errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("\nCuda error detected: %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
// Initialize U and V on the CPU and upload to the GPU
initializeConcentrations( width, height, U, V );
// Make sure we never get in here again...
first_pass = false;
}
// Kernel block dimensions
const dim3 blockDim(16,16);
// Verify input image dimensions
if (width%blockDim.x || height%blockDim.y) {
printf("\nImage width and height must be a multiple of the block dimensions\n");
exit(1);
}
// Experiment with different settings of these constants
const float dt = 0.5f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f);
const float F = 0.012f;
const float k = 0.052f;
// Invoke kernel (update U and V)
hipLaunchKernelGGL(( rd_kernel), dim3(dim3(width/blockDim.x, height/blockDim.y)), dim3(blockDim) , 0, 0, width, height, dt, dx, Du, Dv, F, k, U, V );
// Check for errors
hipError_t err = hipGetLastError();
if( err != hipSuccess ){
printf("\nCuda error detected in 'rd_kernel': %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
// For visualization we use a 'float1' image. You can use either 'U' or 'V'.
hipMemcpy( result_devPtr, U, width*height*sizeof(float), hipMemcpyDeviceToDevice );
}
| 49ad9075aed88603308bceeef4650ebb6b9b7a4c.cu | #include "rd_kernel.h"
#include "uint_util.hcu"
#include <stdlib.h>
#include <stdio.h>
/*
* Utility function to initialize U and V
*/
__host__
void initializeConcentrations(unsigned int width, unsigned int height, float *U, float *V) {
float *_U = new float[width*height];
float *_V = new float[width*height];
int k = 0;
int i, j;
for (i = 0; i < width * height; ++i) {
_U[k] = 1.0f;
_V[k++] = 0.0f;
}
for (i = (0.48f)*height; i < (0.52f)*height; ++i) {
for (j = (0.48f)*width; j < (0.52f)*width; ++j) {
_U[ (i * width + j) ] = 0.5f;
_V[ (i * width + j) ] = 0.25f;
}
}
// Now perturb the entire grid. Bound the values by [0,1]
for (k = 0; k < width * height; ++k) {
if ( _U[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_U[k] += rRand * _U[k];
}
if ( _V[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_V[k] += rRand * _V[k];
}
}
// Upload initial state U and V to the GPU
cudaMemcpy( U, _U, width*height*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( V, _V, width*height*sizeof(float), cudaMemcpyHostToDevice );
delete[] _U;
delete[] _V;
}
__device__ void calculateLaplacian(float dx, float* array, const uint2& coordinate, float value, unsigned int width, unsigned int height, float& result) {
unsigned int up = ((coordinate.y-1 + height) % height)*width + coordinate.x;
unsigned int down = ((coordinate.y+1) % height) * width + coordinate.x;
unsigned int right = (coordinate.y)*width + (coordinate.x + 1) % width;
unsigned int left = (coordinate.y)*width + (coordinate.x - 1 + width) % width;
result = (array[right] + array[left] + array[up] + array[down] - 4*value)/(dx*dx);
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
__global__
void rd_kernel(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the curernt pixel
const unsigned int idx = co.y*width + co.x;
//
// THIS IS WHERE YOU NEED TO IMPLEMENT THE REACTION-DIFFUSION KERNEL
//
// dU/dt = Du * lap U - U*V^2 + F * (1 - U)
// dV/dt = Dv * lap V + U*V^2 - (F + k) * V
float laplace;
float vU = U[idx], vV = V[idx];
calculateLaplacian(dx, U, co, vU, width, height, laplace);
float step = Du * laplace - vU*vV*vV + F * (1 - vU);
U[idx] = vU + dt*step;
calculateLaplacian(dx, V, co, vV, width, height, laplace);
step = Dv * laplace + vU*vV*vV - (F + k) * vV;
V[idx] = vV + dt*step;
}
/*
* Wrapper for the reaction-diffusion kernel.
* Called every frame by 'display'
* 'result_devPtr' is a floating buffer used for visualization.
* Make sure whatever needs visualization goes there.
*/
extern "C" __host__
void rd(unsigned int width, unsigned int height, float *result_devPtr) {
// Create buffers for 'U' and 'V' at first pass
static float *U, *V;
static bool first_pass = true;
if (first_pass){
// Allocate device memory for U and V
cudaMalloc((void**)&U, width*height*sizeof(float));
cudaMalloc((void**)&V, width*height*sizeof(float));
// Check for Cuda errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("\nCuda error detected: %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
// Initialize U and V on the CPU and upload to the GPU
initializeConcentrations( width, height, U, V );
// Make sure we never get in here again...
first_pass = false;
}
// Kernel block dimensions
const dim3 blockDim(16,16);
// Verify input image dimensions
if (width%blockDim.x || height%blockDim.y) {
printf("\nImage width and height must be a multiple of the block dimensions\n");
exit(1);
}
// Experiment with different settings of these constants
const float dt = 0.5f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f);
const float F = 0.012f;
const float k = 0.052f;
// Invoke kernel (update U and V)
rd_kernel<<< dim3(width/blockDim.x, height/blockDim.y), blockDim >>>( width, height, dt, dx, Du, Dv, F, k, U, V );
// Check for errors
cudaError_t err = cudaGetLastError();
if( err != cudaSuccess ){
printf("\nCuda error detected in 'rd_kernel': %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
// For visualization we use a 'float1' image. You can use either 'U' or 'V'.
cudaMemcpy( result_devPtr, U, width*height*sizeof(float), cudaMemcpyDeviceToDevice );
}
|
038e649e676539db24bfff7cf53fab91c809167c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
//3D matrix containing the picture and the frames
unsigned char * I;
//dimension X of the picture in pixels
int IszX = 128;
//dimension Y of the picture in pixels
int IszY = 128;
//number of frames
int Nfr = 10;
//define number of particles
int Nparticles = 100;
int * seed;
texture <float> tex_CDF;
texture <float> tex_sums;
const int threads_per_block = 512;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 50
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index){
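// Compares the pixel intensities under a particle's footprint against the known
// background (~100) and foreground (~228) intensities; pixels close to the foreground
// value increase the likelihood, background-like pixels decrease it.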
float likelihoodSum = 0.0;
int x;
for(x = 0; x < numOnes; x++)
likelihoodSum += (pow((float)(I[ind[index*numOnes + x]] - 100),2) - pow((float)(I[ind[index*numOnes + x]]-228),2))/50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles){
int x;
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a float representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index)
{
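// Simple linear congruential generator (the multiplier and increment are the classic
// ANSI C rand() constants); each particle owns one seed slot so that device threads
// never share RNG state.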
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A*seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num/((float) M));
}
float randu()
{
float max = (float)RAND_MAX;
int num = rand();
return num/max;
}
/******************************
* RANDN
* GENERATES A NORMAL DISTRIBUTION
* returns a float representing a random number generated using the Box-Muller (polar) method
* see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
******************************/
float randn(){
//Box-Muller algorithm (polar form)
float u1, u2, v1, v2;
float s = 2;
while(s >= 1){
u1 = randu();
u2 = randu();
v1 = 2.0*u1 - 1.0;
v2 = 2.0*u2 - 1.0;
s = pow(v1, 2)+pow(v2, 2);
}
float x1 = v1*sqrt((-2.0*log(s))/s);
return x1;
}
__device__ float d_randn(int * seed, int index){
//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2*pi*v);
float rt = -2*log(u);
return sqrt(rt)*cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles){
int x;
float sum = 0;
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles){
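// Systematic resampling: each thread scans the CDF (bound to tex_CDF) for the first
// entry >= its u[i], copies that particle's state into (xj, yj) and resets the weight
// to 1/Nparticles.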
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(tex1Dfetch(tex_CDF, x) >= u[i] && index == -1){
index = x;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1/((float)(Nparticles));
}
__syncthreads();
}
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed)
{
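// Divides every weight by the global sum produced by sum_kernel; thread 0 then rebuilds
// the CDF serially and draws the single random offset u1, from which each thread derives
// its own resampling position u[i].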
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
__shared__ float u1, sumWeights;
sumWeights = partial_sums[0];
if(i < Nparticles)
{
weights[i] = weights[i]/sumWeights;
}
if(i == 0)
{
cdfCalc(CDF, weights, Nparticles);
u1 = (1/((float)(Nparticles)))*d_randu(seed, i);
}
if(i < Nparticles)
{
__syncthreads();
u[i] = u1 + i/((float)(Nparticles));
}
}
__global__ void sum_kernel(float* partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float sum = 0;
for(x = 0; x < (Nparticles + 511)/512; x++) // round up so the last, partially filled block is included
{
sum += tex1Dfetch(tex_sums, x);
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
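// Shared-memory tree reduction: each pass halves the number of active threads,
// accumulating this block's weights into buffer[0], which is written out below
// as the block's partial sum.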
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
float roundDouble(float value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/*****************************
* SETIF
* set values of the 3D array to a newValue if that value is equal to the testValue
* param1: value to test
* param2: 3D array
* param3: dim X
* param4: dim Y
* param5: dim Z
******************************/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/******************************
* ADDNOISE
* sets values of 3D matrix using randomly generated numbers from a normal distribution
* param matrix
******************************/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
//
for(x = 0; x < *dimX; x++){
//
for(y = 0; y < *dimY; y++){
//
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn());
}
}
}
}
/******************************
* STRELDISK
* param: pointer to the disk to be made
* creates a 9x9 matrix representing the disk
******************************/
int * strelDisk()
{
int * disk = (int *)malloc(sizeof(int)*9*9);
int x, y;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
float distance = sqrt(pow((float)(x-4),2) + pow((float)(y-4),2));
if(distance < 5.0)
disk[x*9 + y] = 1;
else
disk[x*9 + y] = 0;
}
}
return disk;
}
/******************************
* DILATE_MATRIX
* param1: matrix to be dilated
* param2: current x position
* param3: current y position
* param4: current z position
* param5: x length
* param6: y length
* param7: z length
* param8: error radius
*******************************/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
/*int startZ = posZ - error;
while(startZ < 0)
startZ++;*/
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
/*int endZ = posZ + error;
while(endZ > dimZ)
endZ--;*/
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
float distance = sqrt( pow((float)(x-posX),2) + pow((float)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/******************************
* IMDILATE_DISK
* param1: target 3d matrix
* param2: dimX
* param3: dimY
* param4: dimZ
* param5: radius
* param6: error
* returns the dilated matrix
* dilates the target matrix using the radius as a guide
******************************/
unsigned char* imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error)
{
unsigned char * newMatrix = (unsigned char *)calloc(dimX*dimY*dimZ, sizeof(unsigned char));
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
free(matrix);
return newMatrix;
}
/*****************************
* GET NEIGHBORS
* returns a 2D array describing the offsets
* param 1 strel object
* param 2 number of ones in the strel object
*******************************/
int * getneighbors(int * se, int numOnes){
int * neighbors = (int *)malloc(sizeof(int)*numOnes*2);
int x, y;
int neighY = 0;
int center = 4;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
if(se[x*9 + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
return neighbors;
}
/******************************
* VIDEO SEQUENCE
* the synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
*******************************/
void videoSequence(){
int k;
int max_size = IszX*IszY*Nfr;
//get object centers
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
//move point
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k));
yk = abs(y0 - 2*(k));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
int x, y;
int count = 0;
/*for(x = 0; x < IszX; x++)
for(y = 0; y < IszY; y++)
for(k = 0; k < Nfr; k++)
if(I[x*IszY*Nfr + y*Nfr + k]){
printf("ARRAY [%d][%d][%d]: %d\n", x, y, k, I[x*IszY*Nfr + y*Nfr + k]);
count++;
}
printf("COUNT: %d\n", count);*/
//dilate matrix
I = imdilate_disk(I, IszX, IszY, Nfr, 5);
count = 0;
/*printf("SECOND TIME\n");
for(k = 0; k< Nfr; k++)
for(x = 0; x < IszX; x++)
for(y = 0; y < IszY; y++)
if(I[x*IszY*Nfr + y*Nfr + k]){
printf("ARRAY [%d][%d][%d]: %d\n", x, y, k, I[x*IszY*Nfr + y*Nfr + k]);
count++;
}
printf("COUNT: %d", count);*/
//define background, add noise
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
//add noise
addNoise(I, &IszX, &IszY, &Nfr);
}
/******************************
* FIND INDEX
* FINDS THE FIRST OCCURRENCE OF AN ELEMENT IN CDF GREATER THAN THE PROVIDED VALUE AND RETURNS THAT INDEX
* param1 CDF
* param2 length of CDF
* param3 value
*******************************/
int findIndex(float * CDF, int lengthCDF, float value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
void particleFilter(){
int max_size = IszX*IszY*Nfr;
//original particle centroid
float xe = roundDouble(IszY/2.0);
float ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int * disk = strelDisk();
int countOnes = 0;
int x, y;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
if(disk[x*9 + y] == 1)
countOnes++;
//printf("%d ", disk[x*9+y]);
}
//printf("\n");
}
int * objxy = getneighbors(disk, countOnes);
/*for(x = 0; x < countOnes; x++){
printf("%d %d\n", objxy[x*2], objxy[x*2 + 1]);
}
printf("NUM ONES: %d\n", countOnes);*/
//initial weights are all equal (1/Nparticles)
float * weights = (float *)malloc(sizeof(float)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)malloc(sizeof(float)*Nparticles);
float * arrayX = (float *)malloc(sizeof(float)*Nparticles);
float * arrayY = (float *)malloc(sizeof(float)*Nparticles);
float * xj = (float *)malloc(sizeof(float)*Nparticles);
float * yj = (float *)malloc(sizeof(float)*Nparticles);
float * CDF = (float *)malloc(sizeof(float)*Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
int * ind_GPU;
float * u = (float *)malloc(sizeof(float)*Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//float * Ik = (float *)malloc(sizeof(float)*IszX*IszY);
int indX, indY;
//start send
long long send_start = get_time();
hipMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY*Nfr, hipMemcpyHostToDevice);
hipMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, hipMemcpyHostToDevice);
hipMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, hipMemcpyHostToDevice);
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
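// Per-frame GPU pipeline: likelihood_kernel applies the motion model and weights the
// particles, sum_kernel + normalize_weights_kernel turn the per-block partial sums into
// normalized weights and a CDF, and find_index_kernel resamples. The commented-out block
// below is the original sequential CPU version, kept for reference.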
for(k = 1; k < Nfr; k++){
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
/*for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn();
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn();
}
//particle filter likelihood
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
float sumWeights = updateWeights(weights, likelihood, Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
float sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
//float distance = sqrt( pow((float)(xe-(int)roundDouble(IszY/2.0)),2) + pow((float)(ye-(int)roundDouble(IszX/2.0)),2) );
//printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
cdfCalc(CDF, weights, Nparticles);
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
float u1 = (1/((float)(Nparticles)))*randu();
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((float)(Nparticles));
}*/
hipLaunchKernelGGL(( likelihood_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
hipLaunchKernelGGL(( sum_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, partial_sums, Nparticles);
hipBindTexture(0, tex_sums, partial_sums, Nparticles);
hipLaunchKernelGGL(( normalize_weights_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
hipUnbindTexture(tex_sums);
//long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
//long long end_copy = get_time();
//Set number of threads
hipBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( find_index_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
hipUnbindTexture(tex_CDF);
//long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
//hipMemcpy(arrayY_GPU, yj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToDevice);
//hipMemcpy(arrayX_GPU, xj_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToDevice);
//long long end_copy_back = get_time();
//printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
//printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
//printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
/**
int j, i;
for(j = 0; j < Nparticles; j++){
i = findIndex(CDF, Nparticles, u[j]);
xj[j] = arrayX[i];
yj[j] = arrayY[i];
}
**/
//reassign arrayX and arrayY
//arrayX = xj;
//arrayY = yj;
/*for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}*/
}
/*check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof(float)*Nparticles));*/
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
hipMemcpy(arrayX, arrayX_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost);
long long arrayX_time = get_time();
hipMemcpy(arrayY, arrayY_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost);
long long arrayY_time = get_time();
hipMemcpy(weights, weights_GPU, sizeof(float)*Nparticles, hipMemcpyDeviceToHost);
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("SEND TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt( pow((float)(xe-(int)roundDouble(IszY/2.0)),2) + pow((float)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
}
int main(){
//establish seed
seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
I = (unsigned char *)malloc(sizeof(unsigned char)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence();
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter();
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
return 0;
}
| 038e649e676539db24bfff7cf53fab91c809167c.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
//3D matrix containing the picture and the frames
unsigned char * I;
//dimension X of the picture in pixels
int IszX = 128;
//dimension Y of the picture in pixels
int IszY = 128;
//number of frames
int Nfr = 10;
//define number of particles
int Nparticles = 100;
int * seed;
texture <float> tex_CDF;
texture <float> tex_sums;
const int threads_per_block = 512;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 50
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index){
float likelihoodSum = 0.0;
int x;
for(x = 0; x < numOnes; x++)
likelihoodSum += (pow((float)(I[ind[index*numOnes + x]] - 100),2) - pow((float)(I[ind[index*numOnes + x]]-228),2))/50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles){
int x;
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a float representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index)
{
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A*seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num/((float) M));
}
float randu()
{
float max = (float)RAND_MAX;
int num = rand();
return num/max;
}
/******************************
* RANDN
* GENERATES A NORMAL DISTRIBUTION
* returns a float representing a random number generated using the Box-Muller (polar) method
* see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
******************************/
float randn(){
//Box-Muller algorithm (polar form)
float u1, u2, v1, v2;
float s = 2;
while(s >= 1){
u1 = randu();
u2 = randu();
v1 = 2.0*u1 - 1.0;
v2 = 2.0*u2 - 1.0;
s = pow(v1, 2)+pow(v2, 2);
}
float x1 = v1*sqrt((-2.0*log(s))/s);
return x1;
}
__device__ float d_randn(int * seed, int index){
//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2*pi*v);
float rt = -2*log(u);
return sqrt(rt)*cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles){
int x;
float sum = 0;
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(tex1Dfetch(tex_CDF, x) >= u[i] && index == -1){
index = x;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1/((float)(Nparticles));
}
__syncthreads();
}
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
__shared__ float u1, sumWeights;
sumWeights = partial_sums[0];
if(i < Nparticles)
{
weights[i] = weights[i]/sumWeights;
}
if(i == 0)
{
cdfCalc(CDF, weights, Nparticles);
u1 = (1/((float)(Nparticles)))*d_randu(seed, i);
}
if(i < Nparticles)
{
__syncthreads();
u[i] = u1 + i/((float)(Nparticles));
}
}
__global__ void sum_kernel(float* partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float sum = 0;
for(x = 0; x < (Nparticles + 511)/512; x++) // round up so the last, partially filled block is included
{
sum += tex1Dfetch(tex_sums, x);
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
float roundDouble(float value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/*****************************
* SETIF
* set values of the 3D array to a newValue if that value is equal to the testValue
* param1: value to test
* param2: 3D array
* param3: dim X
* param4: dim Y
* param5: dim Z
******************************/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/******************************
* ADDNOISE
* sets values of 3D matrix using randomly generated numbers from a normal distribution
* param matrix
******************************/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
//
for(x = 0; x < *dimX; x++){
//
for(y = 0; y < *dimY; y++){
//
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn());
}
}
}
}
/******************************
* STRELDISK
* param: pointer to the disk to be made
* creates a 9x9 matrix representing the disk
******************************/
int * strelDisk()
{
int * disk = (int *)malloc(sizeof(int)*9*9);
int x, y;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
float distance = sqrt(pow((float)(x-4),2) + pow((float)(y-4),2));
if(distance < 5.0)
disk[x*9 + y] = 1;
else
disk[x*9 + y] = 0;
}
}
return disk;
}
/******************************
* DILATE_MATRIX
* param1: matrix to be dilated
* param2: current x position
* param3: current y position
* param4: current z position
* param5: x length
* param6: y length
* param7: z length
* param8: error radius
*******************************/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
/*int startZ = posZ - error;
while(startZ < 0)
startZ++;*/
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
/*int endZ = posZ + error;
while(endZ > dimZ)
endZ--;*/
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
float distance = sqrt( pow((float)(x-posX),2) + pow((float)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/******************************
* IMDILATE_DISK
* param1: target 3d matrix
* param2: dimX
* param3: dimY
* param4: dimZ
* param5: radius
* param6: error
* returns the dilated matrix
* dilates the target matrix using the radius as a guide
******************************/
unsigned char* imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error)
{
unsigned char * newMatrix = (unsigned char *)calloc(dimX*dimY*dimZ, sizeof(unsigned char));
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
free(matrix);
return newMatrix;
}
/*****************************
* GET NEIGHBORS
* returns a 2D array describing the offsets
* param 1 strel object
* param 2 number of ones in the strel object
*******************************/
int * getneighbors(int * se, int numOnes){
int * neighbors = (int *)malloc(sizeof(int)*numOnes*2);
int x, y;
int neighY = 0;
int center = 4;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
if(se[x*9 + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
return neighbors;
}
/******************************
* VIDEO SEQUENCE
* the synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
*******************************/
void videoSequence(){
int k;
int max_size = IszX*IszY*Nfr;
//get object centers
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
//move point
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k));
yk = abs(y0 - 2*(k));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
int x, y;
int count = 0;
/*for(x = 0; x < IszX; x++)
for(y = 0; y < IszY; y++)
for(k = 0; k < Nfr; k++)
if(I[x*IszY*Nfr + y*Nfr + k]){
printf("ARRAY [%d][%d][%d]: %d\n", x, y, k, I[x*IszY*Nfr + y*Nfr + k]);
count++;
}
printf("COUNT: %d\n", count);*/
//dilate matrix
I = imdilate_disk(I, IszX, IszY, Nfr, 5);
count = 0;
/*printf("SECOND TIME\n");
for(k = 0; k< Nfr; k++)
for(x = 0; x < IszX; x++)
for(y = 0; y < IszY; y++)
if(I[x*IszY*Nfr + y*Nfr + k]){
printf("ARRAY [%d][%d][%d]: %d\n", x, y, k, I[x*IszY*Nfr + y*Nfr + k]);
count++;
}
printf("COUNT: %d", count);*/
//define background, add noise
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
//add noise
addNoise(I, &IszX, &IszY, &Nfr);
}
/******************************
* FIND INDEX
* FINDS THE FIRST OCCURRENCE OF AN ELEMENT IN CDF GREATER THAN THE PROVIDED VALUE AND RETURNS THAT INDEX
* param1 CDF
* param2 length of CDF
* param3 value
*******************************/
int findIndex(float * CDF, int lengthCDF, float value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
void particleFilter(){
int max_size = IszX*IszY*Nfr;
//original particle centroid
float xe = roundDouble(IszY/2.0);
float ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int * disk = strelDisk();
int countOnes = 0;
int x, y;
for(x = 0; x < 9; x++){
for(y = 0; y < 9; y++){
if(disk[x*9 + y] == 1)
countOnes++;
//printf("%d ", disk[x*9+y]);
}
//printf("\n");
}
int * objxy = getneighbors(disk, countOnes);
/*for(x = 0; x < countOnes; x++){
printf("%d %d\n", objxy[x*2], objxy[x*2 + 1]);
}
printf("NUM ONES: %d\n", countOnes);*/
//initial weights are all equal (1/Nparticles)
float * weights = (float *)malloc(sizeof(float)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)malloc(sizeof(float)*Nparticles);
float * arrayX = (float *)malloc(sizeof(float)*Nparticles);
float * arrayY = (float *)malloc(sizeof(float)*Nparticles);
float * xj = (float *)malloc(sizeof(float)*Nparticles);
float * yj = (float *)malloc(sizeof(float)*Nparticles);
float * CDF = (float *)malloc(sizeof(float)*Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
int * ind_GPU;
float * u = (float *)malloc(sizeof(float)*Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//float * Ik = (float *)malloc(sizeof(float)*IszX*IszY);
int indX, indY;
//start send
long long send_start = get_time();
cudaMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY*Nfr, cudaMemcpyHostToDevice);
cudaMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, cudaMemcpyHostToDevice);
cudaMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, cudaMemcpyHostToDevice);
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
for(k = 1; k < Nfr; k++){
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
/*for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn();
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn();
}
//particle filter likelihood
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
float sumWeights = updateWeights(weights, likelihood, Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
float sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
//float distance = sqrt( pow((float)(xe-(int)roundDouble(IszY/2.0)),2) + pow((float)(ye-(int)roundDouble(IszX/2.0)),2) );
//printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
cdfCalc(CDF, weights, Nparticles);
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
float u1 = (1/((float)(Nparticles)))*randu();
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((float)(Nparticles));
}*/
likelihood_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel <<< num_blocks, threads_per_block >>> (partial_sums, Nparticles);
cudaBindTexture(0, tex_sums, partial_sums, Nparticles);
normalize_weights_kernel <<< num_blocks, threads_per_block >>> (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
cudaUnbindTexture(tex_sums);
//long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
//long long end_copy = get_time();
//Set number of threads
cudaBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
find_index_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
cudaUnbindTexture(tex_CDF);
//long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
//cudaMemcpy(arrayY_GPU, yj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToDevice);
//cudaMemcpy(arrayX_GPU, xj_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToDevice);
//long long end_copy_back = get_time();
//printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
//printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
//printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
/**
int j, i;
for(j = 0; j < Nparticles; j++){
i = findIndex(CDF, Nparticles, u[j]);
xj[j] = arrayX[i];
yj[j] = arrayY[i];
}
**/
//reassign arrayX and arrayY
//arrayX = xj;
//arrayY = yj;
/*for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}*/
}
/*check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof(float)*Nparticles));*/
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
cudaMemcpy(arrayX, arrayX_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost);
long long arrayX_time = get_time();
cudaMemcpy(arrayY, arrayY_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost);
long long arrayY_time = get_time();
cudaMemcpy(weights, weights_GPU, sizeof(float)*Nparticles, cudaMemcpyDeviceToHost);
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("SEND TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt( pow((float)(xe-(int)roundDouble(IszY/2.0)),2) + pow((float)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
}
int main(){
//establish seed
seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
I = (unsigned char *)malloc(sizeof(unsigned char)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence();
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter();
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
return 0;
}
|
8a418f97c5bec48ee129e4fe56ac5aebbe18f359.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ***************************************************************************
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* As a special exception, you may use this file as part of a free software
* library without restriction. Specifically, if other files instantiate
* templates or use macros or inline functions from this file, or you compile
* this file and link it with other files to produce an executable, this
* file does not by itself cause the resulting executable to be covered by
* the GNU General Public License. This exception does not however
* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
****************************************************************************
*/
/*
Author: Marco Aldinucci.
email: [email protected]
[email protected]
date : 15/11/97
Modified by:
****************************************************************************
* Author: Dalvan Griebler <[email protected]>
* Author: Dinei Rockenbach <[email protected]>
*
* Copyright: GNU General Public License
* Description: This program simply computes the Mandelbrot set.
* File Name: mandel.cpp
* Version: 1.0 (25/05/2018)
* Compilation Command: make
****************************************************************************
*/
#include <stdio.h>
#if !defined(NO_DISPLAY)
#include "marX2.h"
#endif
#include <sys/time.h>
#include <math.h>
#include <iostream>
#include <chrono>
#include "cudabase.h"
int block_size = 32;
int total_devices = 1;
int batch_size = 1;
__global__ void mandel_kernel(int batch, int batch_size, int dim, double init_a, double init_b, double step, int niter, unsigned char *M) {
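// One thread per pixel: rows are processed in batches of batch_size, so i_in_batch/i give
// the row inside the batch and in the full image, and j the column; the loop below is the
// classic escape-time iteration z = z*z + c, and M stores an intensity that darkens with
// the iteration count.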
int threadIdGlobal = blockIdx.x * blockDim.x + threadIdx.x;
int i_in_batch = floor((double)threadIdGlobal/dim);
int i = batch * batch_size + i_in_batch; //global i
int j = threadIdGlobal - i_in_batch*dim;
if (i < dim && j < dim) {
double im = init_b + (step * i);
double cr;
double a = cr = init_a + step * j;
double b = im;
int k = 0;
for (k = 0; k < niter; k++) {
double a2 = a*a;
double b2 = b*b;
if ((a2+b2) > 4.0) break;
b = 2*a*b+im;
a = a2-b2+cr;
}
M[i_in_batch*dim+j] = (unsigned char)255-((k*255 / niter));
}
}
#include <ff/farm.hpp>
using namespace ff;
#define DIM 800
#define ITERATION 1024
double diff(struct timeval a, struct timeval b) {
long sec = (a.tv_sec - b.tv_sec);
long usec = (a.tv_usec - b.tv_usec);
if(usec < 0) {
--sec;
usec += 1000000;
}
return ((double)(sec*1000)+ (double)usec/1000.0);
}
struct task_t {
task_t(int batch, unsigned char *M, unsigned char* dev_M, hipStream_t cuda_stream) : batch(batch), M(M), dev_M(dev_M), cuda_stream(cuda_stream){};
int batch;
unsigned char* M;
unsigned char* dev_M;
hipStream_t cuda_stream;
};
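// FastFlow emitter: for every batch of rows it selects a device round-robin,
// allocates a pinned host buffer and a device buffer, creates a dedicated
// stream, and emits one task_t downstream.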
class Emitter: public ff_node_t<task_t> {
public:
int dim;
Emitter(int dim): dim(dim) {}
task_t *svc(task_t*) {
int batches = ceil((double)dim / batch_size);
for(int batch = 0; batch < batches;batch++) {
CudaSafeCall( hipSetDevice(batch % total_devices) );
unsigned char * M;
CudaSafeCall( hipHostMalloc((void**)&M, dim * batch_size, hipHostMallocDefault) );
unsigned char *dev_M; //on device
CudaSafeCall( hipMalloc((void**)&dev_M, dim * batch_size) );
hipStream_t cuda_stream;
CudaSafeCall( hipStreamCreate(&cuda_stream) );
ff_send_out(new task_t(batch, M, dev_M, cuda_stream));
}
return (task_t*)EOS;
}
};
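// FastFlow worker: launches the Mandelbrot kernel for the task's batch on the
// task's stream, enqueues the asynchronous device-to-host copy on the same
// stream, and forwards the task without waiting for completion.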
class Worker: public ff_node_t<task_t> {
public:
int dim;
int niter;
double init_a;
double init_b;
double step;
Worker(int dim, int niter, double init_a, double init_b, double step): dim(dim), niter(niter), init_a(init_a), init_b(init_b), step(step) {}
task_t* svc(task_t* t) {
CudaSafeCall( hipSetDevice(t->batch % total_devices) );
int threads = block_size;
int blocks = ceil((double)dim*batch_size / block_size);
hipLaunchKernelGGL(( mandel_kernel), dim3(blocks), dim3(threads), 0, t->cuda_stream , t->batch, batch_size, dim, init_a, init_b, step, niter, t->dev_M);
CudaSafeCall( hipMemcpyAsync(t->M, t->dev_M, dim * batch_size, hipMemcpyDeviceToHost, t->cuda_stream) );
return t;
}
};
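// FastFlow collector: waits for the task's stream to finish, optionally
// displays the finished rows, then releases the buffers and the stream.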
class Collector: public ff_node_t<task_t> {
public:
int dim;
Collector(int dim): dim(dim) {};
task_t* svc(task_t* t) {
CudaSafeCall( hipSetDevice(t->batch % total_devices) );
CudaSafeCall( hipStreamSynchronize(t->cuda_stream) );
#if !defined(NO_DISPLAY)
for(int i = 0; i < batch_size; i++) {
ShowLine(&t->M[i*dim], dim, t->batch*batch_size+i);
}
#endif
CudaSafeCall( hipHostFree(t->M) );
CudaSafeCall( hipFree(t->dev_M) );
CudaSafeCall( hipStreamDestroy(t->cuda_stream) );
return (task_t*)GO_ON;
}
};
int main(int argc, char **argv) {
double init_a=-2.125,init_b=-1.5,range=3.0;
int dim = DIM, niter = ITERATION;
// stats
struct timeval t1,t2;
int retries=1;
double avg = 0;
int n_workers = 1;
int num_gpus = 0;
if (argc<6) {
printf("Usage: %s size niterations retries workers batch_size [num_gpus]\n", argv[0]);
printf(" num_gpus : Number of GPUs that should be used. If not informed, use all available GPUs\n\n");
exit(-1);
}
else {
dim = atoi(argv[1]);
niter = atoi(argv[2]);
retries = atoi(argv[3]);
n_workers = atoi(argv[4]);
batch_size = atoi(argv[5]);
if (argc > 6) {
num_gpus = atoi(argv[6]);
}
}
double * runs = (double *) malloc(retries*sizeof(double));
double step = range/((double) dim);
#if !defined(NO_DISPLAY)
SetupXWindows(dim,dim,1,NULL,"Sequential Mandelbrot");
#endif
printf("bin;size;numiter;time (ms);workers;batch size\n");
if (!num_gpus) {
CudaSafeCall( hipGetDeviceCount(&total_devices) );
if (total_devices < 1) {
printf("No CUDA-enabled device found");
return 1;
}
} else {
total_devices = num_gpus;
}
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
block_size = devProp.maxThreadsPerBlock; //max threads per block
for (int r=0; r<retries; r++) {
// Start time
gettimeofday(&t1,NULL);
std::vector<ff_node*> workers;
for (int w=0; w<n_workers; w++) {
workers.push_back(new Worker(dim, niter, init_a, init_b, step));
}
ff_farm<> farm;
farm.add_emitter(new Emitter(dim));
farm.add_workers(workers);
farm.add_collector(new Collector(dim));
int ret = farm.run_and_wait_end();
if (ret < 0) {
printf("Error on FastFlow farm: %d\n", ret);
return -1;
}
// Stop time
gettimeofday(&t2,NULL);
CudaCheckError();
avg += runs[r] = diff(t2,t1);
printf("%s (%d GPU);%d;%d;%.2f;%d;%d\n", argv[0], total_devices, dim, niter, runs[r], n_workers, batch_size);
}
avg = avg / (double) retries;
double var = 0;
for (int r=0; r<retries; r++) {
var += (runs[r] - avg) * (runs[r] - avg);
}
var /= retries;
#if !defined(NO_DISPLAY)
printf("Average on %d experiments = %f (ms) Std. Dev. %f\n\nPress a key\n",retries,avg,sqrt(var));
getchar();
CloseXWindows();
#endif
return 0;
}
| 8a418f97c5bec48ee129e4fe56ac5aebbe18f359.cu | /* ***************************************************************************
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* As a special exception, you may use this file as part of a free software
* library without restriction. Specifically, if other files instantiate
* templates or use macros or inline functions from this file, or you compile
* this file and link it with other files to produce an executable, this
* file does not by itself cause the resulting executable to be covered by
* the GNU General Public License. This exception does not however
* invalidate any other reasons why the executable file might be covered by
* the GNU General Public License.
*
****************************************************************************
*/
/*
Author: Marco Aldinucci.
email: [email protected]
[email protected]
date : 15/11/97
Modified by:
****************************************************************************
* Author: Dalvan Griebler <[email protected]>
* Author: Dinei Rockenbach <[email protected]>
*
* Copyright: GNU General Public License
* Description: This program simply computes the Mandelbrot set.
* File Name: mandel.cpp
* Version: 1.0 (25/05/2018)
* Compilation Command: make
****************************************************************************
*/
#include <stdio.h>
#if !defined(NO_DISPLAY)
#include "marX2.h"
#endif
#include <sys/time.h>
#include <math.h>
#include <iostream>
#include <chrono>
#include "cudabase.h"
int block_size = 32;
int total_devices = 1;
int batch_size = 1;
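// Escape-time Mandelbrot kernel: each thread computes one pixel of the current
// row batch. 'batch' selects which group of 'batch_size' image rows is rendered;
// the thread iterates z = z^2 + c (c derived from the pixel position) up to
// 'niter' times and stores an inverted grayscale escape count in M.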
__global__ void mandel_kernel(int batch, int batch_size, int dim, double init_a, double init_b, double step, int niter, unsigned char *M) {
int threadIdGlobal = blockIdx.x * blockDim.x + threadIdx.x;
int i_in_batch = floor((double)threadIdGlobal/dim);
int i = batch * batch_size + i_in_batch; //global i
int j = threadIdGlobal - i_in_batch*dim;
if (i < dim && j < dim) {
double im = init_b + (step * i);
double cr;
double a = cr = init_a + step * j;
double b = im;
int k = 0;
for (k = 0; k < niter; k++) {
double a2 = a*a;
double b2 = b*b;
if ((a2+b2) > 4.0) break;
b = 2*a*b+im;
a = a2-b2+cr;
}
M[i_in_batch*dim+j] = (unsigned char)255-((k*255 / niter));
}
}
#include <ff/farm.hpp>
using namespace ff;
#define DIM 800
#define ITERATION 1024
double diff(struct timeval a, struct timeval b) {
long sec = (a.tv_sec - b.tv_sec);
long usec = (a.tv_usec - b.tv_usec);
if(usec < 0) {
--sec;
usec += 1000000;
}
return ((double)(sec*1000)+ (double)usec/1000.0);
}
struct task_t {
task_t(int batch, unsigned char *M, unsigned char* dev_M, cudaStream_t cuda_stream) : batch(batch), M(M), dev_M(dev_M), cuda_stream(cuda_stream){};
int batch;
unsigned char* M;
unsigned char* dev_M;
cudaStream_t cuda_stream;
};
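// FastFlow emitter: for every batch of rows it selects a device round-robin,
// allocates a pinned host buffer and a device buffer, creates a dedicated
// stream, and emits one task_t downstream.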
class Emitter: public ff_node_t<task_t> {
public:
int dim;
Emitter(int dim): dim(dim) {}
task_t *svc(task_t*) {
int batches = ceil((double)dim / batch_size);
for(int batch = 0; batch < batches;batch++) {
CudaSafeCall( cudaSetDevice(batch % total_devices) );
unsigned char * M;
CudaSafeCall( cudaHostAlloc((void**)&M, dim * batch_size, cudaHostAllocDefault) );
unsigned char *dev_M; //on device
CudaSafeCall( cudaMalloc((void**)&dev_M, dim * batch_size) );
cudaStream_t cuda_stream;
CudaSafeCall( cudaStreamCreate(&cuda_stream) );
ff_send_out(new task_t(batch, M, dev_M, cuda_stream));
}
return (task_t*)EOS;
}
};
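// FastFlow worker: launches the Mandelbrot kernel for the task's batch on the
// task's stream, enqueues the asynchronous device-to-host copy on the same
// stream, and forwards the task without waiting for completion.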
class Worker: public ff_node_t<task_t> {
public:
int dim;
int niter;
double init_a;
double init_b;
double step;
Worker(int dim, int niter, double init_a, double init_b, double step): dim(dim), niter(niter), init_a(init_a), init_b(init_b), step(step) {}
task_t* svc(task_t* t) {
CudaSafeCall( cudaSetDevice(t->batch % total_devices) );
int threads = block_size;
int blocks = ceil((double)dim*batch_size / block_size);
mandel_kernel<<< blocks, threads, 0, t->cuda_stream >>>(t->batch, batch_size, dim, init_a, init_b, step, niter, t->dev_M);
CudaSafeCall( cudaMemcpyAsync(t->M, t->dev_M, dim * batch_size, cudaMemcpyDeviceToHost, t->cuda_stream) );
return t;
}
};
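// FastFlow collector: waits for the task's stream to finish, optionally
// displays the finished rows, then releases the buffers and the stream.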
class Collector: public ff_node_t<task_t> {
public:
int dim;
Collector(int dim): dim(dim) {};
task_t* svc(task_t* t) {
CudaSafeCall( cudaSetDevice(t->batch % total_devices) );
CudaSafeCall( cudaStreamSynchronize(t->cuda_stream) );
#if !defined(NO_DISPLAY)
for(int i = 0; i < batch_size; i++) {
ShowLine(&t->M[i*dim], dim, t->batch*batch_size+i);
}
#endif
CudaSafeCall( cudaFreeHost(t->M) );
CudaSafeCall( cudaFree(t->dev_M) );
CudaSafeCall( cudaStreamDestroy(t->cuda_stream) );
return (task_t*)GO_ON;
}
};
int main(int argc, char **argv) {
double init_a=-2.125,init_b=-1.5,range=3.0;
int dim = DIM, niter = ITERATION;
// stats
struct timeval t1,t2;
int retries=1;
double avg = 0;
int n_workers = 1;
int num_gpus = 0;
if (argc<6) {
printf("Usage: %s size niterations retries workers batch_size [num_gpus]\n", argv[0]);
printf(" num_gpus : Number of GPUs that should be used. If not informed, use all available GPUs\n\n");
exit(-1);
}
else {
dim = atoi(argv[1]);
niter = atoi(argv[2]);
retries = atoi(argv[3]);
n_workers = atoi(argv[4]);
batch_size = atoi(argv[5]);
if (argc > 6) {
num_gpus = atoi(argv[6]);
}
}
double * runs = (double *) malloc(retries*sizeof(double));
double step = range/((double) dim);
#if !defined(NO_DISPLAY)
SetupXWindows(dim,dim,1,NULL,"Sequential Mandelbrot");
#endif
printf("bin;size;numiter;time (ms);workers;batch size\n");
if (!num_gpus) {
CudaSafeCall( cudaGetDeviceCount(&total_devices) );
if (total_devices < 1) {
printf("No CUDA-enabled device found");
return 1;
}
} else {
total_devices = num_gpus;
}
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
block_size = devProp.maxThreadsPerBlock; //max threads per block
for (int r=0; r<retries; r++) {
// Start time
gettimeofday(&t1,NULL);
std::vector<ff_node*> workers;
for (int w=0; w<n_workers; w++) {
workers.push_back(new Worker(dim, niter, init_a, init_b, step));
}
ff_farm<> farm;
farm.add_emitter(new Emitter(dim));
farm.add_workers(workers);
farm.add_collector(new Collector(dim));
int ret = farm.run_and_wait_end();
if (ret < 0) {
printf("Error on FastFlow farm: %d\n", ret);
return -1;
}
// Stop time
gettimeofday(&t2,NULL);
CudaCheckError();
avg += runs[r] = diff(t2,t1);
printf("%s (%d GPU);%d;%d;%.2f;%d;%d\n", argv[0], total_devices, dim, niter, runs[r], n_workers, batch_size);
}
avg = avg / (double) retries;
double var = 0;
for (int r=0; r<retries; r++) {
var += (runs[r] - avg) * (runs[r] - avg);
}
var /= retries;
#if !defined(NO_DISPLAY)
printf("Average on %d experiments = %f (ms) Std. Dev. %f\n\nPress a key\n",retries,avg,sqrt(var));
getchar();
CloseXWindows();
#endif
return 0;
}
|
c5f6bc417d4af5e71cf8783f257f6e08e7a75fa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MedV4D/Imaging/cuda/detail/EdgeDetection.cuh"
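// Sobel3D: copies the input image region into a device buffer, runs the generic
// FilterKernel3D with a SobelFilter3DFtor (thresholded edge response) over
// 8x8x8 thread blocks, then copies the result back into the output region.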
template< typename RegionType >
void
Sobel3D( RegionType input, RegionType output, typename RegionType::ElementType threshold )
{
typedef typename RegionType::ElementType TElement;
typedef Buffer3D< TElement > Buffer;
Buffer inBuffer = CudaBuffer3DFromImageRegionCopy( input );
Buffer outBuffer = CudaBuffer3DFromImageRegion( output );
SobelFilter3DFtor< TElement > filter( threshold );
//int3 radius = filter.radius;
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( inBuffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
M4D::Common::Clock clock;
CheckCudaErrorState( "Before kernel execution" );
hipLaunchKernelGGL(( FilterKernel3D< TElement, TElement, SobelFilter3DFtor< TElement > >)
, dim3(gridSize), dim3(blockSize) , 0, 0,
inBuffer,
outBuffer,
blockResolution,
filter
);
hipDeviceSynchronize();
CheckCudaErrorState( "After kernel execution" );
LOG( "Sobel3D computations took " << clock.SecondsPassed() )
hipMemcpy(output.GetPointer(), outBuffer.mData, outBuffer.mLength * sizeof(TElement), hipMemcpyDeviceToHost );
CheckCudaErrorState( "Copy back" );
//hipFree( inBuffer.mData );
//hipFree( outBuffer.mData );
CheckCudaErrorState( "Free memory" );
}
#define DECLARE_TEMPLATE_INSTANCE template void Sobel3D( M4D::Imaging::ImageRegion< TTYPE, 3 > input, M4D::Imaging::ImageRegion< TTYPE, 3 > output, TTYPE threshold );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
| c5f6bc417d4af5e71cf8783f257f6e08e7a75fa6.cu | #include "MedV4D/Imaging/cuda/detail/EdgeDetection.cuh"
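// Sobel3D: copies the input image region into a device buffer, runs the generic
// FilterKernel3D with a SobelFilter3DFtor (thresholded edge response) over
// 8x8x8 thread blocks, then copies the result back into the output region.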
template< typename RegionType >
void
Sobel3D( RegionType input, RegionType output, typename RegionType::ElementType threshold )
{
typedef typename RegionType::ElementType TElement;
typedef Buffer3D< TElement > Buffer;
Buffer inBuffer = CudaBuffer3DFromImageRegionCopy( input );
Buffer outBuffer = CudaBuffer3DFromImageRegion( output );
SobelFilter3DFtor< TElement > filter( threshold );
//int3 radius = filter.radius;
dim3 blockSize( 8, 8, 8 );
int3 blockResolution = GetBlockResolution( inBuffer.mSize, blockSize, make_int3(0,0,0) );
dim3 gridSize( blockResolution.x * blockResolution.y, blockResolution.z, 1 );
M4D::Common::Clock clock;
CheckCudaErrorState( "Before kernel execution" );
FilterKernel3D< TElement, TElement, SobelFilter3DFtor< TElement > >
<<< gridSize, blockSize >>>(
inBuffer,
outBuffer,
blockResolution,
filter
);
cudaThreadSynchronize();
CheckCudaErrorState( "After kernel execution" );
LOG( "Sobel3D computations took " << clock.SecondsPassed() )
cudaMemcpy(output.GetPointer(), outBuffer.mData, outBuffer.mLength * sizeof(TElement), cudaMemcpyDeviceToHost );
CheckCudaErrorState( "Copy back" );
//cudaFree( inBuffer.mData );
//cudaFree( outBuffer.mData );
CheckCudaErrorState( "Free memory" );
}
#define DECLARE_TEMPLATE_INSTANCE template void Sobel3D( M4D::Imaging::ImageRegion< TTYPE, 3 > input, M4D::Imaging::ImageRegion< TTYPE, 3 > output, TTYPE threshold );
#include "MedV4D/Common/DeclareTemplateNumericInstances.h"
|
7a20d4fc05275e7277e5ef3c51ae11e412e49718.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
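// Serial fill kernel: only threads with threadIdx.x == 0 do any work; the RNG
// state is seeded once and data[0..n) is filled sequentially with samples
// drawn from a normal distribution with the given mean and std.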
__global__ void normal_kernel(int seed, float *data, int n, float mean, float std) {
if (threadIdx.x != 0) return;
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
for (size_t i(0); i < n; ++i)
data[i] = hiprand_normal(&state) * std + mean;
} | 7a20d4fc05275e7277e5ef3c51ae11e412e49718.cu | #include "includes.h"
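// Serial fill kernel: only threads with threadIdx.x == 0 do any work; the RNG
// state is seeded once and data[0..n) is filled sequentially with samples
// drawn from a normal distribution with the given mean and std.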
__global__ void normal_kernel(int seed, float *data, int n, float mean, float std) {
if (threadIdx.x != 0) return;
curandState state;
curand_init(seed, 0, 0, &state);
for (size_t i(0); i < n; ++i)
data[i] = curand_normal(&state) * std + mean;
} |
fabc82aab4675272467fc75df58d8d3b079672be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
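// Float path: each block covers 'blockStride' rows; elements are read as float4,
// so every thread handles 4 values per iteration. The attention mask is added,
// the row maximum and the row sum of exponentials are reduced with warp shuffles
// (plus a shared-memory pass when a row spans more than tbSize threads), and the
// normalized probabilities are written back in place.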
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = ::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, hipStream_t, bool);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
hipStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
            grad_reg += ((float)val_reg[i] *
                         (float)soft_reg[i]); // accumulate the product in float: doing the
                                              // multiply in half can cost roughly 2% accuracy
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
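// v2 backward: one warp per softmax row. Each lane accumulates sum(grad * output)
// over its strided elements, the sum is reduced across the warp with shuffles,
// and grad is overwritten in place with output * (grad - sum).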
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream)
{
if ((seq_length % WARP_SIZE) != 0 || seq_length > 2048)
throw std::runtime_error("Invalid sequence length found in softmax backward.");
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
switch (seq_length) {
case 32:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 1>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 64:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 128:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 256:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 384:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 12>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 512:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 768:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 24>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 1024:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
case 2048:
hipLaunchKernelGGL(( softmax_backward_kernel_v2<T, 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length);
break;
default:
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
}
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
hipStream_t stream);
| fabc82aab4675272467fc75df58d8d3b079672be.cu | #include "custom_cuda_layers.h"
#include "general_kernels.h"
namespace cg = cooperative_groups;
// Fused attention + softmax
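// Float path: each block covers 'blockStride' rows; elements are read as float4,
// so every thread handles 4 values per iteration. The attention mask is added,
// the row maximum and the row sum of exponentials are reduced with warp shuffles
// (plus a shared-memory pass when a row spans more than tbSize threads), and the
// normalized probabilities are written back in place.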
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(float* vals,
const float* attn_mask,
int heads,
int seq_length,
int iterations)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float4* val_cast = reinterpret_cast<float4*>(vals);
const float4* attn_mask_cast = reinterpret_cast<const float4*>(attn_mask);
float4 data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float4 mask = attn_mask_cast[mask_offset + data_id];
data[i] = val_cast[data_offset + data_id];
data[i].x += mask.x;
data[i].y += mask.y;
data[i].z += mask.z;
data[i].w += mask.w;
max_val = (data[i].x > max_val ? data[i].x : max_val);
max_val = (data[i].y > max_val ? data[i].y : max_val);
max_val = (data[i].z > max_val ? data[i].z : max_val);
max_val = (data[i].w > max_val ? data[i].w : max_val);
} else {
data[i].x = minus_infinity;
data[i].y = minus_infinity;
data[i].z = minus_infinity;
data[i].w = minus_infinity;
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
data[i].x = __expf(data[i].x - max_val);
data[i].y = __expf(data[i].y - max_val);
data[i].z = __expf(data[i].z - max_val);
data[i].w = __expf(data[i].w - max_val);
sum += (data[i].x + data[i].y + data[i].z + data[i].w);
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
data[i].x /= sum;
data[i].y /= sum;
data[i].z /= sum;
data[i].w /= sum;
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) val_cast[data_offset + data_id] = data[i];
}
}
template <int tbSize, int blockStride, int tbSeq>
__global__ void attn_softmax(__half* vals,
const __half* attn_mask,
int heads,
int seq_length,
int iterations)
{
#if __CUDA_ARCH__ >= 700
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5;
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int batch = blockIdx.x;
int row = blockIdx.y;
int max_threads_in_sequence = std::max(seq_length, tbSeq);
int seq_lane = threadIdx.x % max_threads_in_sequence;
int data_offset = batch * (gridDim.y * block_width) + row * block_width +
(threadIdx.x / max_threads_in_sequence) * seq_length;
int mask_offset = batch * seq_length;
int wid = threadIdx.x >> 5;
int lane = threadIdx.x & 0x1f;
float2* val_cast = reinterpret_cast<float2*>(vals);
const float2* attn_mask_cast = reinterpret_cast<const float2*>(attn_mask);
val_cast += data_offset;
attn_mask_cast += mask_offset;
float2 low_data[MAX_THREAD_ITERATIONS];
float2 high_data[MAX_THREAD_ITERATIONS];
float max_val = minus_infinity;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 data = val_cast[data_id];
float2 mask = attn_mask_cast[data_id];
__half2* data_arr = reinterpret_cast<__half2*>(&data);
__half2* mask_arr = reinterpret_cast<__half2*>(&mask);
low_data[i] = __half22float2(data_arr[0]);
high_data[i] = __half22float2(data_arr[1]);
float2 low_mask = __half22float2(mask_arr[0]);
float2 high_mask = __half22float2(mask_arr[1]);
low_data[i].x += low_mask.x;
low_data[i].y += low_mask.y;
high_data[i].x += high_mask.x;
high_data[i].y += high_mask.y;
max_val = (low_data[i].x > max_val ? low_data[i].x : max_val);
max_val = (low_data[i].y > max_val ? low_data[i].y : max_val);
max_val = (high_data[i].x > max_val ? high_data[i].x : max_val);
max_val = (high_data[i].y > max_val ? high_data[i].y : max_val);
}
}
for (int i = 1; i < tbSize; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = max_val;
b.sync();
if (lane < warp_num) max_val = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) {
auto temp = g.shfl_xor(max_val, i);
max_val = (temp > max_val ? temp : max_val);
}
max_val = g.shfl(max_val, threadIdx.x / tbSize);
}
float sum = 0;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
low_data[i].x = __expf(low_data[i].x - max_val);
low_data[i].y = __expf(low_data[i].y - max_val);
high_data[i].x = __expf(high_data[i].x - max_val);
high_data[i].y = __expf(high_data[i].y - max_val);
sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y);
}
}
for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); }
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = sum;
b.sync();
if (lane < warp_num) sum = partialSum[lane];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); }
sum = g.shfl(sum, threadIdx.x / tbSize);
}
sum += 1e-6;
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + seq_lane;
if (data_id < seq_length) {
float2 result_f;
__half2* result_h = reinterpret_cast<__half2*>(&result_f);
low_data[i].x /= sum;
low_data[i].y /= sum;
high_data[i].x /= sum;
high_data[i].y /= sum;
result_h[0] = __float22half2_rn(low_data[i]);
result_h[1] = __float22half2_rn(high_data[i]);
val_cast[data_id] = result_f;
}
}
#endif
}
template <typename T>
void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t, bool);
template <>
void launch_attn_softmax<float>(float* vals,
const float* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
template <>
void launch_attn_softmax<__half>(__half* vals,
const __half* attn_mask,
int batch_size,
int heads,
int sequence_length,
cudaStream_t stream)
{
const int threads = 128;
int seq_length4 = sequence_length / 4;
int seq2 = sequence_length * seq_length4;
int block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
int iterations =
(sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads
: MAX_THREAD_ITERATIONS);
if (sequence_length <= 8)
attn_softmax<2, (threads / 2), 2>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 16)
attn_softmax<4, (threads / 4), 4>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 32)
attn_softmax<8, (threads / 8), 8>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 64)
attn_softmax<16, (threads / 16), 16>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 128)
attn_softmax<32, (threads / 32), 32>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length <= 256)
attn_softmax<32, (threads / 64), 64>
<<<grid_dim, block_dim, 0, stream>>>(vals, attn_mask, heads, seq_length4, iterations);
else {
const int threads = 256;
block_compute_size =
(seq_length4 < threads ? ((threads / seq_length4) * seq_length4) : seq_length4);
dim3 grid_dim(batch_size, heads * seq2 / block_compute_size);
int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads;
dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) /
subblock_max_workload * threads)
: threads);
if (sequence_length <= 512)
attn_softmax<32, (threads / 128), 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4))
attn_softmax<32, 1, 128><<<grid_dim, block_dim, 0, stream>>>(
vals, attn_mask, heads, seq_length4, iterations);
else
            throw std::runtime_error(
                "Unsupported Seq_Length! Check the restriction of the max_threads and "
                "max_thread_iterations!");
}
}
template <typename T, int tbSize, int blockStride>
__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length)
{
__shared__ float partialSum[MAX_WARP_NUM];
int warp_num = blockDim.x >> 5; // warp-count = num_threads / WARP_SIZE (32)
int iteration_stride = blockDim.x;
int block_width = blockStride * seq_length;
int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride)
? (seq_length + iteration_stride - 1) / iteration_stride
: MAX_THREAD_ITERATIONS);
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<tbSize> g = cg::tiled_partition<tbSize>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> 5;
int lane = id & 0x1f;
T val_reg[MAX_THREAD_ITERATIONS];
T soft_reg[MAX_THREAD_ITERATIONS];
float grad_reg = 0.0f;
#pragma unroll
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
val_reg[i] = out_grad[row * block_width + data_id];
soft_reg[i] = soft_inp[row * block_width + data_id];
            grad_reg += ((float)val_reg[i] *
                         (float)soft_reg[i]); // accumulate the product in float: doing the
                                              // multiply in half can cost roughly 2% accuracy
}
}
for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
if (seq_length > tbSize) {
if (lane == 0) partialSum[wid] = grad_reg;
b.sync();
if (lane < warp_num) grad_reg = partialSum[lane];
int iters = warp_num;
if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length);
for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i);
grad_reg = g.shfl(grad_reg, id / tbSize);
}
for (int i = 0; i < iterations; i++) {
int data_id = i * iteration_stride + id;
if (data_id < block_width) {
float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg);
out_grad[row * block_width + data_id] = (T)temp;
}
}
}
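// v2 backward: one warp per softmax row. Each lane accumulates sum(grad * output)
// over its strided elements, the sum is reduced across the warp with shuffles,
// and grad is overwritten in place with output * (grad - sum).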
template <typename T, int ITERATIONS>
__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/,
const T* output,
int softmax_length)
{
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
output += offset;
T grad_reg[ITERATIONS];
T output_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
output_reg[i] = output[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)output_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum);
}
}
template <typename T>
void launch_attn_softmax_backward_v2(T* out_grad,
const T* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream)
{
if ((seq_length % WARP_SIZE) != 0 || seq_length > 2048)
throw std::runtime_error("Invalid sequence length found in softmax backward.");
const int warps_per_block = 4;
dim3 grid_dim(batch_size * heads * seq_length / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
switch (seq_length) {
case 32:
softmax_backward_kernel_v2<T, 1>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 64:
softmax_backward_kernel_v2<T, 2>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 128:
softmax_backward_kernel_v2<T, 4>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 256:
softmax_backward_kernel_v2<T, 8>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 384:
softmax_backward_kernel_v2<T, 12>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 512:
softmax_backward_kernel_v2<T, 16>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 768:
softmax_backward_kernel_v2<T, 24>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 1024:
softmax_backward_kernel_v2<T, 32>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
case 2048:
softmax_backward_kernel_v2<T, 64>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, seq_length);
break;
default:
throw std::runtime_error(
std::string("Special sequence length found in softmax backward, seq_length: ") +
std::to_string(seq_length));
}
}
template void launch_attn_softmax_backward_v2<__half>(__half* out_grad,
const __half* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
template void launch_attn_softmax_backward_v2<float>(float* out_grad,
const float* soft_inp,
int batch_size,
int heads,
int seq_length,
cudaStream_t stream);
|
2d18fd6f8d775062d1320ff973ce69831d8cc986.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <book.h>
#include <cpu_anim.h>
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__ void blend_kernel( float *dst, bool dstOut )
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM-1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM-1) bottom -= DIM;
float t, l, c, r, b;
if (dstOut)
{
t = tex1Dfetch(texIn,top);
l = tex1Dfetch(texIn,left);
c = tex1Dfetch(texIn,offset);
r = tex1Dfetch(texIn,right);
b = tex1Dfetch(texIn,bottom);
} else
{
t = tex1Dfetch(texOut,top);
l = tex1Dfetch(texOut,left);
c = tex1Dfetch(texOut,offset);
r = tex1Dfetch(texOut,right);
b = tex1Dfetch(texOut,bottom);
}
dst[offset] = c + SPEED * (t + b + r + l - 4 * c);
}
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel( float *iptr,
// size_t texOffset )
__global__ void copy_const_kernel( float *iptr )
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(texConstSrc,offset);
if (c != 0)
iptr[offset] = c;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
hipEvent_t start, stop;
float totalTime;
float frames;
};
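// Per frame: run 90 update steps, ping-ponging the read texture between texIn
// and texOut via dstOut. copy_const_kernel re-stamps the fixed heater cells,
// blend_kernel diffuses the temperatures, then the field is converted to color
// and copied back to the host bitmap; events measure the per-frame time.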
void anim_gpu( DataBlock *d, int ticks )
{
HANDLE_ERROR( hipEventRecord( d->start, 0 ) );
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
CPUAnimBitmap *bitmap = d->bitmap;
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut = true;
for (int i=0; i<90; i++)
{
float *in, *out;
if (dstOut)
{
in = d->dev_inSrc;
out = d->dev_outSrc;
} else
{
out = d->dev_inSrc;
in = d->dev_outSrc;
}
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks),dim3(threads), 0, 0, in );
hipLaunchKernelGGL(( blend_kernel), dim3(blocks),dim3(threads), 0, 0, out, dstOut);
dstOut = !dstOut;
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks),dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc );
HANDLE_ERROR( hipMemcpy( bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR( hipEventRecord( d->stop, 0 ));
HANDLE_ERROR( hipEventSynchronize( d->stop));
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, d->start, d->stop ));
d->totalTime += elapsedTime;
++d->frames;
printf( "Average Time per frame: %3.1f ms\n",
d->totalTime/d->frames );
}
// clean up memory allocated on the GPU
void anim_exit( DataBlock *d )
{
hipUnbindTexture( texIn );
hipUnbindTexture( texOut );
hipUnbindTexture( texConstSrc );
HANDLE_ERROR( hipFree( d->dev_inSrc ) );
HANDLE_ERROR( hipFree( d->dev_outSrc ) );
HANDLE_ERROR( hipFree( d->dev_constSrc ) );
HANDLE_ERROR( hipEventDestroy( d->start ) );
HANDLE_ERROR( hipEventDestroy( d->stop ) );
}
int main( void )
{
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR( hipEventCreate( &data.start));
HANDLE_ERROR( hipEventCreate( &data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR( hipMalloc( (void**)&data.output_bitmap, imageSize ));
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR( hipMalloc( (void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR( hipMalloc( (void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR( hipMalloc( (void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR( hipBindTexture( NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR( hipBindTexture( NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR( hipBindTexture( NULL, texOut, data.dev_outSrc, imageSize));
// initialize the constant data
float *temp = (float*)malloc( imageSize );
for (int i=0; i<DIM*DIM; i++)
{
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2;
temp[DIM*700+100] = MIN_TEMP;
temp[DIM*300+300] = MIN_TEMP;
temp[DIM*200+700] = MIN_TEMP;
for (int y=800; y<900; y++)
{
for (int x=400; x<500; x++)
{
temp[x+y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR( hipMemcpy( data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice));
// initialize the input data
for (int y=800; y<DIM; y++)
{
for (int x=0; x<200; x++)
{
temp[x+y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR( hipMemcpy( data.dev_inSrc, temp, imageSize, hipMemcpyHostToDevice));
free( temp );
bitmap.anim_and_exit( (void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit );
return 0;
}
| 2d18fd6f8d775062d1320ff973ce69831d8cc986.cu | #include <cuda.h>
#include <book.h>
#include <cpu_anim.h>
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
// these exist on the GPU side
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
// this kernel takes in a 2-d array of floats
// it updates the value-of-interest by a scaled value based
// on itself and its nearest neighbors
__global__ void blend_kernel( float *dst, bool dstOut )
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM-1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM-1) bottom -= DIM;
float t, l, c, r, b;
if (dstOut)
{
t = tex1Dfetch(texIn,top);
l = tex1Dfetch(texIn,left);
c = tex1Dfetch(texIn,offset);
r = tex1Dfetch(texIn,right);
b = tex1Dfetch(texIn,bottom);
} else
{
t = tex1Dfetch(texOut,top);
l = tex1Dfetch(texOut,left);
c = tex1Dfetch(texOut,offset);
r = tex1Dfetch(texOut,right);
b = tex1Dfetch(texOut,bottom);
}
dst[offset] = c + SPEED * (t + b + r + l - 4 * c);
}
// NOTE - texOffsetConstSrc could either be passed as a
// parameter to this function, or passed in __constant__ memory
// if we declared it as a global above, it would be
// a parameter here:
// __global__ void copy_const_kernel( float *iptr,
// size_t texOffset )
__global__ void copy_const_kernel( float *iptr )
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(texConstSrc,offset);
if (c != 0)
iptr[offset] = c;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
cudaEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu( DataBlock *d, int ticks )
{
HANDLE_ERROR( cudaEventRecord( d->start, 0 ) );
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
CPUAnimBitmap *bitmap = d->bitmap;
// since tex is global and bound, we have to use a flag to
// select which is in/out per iteration
volatile bool dstOut = true;
for (int i=0; i<90; i++)
{
float *in, *out;
if (dstOut)
{
in = d->dev_inSrc;
out = d->dev_outSrc;
} else
{
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel<<<blocks,threads>>>( in );
blend_kernel<<<blocks,threads>>>( out, dstOut);
dstOut = !dstOut;
}
float_to_color<<<blocks,threads>>>( d->output_bitmap, d->dev_inSrc );
HANDLE_ERROR( cudaMemcpy( bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR( cudaEventRecord( d->stop, 0 ));
HANDLE_ERROR( cudaEventSynchronize( d->stop));
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, d->start, d->stop ));
d->totalTime += elapsedTime;
++d->frames;
printf( "Average Time per frame: %3.1f ms\n",
d->totalTime/d->frames );
}
// clean up memory allocated on the GPU
void anim_exit( DataBlock *d )
{
cudaUnbindTexture( texIn );
cudaUnbindTexture( texOut );
cudaUnbindTexture( texConstSrc );
HANDLE_ERROR( cudaFree( d->dev_inSrc ) );
HANDLE_ERROR( cudaFree( d->dev_outSrc ) );
HANDLE_ERROR( cudaFree( d->dev_constSrc ) );
HANDLE_ERROR( cudaEventDestroy( d->start ) );
HANDLE_ERROR( cudaEventDestroy( d->stop ) );
}
int main( void )
{
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR( cudaEventCreate( &data.start));
HANDLE_ERROR( cudaEventCreate( &data.stop));
int imageSize = bitmap.image_size();
HANDLE_ERROR( cudaMalloc( (void**)&data.output_bitmap, imageSize ));
// assume float == 4 chars in size (ie rgba)
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_inSrc, imageSize));
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_outSrc, imageSize));
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_constSrc, imageSize));
HANDLE_ERROR( cudaBindTexture( NULL, texConstSrc, data.dev_constSrc, imageSize));
HANDLE_ERROR( cudaBindTexture( NULL, texIn, data.dev_inSrc, imageSize));
HANDLE_ERROR( cudaBindTexture( NULL, texOut, data.dev_outSrc, imageSize));
// initialize the constant data
float *temp = (float*)malloc( imageSize );
for (int i=0; i<DIM*DIM; i++)
{
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2;
temp[DIM*700+100] = MIN_TEMP;
temp[DIM*300+300] = MIN_TEMP;
temp[DIM*200+700] = MIN_TEMP;
for (int y=800; y<900; y++)
{
for (int x=400; x<500; x++)
{
temp[x+y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR( cudaMemcpy( data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice));
// initialize the input data
for (int y=800; y<DIM; y++)
{
for (int x=0; x<200; x++)
{
temp[x+y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR( cudaMemcpy( data.dev_inSrc, temp, imageSize, cudaMemcpyHostToDevice));
free( temp );
bitmap.anim_and_exit( (void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit );
return 0;
}
|
9f4e8bc117460d1a726e657de804b7f777086e3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/operators/optimizers/adagrad_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
hipLaunchKernelGGL(( SparseAdagradFunctorKernel<
T, 256>), dim3(grid2), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(),
grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr,
param_data, moment_data, grad_width, epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
| 9f4e8bc117460d1a726e657de804b7f777086e3c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/operators/optimizers/adagrad_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
auto grad_square =
SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
SparseAdagradFunctorKernel<
T, 256><<<grid2, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(
grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr,
param_data, moment_data, grad_width, epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
aa74b69fa4c04f63f48a80278bd878aea7bb7946.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixMul(float *darray_1, float *darray_2 , float *dres_arr, int n){
// cols and rows definition
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// Mat mult operation
for(int i = 0; i<n; i++){
dres_arr[row*n+col]+= darray_1[row*n+i]*darray_2[col+i*n];
// printf("row %i * height %i col %i index %i res %f\n", row, n, col, i, dres_arr[row*n+col]);
}
} | aa74b69fa4c04f63f48a80278bd878aea7bb7946.cu | #include "includes.h"
__global__ void MatrixMul(float *darray_1, float *darray_2 , float *dres_arr, int n){
// cols and rows definition
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// Mat mult operation
for(int i = 0; i<n; i++){
dres_arr[row*n+col]+= darray_1[row*n+i]*darray_2[col+i*n];
// printf("row %i * height %i col %i index %i res %f\n", row, n, col, i, dres_arr[row*n+col]);
}
} |
8d1e60804b9f377a33930f4ed19ecf04753caf98.hip | // !!! This is a file automatically generated by hipify!!!
#include "finalProject.cuh"
// CUDA Runtime
#include <hip/hip_runtime.h>
// Utilities and system includes
#include <helper_cuda.h>
#include <helper_functions.h>
#include <algorithm>
#include <chrono>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n\n", argv[0]);
hipDeviceProp_t deviceProp;
deviceProp.major = 1;
deviceProp.minor = 0;
int minimumComputeVersion = 10;
int dev;
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if ((deviceProp.major * 10 + deviceProp.minor) >= minimumComputeVersion)
{
printf("Using Device %d: %s\n\n", dev, deviceProp.name);
checkCudaErrors(hipSetDevice(dev));
}
else
{
printf("Error: the selected device does not support the minimum compute capability of %d.%d.\n\n",
minimumComputeVersion / 10, minimumComputeVersion % 10);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
bool bResult = false;
bResult = runTest(argc, argv);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
printf(bResult ? "Test passed\n" : "Test failed!\n");
}
char* calculateCPU(int boardHeight, int boardWidth, int colors, char * input_data, int epochs)
{
char * res_data = NULL;
char * lastEpoch = NULL;
char * newEpoch = NULL;
if (epochs > 0)
{
lastEpoch = (char *)malloc(boardHeight*boardWidth*sizeof(char));
newEpoch = (char *)malloc(boardHeight*boardWidth*sizeof(char));
// for the first time only - copy the input_data to lastEpoch.
for (int i = 0; i < boardHeight*boardWidth; i++)
{
lastEpoch[i] = input_data[i];
}
for (int i = 0; i < epochs; i++)
{
for (int currRow = 0; currRow < boardHeight; currRow++)
{
for (int currCol = 0; currCol < boardWidth; currCol++)
{
char currCellColor = lastEpoch[(currRow * boardWidth) + currCol];
char nextColor = (currCellColor + 1) % colors;
int NW_ROW = ((boardHeight + currRow - 1) % boardHeight);
int NW_COL = ((boardWidth + currCol - 1) % boardWidth);
int NW_POS = NW_ROW*boardWidth + NW_COL;
int N_ROW = ((boardHeight + currRow - 1) % boardHeight);
int N_COL = ((boardWidth + currCol) % boardWidth);
int N_POS = N_ROW*boardWidth + N_COL;
int NE_ROW = ((boardHeight + currRow - 1) % boardHeight);
int NE_COL = ((boardWidth + currCol + 1) % boardWidth);
int NE_POS = NE_ROW*boardWidth + NE_COL;
int W_ROW = ((boardHeight + currRow) % boardHeight);
int W_COL = ((boardWidth + currCol - 1) % boardWidth);
int W_POS = W_ROW*boardWidth + W_COL;
int E_ROW = ((boardHeight + currRow) % boardHeight);
int E_COL = ((boardWidth + currCol + 1) % boardWidth);
int E_POS = E_ROW*boardWidth + E_COL;
int SW_ROW = ((boardHeight + currRow + 1) % boardHeight);
int SW_COL = ((boardWidth + currCol - 1) % boardWidth);
int SW_POS = SW_ROW*boardWidth + SW_COL;
int S_ROW = ((boardHeight + currRow + 1) % boardHeight);
int S_COL = ((boardWidth + currCol) % boardWidth);
int S_POS = S_ROW*boardWidth + S_COL;
int SE_ROW = ((boardHeight + currRow + 1) % boardHeight);
int SE_COL = ((boardWidth + currCol + 1) % boardWidth);
int SE_POS = SE_ROW*boardWidth + SE_COL;
if ((lastEpoch[NW_POS] == nextColor) || (lastEpoch[N_POS] == nextColor) || (lastEpoch[NE_POS] == nextColor) || (lastEpoch[SW_POS] == nextColor) ||
(lastEpoch[S_POS] == nextColor) || (lastEpoch[SE_POS] == nextColor) || (lastEpoch[W_POS] == nextColor) || (lastEpoch[E_POS] == nextColor))
{
newEpoch[(currRow * boardWidth) + currCol] = nextColor;
}
else
{
newEpoch[(currRow * boardWidth) + currCol] = currCellColor;
}
}
}
res_data = newEpoch;
char* temp = lastEpoch;
lastEpoch = newEpoch;
newEpoch = temp;
}
free(newEpoch);
//res_data = lastEpoch;
}
return res_data;
}
char* generateRandomData(int size, int colors)
{
char * data = (char *)malloc(size * sizeof(char));
for (int i = 0; i<size; i++)
{
// Keep the numbers in the range of 0 to COLORS-1
//data[i] = i%colors;
data[i] = ((int)(rand() & 0xFF)) % colors;
}
return data;
}
bool runTest(int argc, char **argv)
{
int boardWidth = atoi(argv[2]);// BOARD_WIDTH;
int boardHeight = atoi(argv[2]);
int colors = COLORS;
int epochs = EPOCHS;
int maxThreads = 256; // number of threads per block
int maxBlocks = 65535;
long size = boardWidth*boardHeight;
int numBlocks = 0;
int numThreads = 0;
int kernelId = atoi(argv[1]);
getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads);
printf("board size %d X %d\n", boardWidth, boardHeight);
printf("amount of colors is %d\n", colors);
printf("amount of EPOCHS is %d\n", epochs);
printf("num of blocks: %d \n", numBlocks);
printf("num of threads: %d \n", numThreads);
printf("kernelId: %d \n", kernelId);
// create random input data on CPU
char *h_idata = generateRandomData(size, colors);
// calculate the CPU result
auto t_start = std::chrono::high_resolution_clock::now();
char *cpu_odata = calculateCPU(boardHeight, boardWidth, colors, h_idata, epochs);
auto t_end = std::chrono::high_resolution_clock::now();
printf("Wall clock time passed: %f ms\n", std::chrono::duration<double, std::milli>(t_end - t_start).count());
outputBoardToFile(cpu_odata, boardHeight, boardWidth, colors, "C:\\Users\\yuval\\Downloads\\1.ppm");
// allocate mem for the result on host side
char *h_odata = (char *)malloc(size*sizeof(char));
// allocate device memory and data
char *d_idata = NULL;
char *d_odata = NULL;
checkCudaErrors(hipMalloc((void **)&d_idata, size*sizeof(char)));
checkCudaErrors(hipMalloc((void **)&d_odata, size*sizeof(char)));
// copy data directly to device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, size*sizeof(char), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_idata, size*sizeof(char), hipMemcpyHostToDevice));
t_start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < 10; i++)
{
reduce(boardHeight, boardWidth, numThreads, numBlocks, &d_idata, &d_odata, epochs, kernelId );
}
t_end = std::chrono::high_resolution_clock::now();
printf("Wall clock time passed: %f ms\n", std::chrono::duration<double, std::milli>(t_end - t_start).count()/10);
gpuErrchk(hipMemcpy(h_odata, d_idata, size*sizeof(char), hipMemcpyDeviceToHost));
bool isSame = true;
for (int i = 0; i < size; i++)
{
if (cpu_odata[i] != h_odata[i])
{
isSame = false;
break;
}
}
// cleanup
free(h_idata);
free(h_odata);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return isSame;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use
// we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
////////////////////////////////////////////////////////////////////////////////
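// Illustrative example (added; numbers assume a device with maxThreadsPerBlock == 1024):
// for n = 2^20 elements and maxThreads = 256, n is not below maxThreads, so threads becomes
// 1024 and blocks = n / threads = 1024, which stays under the maxBlocks cap of 65535 that is
// applied in the last line of this function.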
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size excceed the upbound
hipDeviceProp_t prop;
int device;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&prop, device));
//threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
threads = (n < maxThreads) ? nextPow2(n) : prop.maxThreadsPerBlock;
//blocks = (n + (threads * 2 - 1)) / (threads * 2);
blocks = n / threads;
//threads = 256;
//blocks = 8;
if ((float)threads*blocks >(float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
// if (blocks > prop.maxGridSize[0])
// {
// printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
// blocks, prop.maxGridSize[0], threads * 2, threads);
//
// blocks /= 2;
// threads *= 2;
//}
blocks = (maxBlocks< blocks ? maxBlocks : blocks);
} | 8d1e60804b9f377a33930f4ed19ecf04753caf98.cu | #include "finalProject.cuh"
// CUDA Runtime
#include <cuda_runtime.h>
// Utilities and system includes
#include <helper_cuda.h>
#include <helper_functions.h>
#include <algorithm>
#include <chrono>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n\n", argv[0]);
cudaDeviceProp deviceProp;
deviceProp.major = 1;
deviceProp.minor = 0;
int minimumComputeVersion = 10;
int dev;
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
if ((deviceProp.major * 10 + deviceProp.minor) >= minimumComputeVersion)
{
printf("Using Device %d: %s\n\n", dev, deviceProp.name);
checkCudaErrors(cudaSetDevice(dev));
}
else
{
printf("Error: the selected device does not support the minimum compute capability of %d.%d.\n\n",
minimumComputeVersion / 10, minimumComputeVersion % 10);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
bool bResult = false;
bResult = runTest(argc, argv);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
printf(bResult ? "Test passed\n" : "Test failed!\n");
}
char* calculateCPU(int boardHeight, int boardWidth, int colors, char * input_data, int epochs)
{
char * res_data = NULL;
char * lastEpoch = NULL;
char * newEpoch = NULL;
if (epochs > 0)
{
lastEpoch = (char *)malloc(boardHeight*boardWidth*sizeof(char));
newEpoch = (char *)malloc(boardHeight*boardWidth*sizeof(char));
// for the first time only - copy the input_data to lastEpoch.
for (int i = 0; i < boardHeight*boardWidth; i++)
{
lastEpoch[i] = input_data[i];
}
for (int i = 0; i < epochs; i++)
{
for (int currRow = 0; currRow < boardHeight; currRow++)
{
for (int currCol = 0; currCol < boardWidth; currCol++)
{
char currCellColor = lastEpoch[(currRow * boardWidth) + currCol];
char nextColor = (currCellColor + 1) % colors;
int NW_ROW = ((boardHeight + currRow - 1) % boardHeight);
int NW_COL = ((boardWidth + currCol - 1) % boardWidth);
int NW_POS = NW_ROW*boardWidth + NW_COL;
int N_ROW = ((boardHeight + currRow - 1) % boardHeight);
int N_COL = ((boardWidth + currCol) % boardWidth);
int N_POS = N_ROW*boardWidth + N_COL;
int NE_ROW = ((boardHeight + currRow - 1) % boardHeight);
int NE_COL = ((boardWidth + currCol + 1) % boardWidth);
int NE_POS = NE_ROW*boardWidth + NE_COL;
int W_ROW = ((boardHeight + currRow) % boardHeight);
int W_COL = ((boardWidth + currCol - 1) % boardWidth);
int W_POS = W_ROW*boardWidth + W_COL;
int E_ROW = ((boardHeight + currRow) % boardHeight);
int E_COL = ((boardWidth + currCol + 1) % boardWidth);
int E_POS = E_ROW*boardWidth + E_COL;
int SW_ROW = ((boardHeight + currRow + 1) % boardHeight);
int SW_COL = ((boardWidth + currCol - 1) % boardWidth);
int SW_POS = SW_ROW*boardWidth + SW_COL;
int S_ROW = ((boardHeight + currRow + 1) % boardHeight);
int S_COL = ((boardWidth + currCol) % boardWidth);
int S_POS = S_ROW*boardWidth + S_COL;
int SE_ROW = ((boardHeight + currRow + 1) % boardHeight);
int SE_COL = ((boardWidth + currCol + 1) % boardWidth);
int SE_POS = SE_ROW*boardWidth + SE_COL;
if ((lastEpoch[NW_POS] == nextColor) || (lastEpoch[N_POS] == nextColor) || (lastEpoch[NE_POS] == nextColor) || (lastEpoch[SW_POS] == nextColor) ||
(lastEpoch[S_POS] == nextColor) || (lastEpoch[SE_POS] == nextColor) || (lastEpoch[W_POS] == nextColor) || (lastEpoch[E_POS] == nextColor))
{
newEpoch[(currRow * boardWidth) + currCol] = nextColor;
}
else
{
newEpoch[(currRow * boardWidth) + currCol] = currCellColor;
}
}
}
res_data = newEpoch;
char* temp = lastEpoch;
lastEpoch = newEpoch;
newEpoch = temp;
}
free(newEpoch);
//res_data = lastEpoch;
}
return res_data;
}
char* generateRandomData(int size, int colors)
{
char * data = (char *)malloc(size * sizeof(char));
for (int i = 0; i<size; i++)
{
// Keep the numbers in the range of 0 to COLORS-1
//data[i] = i%colors;
data[i] = ((int)(rand() & 0xFF)) % colors;
}
return data;
}
bool runTest(int argc, char **argv)
{
int boardWidth = atoi(argv[2]);// BOARD_WIDTH;
int boardHeight = atoi(argv[2]);
int colors = COLORS;
int epochs = EPOCHS;
int maxThreads = 256; // number of threads per block
int maxBlocks = 65535;
long size = boardWidth*boardHeight;
int numBlocks = 0;
int numThreads = 0;
int kernelId = atoi(argv[1]);
getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads);
printf("board size %d X %d\n", boardWidth, boardHeight);
printf("amount of colors is %d\n", colors);
printf("amount of EPOCHS is %d\n", epochs);
printf("num of blocks: %d \n", numBlocks);
printf("num of threads: %d \n", numThreads);
printf("kernelId: %d \n", kernelId);
// create random input data on CPU
char *h_idata = generateRandomData(size, colors);
// calculate the CPU result
auto t_start = std::chrono::high_resolution_clock::now();
char *cpu_odata = calculateCPU(boardHeight, boardWidth, colors, h_idata, epochs);
auto t_end = std::chrono::high_resolution_clock::now();
printf("Wall clock time passed: %f ms\n", std::chrono::duration<double, std::milli>(t_end - t_start).count());
outputBoardToFile(cpu_odata, boardHeight, boardWidth, colors, "C:\\Users\\yuval\\Downloads\\1.ppm");
// allocate mem for the result on host side
char *h_odata = (char *)malloc(size*sizeof(char));
// allocate device memory and data
char *d_idata = NULL;
char *d_odata = NULL;
checkCudaErrors(cudaMalloc((void **)&d_idata, size*sizeof(char)));
checkCudaErrors(cudaMalloc((void **)&d_odata, size*sizeof(char)));
// copy data directly to device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, size*sizeof(char), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_idata, size*sizeof(char), cudaMemcpyHostToDevice));
t_start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < 10; i++)
{
reduce(boardHeight, boardWidth, numThreads, numBlocks, &d_idata, &d_odata, epochs, kernelId );
}
t_end = std::chrono::high_resolution_clock::now();
printf("Wall clock time passed: %f ms\n", std::chrono::duration<double, std::milli>(t_end - t_start).count()/10);
gpuErrchk(cudaMemcpy(h_odata, d_idata, size*sizeof(char), cudaMemcpyDeviceToHost));
bool isSame = true;
for (int i = 0; i < size; i++)
{
if (cpu_odata[i] != h_odata[i])
{
isSame = false;
break;
}
}
// cleanup
free(h_idata);
free(h_odata);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return isSame;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use
// we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size excceed the upbound
cudaDeviceProp prop;
int device;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&prop, device));
//threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
threads = (n < maxThreads) ? nextPow2(n) : prop.maxThreadsPerBlock;
//blocks = (n + (threads * 2 - 1)) / (threads * 2);
blocks = n / threads;
//threads = 256;
//blocks = 8;
if ((float)threads*blocks >(float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
// if (blocks > prop.maxGridSize[0])
// {
// printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
// blocks, prop.maxGridSize[0], threads * 2, threads);
//
// blocks /= 2;
// threads *= 2;
//}
blocks = (maxBlocks< blocks ? maxBlocks : blocks);
} |
8c128c26226d39979e3136d37fed73fe9f76b92c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file object.cu
 * @author Thomas Müller, NVIDIA
* @brief API interface of a TCNN object
*/
#include <tiny-cuda-nn/object.h>
#include <tiny-cuda-nn/common.h>
#include <tiny-cuda-nn/common_device.h>
TCNN_NAMESPACE_BEGIN
template <typename T>
__global__ void one_hot_batched_kernel(const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, T* out, float scale) {
const uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_elements) return;
const uint32_t dim = i % width;
out[i] = dim == one_hot_dim ? (T)scale : (T)0.0f;
}
template <typename T>
void one_hot_batched(hipStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, T* out, float scale) {
linear_kernel(one_hot_batched_kernel<T>, 0, stream, num_elements, width, one_hot_dim, out, scale);
}
template void one_hot_batched(hipStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, float* out, float scale);
template void one_hot_batched(hipStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, __half* out, float scale);
template <typename T>
void mult(hipStream_t stream, const uint32_t num_elements, T* inout, float factor) {
linear_kernel(mult_scalar_kernel<T>, 0, stream, num_elements, inout, factor);
}
template void mult(hipStream_t stream, const uint32_t num_elements, float* inout, float factor);
template void mult(hipStream_t stream, const uint32_t num_elements, __half* inout, float factor);
template <typename T>
void trim_and_cast_from(hipStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const T* in, float* out) {
if (layout == RM) {
linear_kernel(cast_from<T>, 0, stream, num_elements, in, out);
} else {
linear_kernel(trim_and_cast<T>, 0, stream, num_elements, input_width, output_width, in, out);
}
}
template void trim_and_cast_from(hipStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const float* in, float* out);
template void trim_and_cast_from(hipStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const __half* in, float* out);
TCNN_NAMESPACE_END
| 8c128c26226d39979e3136d37fed73fe9f76b92c.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*//*
*/
/** @file object.cu
* @author Thomas Müller, NVIDIA
* @brief API interface of a TCNN object
*/
#include <tiny-cuda-nn/object.h>
#include <tiny-cuda-nn/common.h>
#include <tiny-cuda-nn/common_device.h>
TCNN_NAMESPACE_BEGIN
template <typename T>
__global__ void one_hot_batched_kernel(const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, T* out, float scale) {
const uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= num_elements) return;
const uint32_t dim = i % width;
out[i] = dim == one_hot_dim ? (T)scale : (T)0.0f;
}
template <typename T>
void one_hot_batched(cudaStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, T* out, float scale) {
linear_kernel(one_hot_batched_kernel<T>, 0, stream, num_elements, width, one_hot_dim, out, scale);
}
template void one_hot_batched(cudaStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, float* out, float scale);
template void one_hot_batched(cudaStream_t stream, const uint32_t num_elements, const uint32_t width, const uint32_t one_hot_dim, __half* out, float scale);
template <typename T>
void mult(cudaStream_t stream, const uint32_t num_elements, T* inout, float factor) {
linear_kernel(mult_scalar_kernel<T>, 0, stream, num_elements, inout, factor);
}
template void mult(cudaStream_t stream, const uint32_t num_elements, float* inout, float factor);
template void mult(cudaStream_t stream, const uint32_t num_elements, __half* inout, float factor);
template <typename T>
void trim_and_cast_from(cudaStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const T* in, float* out) {
if (layout == RM) {
linear_kernel(cast_from<T>, 0, stream, num_elements, in, out);
} else {
linear_kernel(trim_and_cast<T>, 0, stream, num_elements, input_width, output_width, in, out);
}
}
template void trim_and_cast_from(cudaStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const float* in, float* out);
template void trim_and_cast_from(cudaStream_t stream, const MatrixLayout layout, const uint32_t num_elements, const uint32_t input_width, const uint32_t output_width, const __half* in, float* out);
TCNN_NAMESPACE_END
|
d8fbe08672b5783c7ef2e53757b84e6721792f50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
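// Explanatory note (added, not part of the original file): a candidate assignment is packed into
// two 64-bit words; getval() reads variable |v| from bit |v| of `id` when |v| <= 30 and from bit
// |v|-31 of `ie` otherwise. test() treats every three consecutive ints in `raw` as one 3-literal
// clause, a negative literal being satisfied when its variable is false, so the formula is in CNF.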
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
hipMalloc((void**)&rawd, sizeof(int)*n);
hipMalloc((void**)&resd, m+1);
hipMalloc((void**)&flag, sizeof(int));
hipMemcpy(rawd, raw, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(flag, &mflag, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bf), dim3(1048576),dim3(1024), 0, 0, n, m, rawd, resd, flag);
hipMemcpy(&mflag, flag, sizeof(int), hipMemcpyDeviceToHost);
if (mflag){
hipMemcpy(res, resd, m+1, hipMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
hipFree(rawd);
hipFree(resd);
hipFree(flag);
free(raw);
free(res);
}
| d8fbe08672b5783c7ef2e53757b84e6721792f50.cu | #include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
cudaMalloc((void**)&rawd, sizeof(int)*n);
cudaMalloc((void**)&resd, m+1);
cudaMalloc((void**)&flag, sizeof(int));
cudaMemcpy(rawd, raw, sizeof(int)*n, cudaMemcpyHostToDevice);
cudaMemcpy(flag, &mflag, sizeof(int), cudaMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
cudaMemcpy(&mflag, flag, sizeof(int), cudaMemcpyDeviceToHost);
if (mflag){
cudaMemcpy(res, resd, m+1, cudaMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
cudaFree(rawd);
cudaFree(resd);
cudaFree(flag);
free(raw);
free(res);
}
|
9ffc9bc7ebc5f006837a18e7cfb922d42fc881b6.hip | // !!! This is a file automatically generated by hipify!!!
//MIT License
//
//Copyright(c) 2020 Zheng Jiaqi @NUSComputing
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files(the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions :
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "hip/hip_runtime.h"
#include <stdio.h>
#define TOID(x, y, n) (y) * (n) + (x)
__host__ __device__ double scalar_product(double x1, double y1, double x2, double y2){
return x1 * x2 + y1 * y2;
}
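// Explanatory note (added): the next function computes barycentric coordinates of (x0, y0) with
// respect to the triangle (x1,y1), (x2,y2), (x3,y3) by solving the 2x2 normal equations
// [d00 d01; d01 d11] * [w1; w2] = [d20; d21] with Cramer's rule and setting w3 = 1 - w1 - w2.
// A degenerate triangle (denom == 0) returns -1 for all weights, and kernelDiscretization treats
// any negative weight as "point lies outside this triangle".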
__host__ __device__ void barycentric_coordinate(
double x1, double y1, double x2, double y2,
double x3, double y3, double x0, double y0,
double &w3, double &w1, double &w2
){
double v0x = x2 - x1, v0y = y2 - y1;
double v1x = x3 - x1, v1y = y3 - y1;
double v2x = x0 - x1, v2y = y0 - y1;
double d00 = scalar_product(v0x, v0y, v0x, v0y);
double d01 = scalar_product(v0x, v0y, v1x, v1y);
double d11 = scalar_product(v1x, v1y, v1x, v1y);
double d20 = scalar_product(v2x, v2y, v0x, v0y);
double d21 = scalar_product(v2x, v2y, v1x, v1y);
double denom = d00 * d11 - d01 * d01;
if (denom == 0) {
w1 = w2 = w3 = -1;
return;
}
w1 = (d11 * d20 - d01 * d21) / denom;
w2 = (d00 * d21 - d01 * d20) / denom;
w3 = 1.0 - w1 - w2;
}
__global__ void kernelDiscretization(
double *points,
double *weight,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int id = TOID(tx, ty, n);
double x = tx * scale, y = ty * scale;
float res = 0;
for (int i = 0; i < num_tri; ++i) {
int p1 = triangle[i*3], p2 = triangle[i*3+1], p3 = triangle[i*3+2];
double x1, x2, x3, y1, y2, y3, w1, w2, w3;
x1 = points[p1 << 1], y1 = points[p1 << 1 | 1];
x2 = points[p2 << 1], y2 = points[p2 << 1 | 1];
x3 = points[p3 << 1], y3 = points[p3 << 1 | 1];
barycentric_coordinate(x1, y1, x2, y2, x3, y3, x, y, w1, w2, w3);
if (w1 < 0 || w2 < 0 || w3 < 0) continue;
density[id] = w1 * weight[p1] + w2 * weight[p2] + w3 * weight[p3];
return;
}
density[id] = 0;
return;
}
void discretization_d(
double *points,
double *weight,
int num_point,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
double *points_d, *weight_d;
float *density_d;
int *triangle_d;
hipMalloc((void **) &points_d, num_point * sizeof(double) * 2);
hipMalloc((void **) &weight_d, num_point * sizeof(double));
hipMalloc((void **) &triangle_d, num_tri * sizeof(int) * 3);
hipMalloc((void **) &density_d, n * n * sizeof(float));
hipMemcpy(points_d, points, num_point * sizeof(double) * 2, hipMemcpyHostToDevice);
hipMemcpy(weight_d, weight, num_point * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(triangle_d, triangle, num_tri * sizeof(int) * 3, hipMemcpyHostToDevice);
dim3 block(16, 16);
dim3 grid(n/block.x, n/block.y);
hipLaunchKernelGGL(( kernelDiscretization) , dim3(grid), dim3(block) , 0, 0, points_d, weight_d, triangle_d, num_tri, density_d, scale, n);
hipMemcpy(density, density_d, n * n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(points_d);
hipFree(weight_d);
hipFree(triangle_d);
hipFree(density_d);
}
| 9ffc9bc7ebc5f006837a18e7cfb922d42fc881b6.cu | //MIT License
//
//Copyright(c) 2020 Zheng Jiaqi @NUSComputing
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files(the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions :
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "cuda_runtime.h"
#include <stdio.h>
#define TOID(x, y, n) (y) * (n) + (x)
__host__ __device__ double scalar_product(double x1, double y1, double x2, double y2){
return x1 * x2 + y1 * y2;
}
__host__ __device__ void barycentric_coordinate(
double x1, double y1, double x2, double y2,
double x3, double y3, double x0, double y0,
double &w3, double &w1, double &w2
){
double v0x = x2 - x1, v0y = y2 - y1;
double v1x = x3 - x1, v1y = y3 - y1;
double v2x = x0 - x1, v2y = y0 - y1;
double d00 = scalar_product(v0x, v0y, v0x, v0y);
double d01 = scalar_product(v0x, v0y, v1x, v1y);
double d11 = scalar_product(v1x, v1y, v1x, v1y);
double d20 = scalar_product(v2x, v2y, v0x, v0y);
double d21 = scalar_product(v2x, v2y, v1x, v1y);
double denom = d00 * d11 - d01 * d01;
if (denom == 0) {
w1 = w2 = w3 = -1;
return;
}
w1 = (d11 * d20 - d01 * d21) / denom;
w2 = (d00 * d21 - d01 * d20) / denom;
w3 = 1.0 - w1 - w2;
}
__global__ void kernelDiscretization(
double *points,
double *weight,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int id = TOID(tx, ty, n);
double x = tx * scale, y = ty * scale;
float res = 0;
for (int i = 0; i < num_tri; ++i) {
int p1 = triangle[i*3], p2 = triangle[i*3+1], p3 = triangle[i*3+2];
double x1, x2, x3, y1, y2, y3, w1, w2, w3;
x1 = points[p1 << 1], y1 = points[p1 << 1 | 1];
x2 = points[p2 << 1], y2 = points[p2 << 1 | 1];
x3 = points[p3 << 1], y3 = points[p3 << 1 | 1];
barycentric_coordinate(x1, y1, x2, y2, x3, y3, x, y, w1, w2, w3);
if (w1 < 0 || w2 < 0 || w3 < 0) continue;
density[id] = w1 * weight[p1] + w2 * weight[p2] + w3 * weight[p3];
return;
}
density[id] = 0;
return;
}
void discretization_d(
double *points,
double *weight,
int num_point,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
double *points_d, *weight_d;
float *density_d;
int *triangle_d;
cudaMalloc((void **) &points_d, num_point * sizeof(double) * 2);
cudaMalloc((void **) &weight_d, num_point * sizeof(double));
cudaMalloc((void **) &triangle_d, num_tri * sizeof(int) * 3);
cudaMalloc((void **) &density_d, n * n * sizeof(float));
cudaMemcpy(points_d, points, num_point * sizeof(double) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(weight_d, weight, num_point * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(triangle_d, triangle, num_tri * sizeof(int) * 3, cudaMemcpyHostToDevice);
dim3 block(16, 16);
dim3 grid(n/block.x, n/block.y);
kernelDiscretization <<< grid, block >>> (points_d, weight_d, triangle_d, num_tri, density_d, scale, n);
cudaMemcpy(density, density_d, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(points_d);
cudaFree(weight_d);
cudaFree(triangle_d);
cudaFree(density_d);
}
|
d6dfec405b3f11e5043e9319f6e93b841595c46d.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by fabian on 28.05.21.
//
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <assert.h>
#include "main.cuh"
#include "helper.cuh"
#include "GPUStream.cuh"
#include "StreamFunctions.cuh"
#include "device_information.cuh"
#include "StreamFunctionHelper.cuh"
#define MAX_STREAMS 10
// Internal function declarations
__global__ void time_cuda(int* input_timestamp, int* output_timestamps, int* output_values, int size, int*offs, int* resultOffse);
__global__ void last_cuda(int* input_timestamp, int* input_values,int*unit_stream_timestamps, int* output_timestamps, int* output_values,int IntStreamSize, int size, int* offsInt, int* offsUnit);
void delay_preliminary_prune(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, hipStream_t stream);
__global__ void delay_cuda_preliminary_prune(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int size, int resetSize, int *offset, int *resetOffset, hipStream_t stream);
__global__ void delay_iteration(int* reset_timestamps, int* reset_offset, int reset_size, int* inputInt_timestamps, int* inputInt_values, int* inputInt_offset, int inputInt_size, int* result_timestamps, int* result_offset, int result_size, hipStream_t stream);
__global__ void delay_cuda(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int *results, int size, int inputSize, int *inputOffset, int *resetOffset, int* resultOffset, int maxTimestamp, hipStream_t stream);
__global__ void lift_cuda( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int threads, int x_len, int y_len,
int op, int *valid, int *invalid,
int *out_ts_cpy, int *out_v_cpy, int *invalid_offset,
int *x_offset, int *y_offset);
__global__ void final_reduce(int* block_red,int size,int* offset);
__global__ void assign_vals(int *input, int *result, int *input_offset, int *result_offset, int size);
__device__ int lookUpElement(int size,int searchValue, int * input_timestamp);
__device__ int lookUpNextElement(int size,int searchValue, int * timestamps);
__global__ void calculate_offset(int* timestamps, int* offset, int size);
__global__ void remove_invalid( int threads, int *invalid, int *valid,
int x_len, int y_len,
int *out_ts_cpy, int *out_ts,
int *out_v_cpy, int *out_v,
int *x_offset_d, int *y_offset_d,
int *result_offset, int op);
__global__ void inval_multiples_merge( int op, int threads,
int x_len, int y_len,
int *x_offset_d, int *y_offset_d,
int *out_ts, int *invalid, int *valid);
__device__ void selection_sort( int *data, int left, int right );
__global__ void cdp_simple_quicksort(int *data, int left, int right, int depth );
__global__ void assign_vals(int *input, int *result_v, int *result_ts, int *input_offset, int *result_offset, int size);
/**
* Externally called functions
**/
// https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#numa-best-practices
// ADD stream argument to enable multiple kernels in parallel (10.5. Concurrent Kernel Execution)
// Note:Low Medium Priority: Use signed integers rather than unsigned integers as loop counters.
std::shared_ptr<GPUIntStream> time(std::shared_ptr<GPUIntStream> input, hipStream_t stream){
int threads = input->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
// Create new stream on device the size of the input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size, true);
// Fire off the actual calculation
hipLaunchKernelGGL(( time_cuda), dim3(blocks),dim3(block_size),0,stream, input->device_timestamp, result->device_timestamp, result->device_values, threads,input->device_offset,result->device_offset);
//printf("Scheduled time() with <<<%d,%d>>> \n",blocks,block_size);
return result;
};
std::shared_ptr<GPUIntStream> time(std::shared_ptr<GPUUnitStream> input, hipStream_t stream){
int threads = input->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
// Create new stream on device the size of the input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size, true);
// Fire off the actual calculation
hipLaunchKernelGGL(( time_cuda), dim3(blocks),dim3(block_size),0,stream, input->device_timestamp, result->device_timestamp, result->device_values, threads,input->device_offset,result->device_offset);
//printf("Scheduled time() with <<<%d,%d>>> \n",blocks,block_size);
return result;
};
std::shared_ptr<GPUIntStream> last(std::shared_ptr<GPUIntStream> inputInt, std::shared_ptr<GPUUnitStream> inputUnit, hipStream_t stream) {
int threads = (int) inputUnit->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads, &block_size, &blocks);
// Create new stream on device with the size of the unit input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(inputUnit->size, true);
result->copy_to_device();
// Fire off the CUDA calculation
hipLaunchKernelGGL(( last_cuda), dim3(blocks), dim3(block_size), 0, stream, inputInt->device_timestamp, inputInt->device_values,
inputUnit->device_timestamp, result->device_timestamp,
result->device_values, inputInt->size, threads,
inputInt->device_offset, inputUnit->device_offset);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_offset), dim3(blocks), dim3(block_size), 0, stream, result->device_timestamp, result->device_offset, threads);
return result;
}
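// Explanatory note (added, based on the calls below): delay() works on private copies of both
// inputs, first marks events of s that a reset in r makes impossible (delay_preliminary_prune
// writes -1 into their values), then seeds the device-side iteration with r itself and lets the
// single-launch delay_iteration kernel produce the output events; the result unit stream is
// allocated with s->size slots.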
std::shared_ptr<GPUUnitStream> delay(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, hipStream_t stream) {
std::shared_ptr<GPUIntStream> s_prune = std::make_shared<GPUIntStream>(*s, true);
std::shared_ptr<GPUUnitStream> r_prune = std::make_shared<GPUUnitStream>(*r, true);
// Prune GPUIntStream s, mark all events that can't possibly trigger due to a reset event with value -1
delay_preliminary_prune(s_prune, r_prune, stream);
// Prepare original input data and result output
std::shared_ptr<GPUUnitStream> prevResults = std::make_shared<GPUUnitStream>(*r_prune, true);
std::shared_ptr<GPUUnitStream> result = std::make_shared<GPUUnitStream>(s->size, true);
// Launch actual iterative algorithm on device
hipLaunchKernelGGL(( delay_iteration), dim3(1), dim3(1), 0, stream, prevResults->device_timestamp, prevResults->device_offset, prevResults->size, s_prune->device_timestamp, s_prune->device_values, s_prune->device_offset, s_prune->size, result->device_timestamp, result->device_offset, result->size, stream);
hipDeviceSynchronize();
return result;
}
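// Explanatory note (added, based on the code below): slift() turns an event-wise lift into a
// signal-semantics lift. It samples each input at the other input's timestamps via last(),
// merges those samples back into the originals to get x' and y', and only then applies the
// event-wise lift(x', y', op); plain merge and the empty-input cases short-circuit at the top.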
std::shared_ptr<GPUIntStream> slift(std::shared_ptr<GPUIntStream> x, std::shared_ptr<GPUIntStream> y, int op){
// Merge fast path
if (op == MRG){
std::shared_ptr<GPUIntStream> merge_result = lift(x, y, MRG);
hipDeviceSynchronize();
return merge_result;
}
// Fast path for 1/2 empty stream(s)
if (x->size == 0 || y->size == 0){
int *e_ts = (int*)malloc(0);
int *e_v = (int*)malloc(0);
std::shared_ptr<GPUIntStream> empty(new GPUIntStream(e_ts, e_v, 0));
empty->size = 0;
empty->copy_to_device();
return empty;
}
int *x_ts = (int*)malloc(x->size*sizeof(int));
int *y_ts = (int*)malloc(y->size*sizeof(int));
memcpy(x_ts, x->host_timestamp, x->size*sizeof(int));
memcpy(y_ts, y->host_timestamp, y->size*sizeof(int));
// xy ... y is the unit stream
int *xy_ts = (int*)malloc(y->size*sizeof(int));
int *yx_ts = (int*)malloc(x->size*sizeof(int));
int *xy_v = (int*)malloc(y->size*sizeof(int));
int *yx_v = (int*)malloc(x->size*sizeof(int));
memset(xy_ts, -1, y->size*sizeof(int));
memset(yx_ts, -1, x->size*sizeof(int));
// Make Unit streams from Int streams for last()
std::shared_ptr<GPUUnitStream> x_unit(new GPUUnitStream(x_ts, x->size, *(x->host_offset)));
std::shared_ptr<GPUUnitStream> y_unit(new GPUUnitStream(y_ts, y->size, *(y->host_offset)));
x_unit->copy_to_device();
y_unit->copy_to_device();
std::shared_ptr<GPUIntStream> last_xy = last(x, y_unit, 0);
std::shared_ptr<GPUIntStream> last_yx = last(y, x_unit, 0);
hipDeviceSynchronize();
last_yx->copy_to_host();
last_xy->copy_to_host();
std::shared_ptr<GPUIntStream> x_prime = lift(x, last_xy, MRG);
std::shared_ptr<GPUIntStream> y_prime = lift(y, last_yx, MRG);
hipDeviceSynchronize();
std::shared_ptr<GPUIntStream> result = lift(x_prime, y_prime, op);
hipDeviceSynchronize();
x_prime->free_device();
x_prime->free_host();
y_prime->free_device();
y_prime->free_host();
x_unit->free_device();
x_unit->free_host();
y_unit->free_device();
y_unit->free_host();
// MEMORY BUG WHEN FREEING LAST XY/YX
return result;
}
std::shared_ptr<GPUIntStream> lift(std::shared_ptr<GPUIntStream> x, std::shared_ptr<GPUIntStream> y, int op){
// Information about MergePath
    // https://stackoverflow.com/questions/30729106/merge-sort-using-cuda-efficient-implementation-for-small-input-arrays
/**
* See the following paper for parallel merging of sorted arrays:
 * O. Green, R. McColl, and D. Bader
* GPU merge path: a GPU merging algorithm
* International Conference on Supercomputing
* November 2014
* URL: https://www.researchgate.net/publication/254462662_GPU_merge_path_a_GPU_merging_algorithm
*
* The paper claims a runtime complexity of O(log n + n/p), p ... # of processors
*/
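    // How this maps to the paper: every thread first runs merge_path() below to find the
    // start of its partition of the two sorted timestamp arrays, then lift_partition()
    // walks that partition sequentially (vpt values per thread) and counts valid/invalid
    // output timestamps; remove_invalid() afterwards compacts the output and derives the
    // result offset from those counts.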
int block_size = 0;
int blocks = 0;
int x_offset = *(x->host_offset);
int y_offset = *(y->host_offset);
//int len_offset = x_offset+y_offset;
int threads = (x->size-x_offset) + (y->size-y_offset);
calcThreadsBlocks(threads, &block_size, &blocks);
threads = (blocks) * (block_size);
// Create Result
std::shared_ptr<GPUIntStream> result(new GPUIntStream());
result->size = x->size + y->size;
result->host_timestamp = (int*)malloc(result->size * sizeof(int));
result->host_values = (int*)malloc(result->size * sizeof(int));
result->host_offset = (int*)malloc(sizeof(int));
    memset(result->host_timestamp, -1, result->size * sizeof(int));
    memset(result->host_values, 0, result->size * sizeof(int));
*result->host_offset = 0;
hipMalloc((int**)&result->device_timestamp, result->size*sizeof(int));
hipMalloc((int**)&result->device_values, result->size*sizeof(int));
hipMalloc((int**)&result->device_offset, sizeof(int));
result->copy_to_device(true);
// Array to count valid timestamps
int *valid_h = (int*)malloc(threads*sizeof(int));
int *valid_d;
memset(valid_h, 0, threads*sizeof(int));
hipMalloc((int**)&valid_d, threads*sizeof(int));
// Array to count invalid timestamps
int *invalid_h = (int*)malloc(threads*sizeof(int));
int *invalid_d;
memset(invalid_h, 0, threads*sizeof(int));
hipMalloc((int**)&invalid_d, threads*sizeof(int));
// Array to copy the result to ... needed for offset calculations
int *out_ts_cpy;
int *out_v_cpy;
hipMalloc((int**)&out_ts_cpy, result->size*sizeof(int));
hipMalloc((int**)&out_v_cpy, result->size*sizeof(int));
hipMemset(invalid_d, 0, threads*sizeof(int));
hipMemset(valid_d, 0, threads*sizeof(int));
hipDeviceSynchronize();
// 3, 2, 1, go
hipLaunchKernelGGL(( lift_cuda), dim3(blocks), dim3(block_size), 0, 0, x->device_timestamp, y->device_timestamp,
result->device_timestamp,
x->device_values, y->device_values,
result->device_values,
threads, (x->size), (y->size),
op, valid_d, invalid_d,
out_ts_cpy, out_v_cpy, result->device_offset,
x->device_offset, y->device_offset);
hipDeviceSynchronize();
if (op == MRG){
hipLaunchKernelGGL(( inval_multiples_merge), dim3(blocks), dim3(block_size), 0, 0, op, threads,
x->size, y->size,
x->device_offset, y->device_offset,
out_ts_cpy, invalid_d, valid_d);
hipDeviceSynchronize();
}
// Move invalid timestamps to front and set correct offset
hipLaunchKernelGGL(( remove_invalid), dim3(blocks), dim3(block_size), 0, 0, threads, invalid_d, valid_d,
x->size, y->size,
out_ts_cpy, result->device_timestamp,
out_v_cpy, result->device_values,
x->device_offset, y->device_offset,
result->device_offset, op);
hipDeviceSynchronize();
// Free arrays
// Something's not quite right yet
//hipFree(out_ts_cpy);
//hipFree(out_v_cpy);
//hipFree(invalid_d);
//hipFree(valid_d);
//free(valid_h);
//free(invalid_h);
return result;
}
std::shared_ptr<GPUIntStream> count(std::shared_ptr<GPUUnitStream> input){
int threads = input->size + 1;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
//std::shared_ptr<GPUIntStream> result(new GPUIntStream());
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size + 1, true);
hipLaunchKernelGGL(( assign_vals), dim3(blocks), dim3(block_size), 0, 0, input->device_timestamp, result->device_values, result->device_timestamp,
input->device_offset, result->device_offset,
input->size+1);
hipDeviceSynchronize();
return result;
}
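// Note on count(): the result is allocated with input->size + 1 slots because the output
// may contain an extra event at timestamp 0. assign_vals() (see below) checks whether the
// input already has an event at time 0 and adjusts the result offset accordingly.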
// Internal functions
__device__ void calcThreadsBlocks_device(int threads, int *block_size, int*blocks){
*block_size = 0;
*blocks = 0;
if (MAX_BLOCKS*MAX_THREADS_PER_BLOCK<threads){
printf("Cannot schedule the whole stream! TODO! implement iterative scheduling \n");
//return;
}
//schedule in warp size
for (int bs = 32; bs <= MAX_THREADS_PER_BLOCK;bs +=32){
if (*block_size > threads){
break;
}
*block_size = bs;
}
//TODO! MAX_BLOCKS?
// the number of blocks per kernel launch should be in the thousands.
for (int bl=1; bl <= MAX_BLOCKS*1000; bl++){
*blocks = bl;
if (bl* (*block_size) > threads){
break;
}
}
//TODO! make iterative! see last for hints (code already there)
if (*blocks > 1024){
printf("Many Blocks");
return;
}
}
__global__ void delay_iteration(int* reset_timestamps, int* reset_offset, int reset_size, int* inputInt_timestamps, int* inputInt_values, int* inputInt_offset, int inputInt_size, int* result_timestamps, int* result_offset, int result_size, hipStream_t stream) {
hipError_t lastCudaError;
// Clear results
memset(result_timestamps, -1, result_size * sizeof(int));
// Allocate memory for temporary iteration results
int* tempResults_offset = 0;
lastCudaError = hipMalloc((void**)&tempResults_offset, sizeof(int));
if (lastCudaError == hipErrorMemoryAllocation) {
printf("Error allocating for tempResults_offset\n");
}
memset(tempResults_offset, 0, sizeof(int));
int* tempResults = 0;
lastCudaError = hipMalloc((void**)&tempResults, reset_size * sizeof(int));
if (lastCudaError == hipErrorMemoryAllocation) {
printf("Error allocating for tempResults\n");
}
memset(tempResults, -1, reset_size * sizeof(int));
int resultCount = 0;
int maxTimestamp = inputInt_timestamps[inputInt_size-1] > reset_timestamps[reset_size-1] ? inputInt_timestamps[inputInt_size-1] : reset_timestamps[reset_size-1];
int prevResultsCount = reset_size;
while (prevResultsCount > 0) {
int threads = prevResultsCount;
int block_size = 0;
int blocks = 0;
calcThreadsBlocks_device(threads, &block_size, &blocks);
hipLaunchKernelGGL(( delay_cuda), dim3(blocks), dim3(block_size), 0, stream, inputInt_timestamps, inputInt_values, reset_timestamps, tempResults, threads, inputInt_size, inputInt_offset, reset_offset, tempResults_offset, maxTimestamp, stream);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, stream, tempResults, 0, threads - 1, 0);
hipDeviceSynchronize();
hipLaunchKernelGGL(( calculate_offset), dim3(blocks), dim3(block_size), 0, stream, tempResults + *tempResults_offset, tempResults_offset, threads);
hipDeviceSynchronize();
prevResultsCount = threads - (*tempResults_offset - *reset_offset);
if (prevResultsCount > 0) {
memcpy(result_timestamps + resultCount, tempResults + *tempResults_offset, prevResultsCount * sizeof(int));
resultCount += prevResultsCount;
}
int* temp_timestamps = reset_timestamps;
int* temp_offset = reset_offset;
reset_timestamps = tempResults;
reset_offset = tempResults_offset;
tempResults = temp_timestamps;
tempResults_offset = temp_offset;
*tempResults_offset = *reset_offset;
}
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, stream, result_timestamps, 0, result_size - 1, 0);
hipDeviceSynchronize();
int threads = result_size;
int block_size = 0;
int blocks = 0;
calcThreadsBlocks_device(threads, &block_size, &blocks);
*result_offset = 0;
hipLaunchKernelGGL(( calculate_offset), dim3(blocks), dim3(block_size), 0, stream, result_timestamps, result_offset, threads);
}
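// Summary of delay_iteration(): starting from the reset events, each pass uses delay_cuda
// to compute the output timestamps triggered by the previous pass, sorts them, recomputes
// the offset, and appends the newly produced timestamps to the result; the two timestamp
// buffers are then swapped (ping-pong) and the loop stops once a pass produces no new
// events. Finally the accumulated result is sorted and its offset is recomputed.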
/**
* Removes all timestamps that cannot cause delay due to reset events. Caution: Has side effects on input streams.
* @param s Integer input stream
* @param r Unit input stream
* @param stream CUDA stream number
*/
void delay_preliminary_prune(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, hipStream_t stream) {
int threads = (int) s->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
printf("Scheduled delay_preliminary_prune() with <<<%d,%d>>>, %i threads \n",blocks,block_size, threads);
hipLaunchKernelGGL(( delay_cuda_preliminary_prune), dim3(blocks), dim3(block_size), 0, stream, s->device_timestamp, s->device_values, r->device_timestamp, threads, r->size, s->device_offset, r->device_offset, stream);
hipDeviceSynchronize();
}
__global__ void delay_cuda_preliminary_prune(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int size, int resetSize, int *offset, int *resetOffset, hipStream_t stream) {
const int i = threadIdx.x + blockIdx.x * blockDim.x;
inputIntTimestamps += *offset;
inputIntValues += *offset;
resetTimestamps += *resetOffset;
if (i < size) {
int m = lookUpNextElement(resetSize, inputIntTimestamps[i], resetTimestamps);
if (m > -1 && inputIntTimestamps[i] + inputIntValues[i] > resetTimestamps[m]) {
inputIntValues[i] = -1;
}
}
}
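// Pruning condition: an input event at time t with delay value d is marked invalid
// (value -1) when the next reset event after t lies strictly before t + d, i.e. the
// delayed event could never fire because a reset intervenes first.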
// binary search
// on failure returns INT_MIN
// returns position of the Element with value x
__device__ int lookUpElement(int size,int searchValue, int * input_timestamp){
int L = 0;
int R = size;
int m = INT_MIN;
int out = INT_MIN;
while (L<=R) {
__syncthreads();
m = (int) (L+R)/2;
if (input_timestamp[m]<searchValue){
L = m + 1;
}
else if (input_timestamp[m]>searchValue){
R = m -1;
}
else{
out = m;
break;
}
}
return out;
}
// Binary search looking for next highest timestamp instead of exact match
__device__ int lookUpNextElement(int size, int searchValue, int *timestamps) {
int L = 0;
int R = size - 1;
int m = INT_MIN;
int out = INT_MIN;
if (timestamps[size-1] > searchValue) {
while (L<=R) {
m = (int) (L+R)/2;
if (timestamps[m] <= searchValue) {
L = m + 1;
} else {
out = m;
R = m - 1;
}
}
}
return out;
}
__global__ void delay_cuda(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int *results, int size, int inputSize, int *inputOffset, int *resetOffset, int* resultOffset, int maxTimestamp, hipStream_t stream) {
const int i = threadIdx.x + blockIdx.x * blockDim.x;
inputIntTimestamps += *inputOffset;
inputIntValues += *inputOffset;
resetTimestamps += *resetOffset;
results += *resultOffset;
if (i < size) {
// For each tempEvent, check if there's a matching (valid) event in IntStream s
int index = lookUpElement(inputSize, resetTimestamps[i], inputIntTimestamps);
if (index != INT_MIN && inputIntValues[index] != -1) {
int outputTimestamp = inputIntTimestamps[index] + inputIntValues[index];
if (outputTimestamp <= maxTimestamp)
results[i] = outputTimestamp;
else
results[i] = -1;
} else {
results[i] = -1;
}
}
}
//reduction example followed from: https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
//calculates the number of non valid timestamps
__global__ void calculate_offset(int* timestamps, int* offset, int size){
    __shared__ int sdata[1024]; // each thread loads one element from global to shared memory
int tid = threadIdx.x;
unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x;
int block_offset = 0;
sdata[tid] = 0;
if (i < size){
if (*(timestamps+i) < 0){
sdata[tid] = 1;
}
}
__syncthreads();
for(unsigned int s=1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
__syncthreads();
if(tid == 0){
block_offset = sdata[0];
atomicAdd(offset, block_offset);
}
}
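// Each block counts the negative (invalid) timestamps it sees in shared memory using the
// interleaved-addressing reduction from the cited slides; thread 0 then adds the block's
// partial sum to *offset with atomicAdd, so after the kernel *offset holds the total
// number of invalid timestamps, i.e. the index of the first valid one.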
__global__ void last_cuda(int* input_timestamp, int* input_values,int*unit_stream_timestamps, int* output_timestamps, int* output_values, int intStreamSize, int size, int* offsInt, int* offsUnit){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
//shift accordingly to offset
unit_stream_timestamps += *offsUnit;
input_timestamp += *offsInt;
input_values += *offsInt;
int local_unit_timestamp;
if (i < size){
output_timestamps[i] = -1;
local_unit_timestamp = unit_stream_timestamps[i];
}
size -= *offsUnit;
intStreamSize -= *offsInt;
output_timestamps += *offsUnit;
output_values += *offsUnit;
int out = -2;
//Search for the timestamp per thread
int L = 0;
int R = intStreamSize-1;
int m;
__syncthreads();
if (i<size) {
while (L<=R) {
m = (int) (L+R)/2;
if (input_timestamp[m]<local_unit_timestamp){
L = m + 1;
out = input_values[m];
output_timestamps[i] = unit_stream_timestamps[i];
}
else if (input_timestamp[m]>=local_unit_timestamp){
R = m -1;
}
else{
out = input_values[m];
output_timestamps[i] = unit_stream_timestamps[i];
break;
}
}
}
//all have their respective out values
//the output_values array has been successfully filled
//now the threads perform an and reduction starting at 0 going to size
__syncthreads();
if (i < size){
output_values[i] = out;
}
}
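// last_cuda: one thread per unit-stream event. Each thread binary-searches the int stream
// for the latest event with a strictly smaller timestamp and, if one exists, emits its
// value at the unit timestamp; otherwise the output timestamp stays -1 and the event is
// treated as invalid by the subsequent calculate_offset() pass.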
// working
__global__ void time_cuda(int* input_timestamp, int* output_timestamps, int* output_values,int size, int*offset, int* resultOffset){
int i = threadIdx.x + blockIdx.x * blockDim.x;
input_timestamp += *offset;
output_timestamps += *offset;
output_values += *offset;
if (i<size-*offset){
output_timestamps[i] = input_timestamp[i];
output_values[i] = input_timestamp[i];
}
if (i == 0) *resultOffset = *offset;
}
/**
* MergePath, also used for lift
*/
__device__ int merge_path(int *x, int *y, int diag, int x_len, int y_len) {
// Just using UnitStreams for now
//const int i = threadIdx.x + blockIdx.x * blockDim.x;
int begin = max(0, diag - y_len); // Start of search window
int end = min(diag, x_len); // End of search window
int mid;
// Binary search
while(begin < end){
mid = (end + begin) / 2;
int x_ts = x[mid];
int y_ts = y[diag - 1 - mid];
if (x_ts < y_ts) {
begin = mid + 1;
}
else{
end = mid;
}
}
return begin;
}
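// Illustrative example (not used by the code): for x = {1, 3, 5}, y = {2, 4} and diag = 2,
// merge_path(x, y, 2, 3, 2) returns 1, so the thread whose partition starts at diagonal 2
// begins at x[1] and y[diag - intersect] = y[1]; the two smallest elements (1 and 2)
// belong to the preceding partition.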
typedef void (*lift_op) (int*, int*, int*);
typedef void (*lift_func) ( int*, int*,
int*, int*,
int*, int*,
int*, int*,
bool, bool, lift_op);
__device__ void lift_add(int *a, int *b, int *result){
*result = *a + *b;
}
__device__ void lift_sub(int *a, int *b, int *result){
*result = *a - *b;
}
__device__ void lift_mul(int *a, int *b, int *result){
*result = *a * *b;
}
__device__ void lift_div(int *a, int *b, int *result){
//assert(*b != 0 && "Divide by zero error in lift_div");
if (*b == 0){
printf("DIVISION BY ZERO\n");
*result = 0;
return;
}
*result = *a / *b;
}
__device__ void lift_mod(int *a, int *b, int *result){
//assert(*b != 0 && "Divide by zero error in lift_mod");
if (*b == 0){
printf("MODULO BY ZERO\n");
*result = 0;
return;
}
*result = *a % *b;
}
__device__ void lift_value( int *x_ts, int *y_ts,
int *x_v, int *y_v,
int *x_i, int *y_i,
int *out_ts, int *out_v,
bool x_done, bool y_done, lift_op op){
if (x_ts[*x_i] != y_ts[*y_i]){
// If timestamps don't match, result timestamp is invalid (-1)
*out_ts = -1;
if (*x_i < *y_i || y_done){
(*x_i)++;
}
else{
(*y_i)++;
}
}
else{
        // If they match, result timestamp is x/y timestamp and result value is the result of the lift function
*out_ts = x_ts[*x_i];
// Specific value based lift operation
op(&x_v[*x_i], &y_v[*y_i], out_v);
if (!x_done){
(*x_i)++;
}
if (!y_done){
(*y_i)++;
}
}
}
__device__ void lift_merge( int *x_ts, int *y_ts,
int *x_v, int *y_v,
int *x_i, int *y_i,
int *out_ts, int *out_v,
bool x_done, bool y_done, lift_op op){
if (x_ts[*x_i] <= y_ts[*y_i] && !x_done || y_done){
*out_ts = x_ts[*x_i];
*out_v = x_v[*x_i];
(*x_i)++;
}
else{
*out_ts = y_ts[*y_i];
*out_v = y_v[*y_i];
(*y_i)++;
}
}
__device__ lift_func lift_funcs[] = { lift_value, lift_merge };
__device__ lift_op lift_ops[] = { lift_add, lift_sub, lift_mul, lift_div, lift_mod };
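// lift_cuda selects the outer behaviour (value-wise lift vs. merge) through lift_funcs and
// the arithmetic operation through lift_ops: the MRG op code is remapped to lift_funcs[1]
// with a dummy lift_ops index of 0, every other op code indexes lift_ops directly.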
// Device internal sequential processing of small partitions
__device__ void lift_partition( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int x_start, int y_start,
int vpt, int tidx,
int x_len, int y_len,
lift_func fct, lift_op op,
int* valid, int *invalid){
int x_i = x_start;
int y_i = y_start;
//int size = vpt;
bool x_done = x_i >= x_len ? true : false;
bool y_done = y_i >= y_len ? true : false;
// Could possibly be optimized since only the last block needs range checks
// #pragma unroll is also an option according to https://moderngpu.github.io/merge.html
for(int i = 0; i < vpt; i++) {
// Break if last block doesn't fit
if (x_done && y_done){
break;
}
int offset = (tidx*vpt) + i;
fct(x_ts, y_ts,
x_v, y_v,
&x_i, &y_i,
out_ts+offset, out_v+offset,
x_done, y_done, op);
if (x_i >= x_len){
x_done = true;
}
if (y_i >= y_len){
y_done = true;
}
if ((x_i + y_i) - (x_start + y_start) >= vpt){
x_done = true;
y_done = true;
}
}
// Count valid/invalid timestamps per partition
for (int i = 0; i < vpt && tidx*vpt+i < x_len+y_len; i++){
if (out_ts[tidx*vpt+i] < 0){
invalid[tidx]++;
}
else{
valid[tidx]++;
}
}
}
__global__ void inval_multiples_merge( int op, int threads,
int x_len, int y_len,
int *x_offset_d, int *y_offset_d,
int *out_ts, int *invalid, int *valid){
// If op is merge, check for double timestamps
int x_offset = *x_offset_d;
int y_offset = *y_offset_d;
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int len_offset = x_offset + y_offset;
if (tidx >= x_len+y_len - len_offset){
return;
}
out_ts += len_offset;
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
for (int i = 0; i < vpt && tidx*vpt+i < (x_len+y_len)-len_offset; i++){
if (tidx*vpt+i == 0){
continue;
}
int l = tidx*vpt + i - 1;
int r = tidx*vpt + i;
if (out_ts[l] == out_ts[r]){
out_ts[r] = -1;
invalid[tidx]++;
// Decrement valid, since it is at maximum due to incrementations before
valid[tidx]--;
}
}
}
__global__ void remove_invalid( int threads, int *invalid, int *valid,
int x_len, int y_len,
int *out_ts_cpy, int *out_ts,
int *out_v_cpy, int *out_v,
int *x_offset_d, int *y_offset_d,
int *result_offset, int op){
int x_offset = *x_offset_d;
int y_offset = *y_offset_d;
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int len_offset = x_offset + y_offset;
if (tidx >= x_len+y_len - len_offset){
return;
}
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
// Each thread can now add up all valid/invalid timestamps and knows how to place their valid timestamps
int cuml_invalid = 0;
for (int i = 0; i < (x_len+y_len) - len_offset; i++){
cuml_invalid += invalid[i];
}
int cuml_valid_before = 0;
for (int i = 0; i < tidx; i++){
cuml_valid_before += valid[i];
}
int vals_before = cuml_invalid + cuml_valid_before;
int valid_cnt = 0;
for (int i = 0; i < vpt && tidx*vpt+i < (x_len+y_len)-len_offset; i++){
if (out_ts_cpy[tidx*vpt+i+len_offset] >= 0){
out_ts[vals_before+valid_cnt+len_offset] = out_ts_cpy[tidx*vpt+i+len_offset];
out_v[vals_before+valid_cnt+len_offset] = out_v_cpy[tidx*vpt+i+len_offset];
valid_cnt++;
}
}
// Only one thread does this
if (tidx == 0) {
(*result_offset) = cuml_invalid+len_offset;
}
}
__global__ void lift_cuda( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int threads, int x_len, int y_len,
int op, int *valid, int *invalid,
int *out_ts_cpy, int *out_v_cpy, int *invalid_offset,
int *x_offset, int *y_offset){
const int tidx = threadIdx.x + blockIdx.x * blockDim.x; // Thread ID
int xo = (*x_offset);
int yo = (*y_offset);
int len_offset = xo+yo;
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
int diag = tidx * vpt; // Binary search constraint
int intersect = merge_path(x_ts+xo, y_ts+yo, diag, x_len-xo, y_len-yo);
int x_start = intersect;
int y_start = diag - intersect;
// Split op into merge vs. value function and specific value operation
int fct = 0;
if (op == MRG){
op = 0;
fct = 1;
}
if (tidx*vpt < (x_len+y_len)-len_offset) {
int mems = min(vpt, ((x_len+y_len)-len_offset)-tidx*vpt);
memset(out_ts_cpy+len_offset+tidx*vpt, -1, mems*sizeof(int));
}
lift_partition( x_ts+xo, y_ts+yo, out_ts_cpy+len_offset,
x_v+xo, y_v+yo, out_v_cpy+len_offset,
x_start, y_start, vpt, tidx,
x_len-xo, y_len-yo, lift_funcs[fct], lift_ops[op],
valid, invalid);
}
// Scan from slides
__global__ void assign_vals(int *input, int *result_v, int *result_ts, int *input_offset, int *result_offset, int size){
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
bool input_zero_ts = input[*input_offset] == 0;
if (tidx == 0) {
if (input_zero_ts) {
// stream has same size as input => increase offset by one since stream has larger size
*result_offset = *input_offset + 1;
} else {
// stream is 1 _larger_ than input => don't increase offset
*result_offset = *input_offset;
}
}
__syncthreads();
if (input_zero_ts && tidx < size){
// timestamp at 0, result has same event count as input
result_v[*result_offset + tidx - 1] = tidx;
result_ts[*result_offset + tidx] = input[*input_offset + tidx];
}
else if (tidx < size){
// no timestamp at 0, result has input event count + 1
result_v[*result_offset + tidx] = tidx;
result_ts[*result_offset + tidx] = input[*input_offset + tidx - 1];
//(*result_offset)++;
}
//__syncthreads();
if (tidx == 0){
//memcpy(result_ts+ 1, input, (size-1)*sizeof(int));
if (!input_zero_ts) {
result_v[*result_offset] = 0;
result_ts[*result_offset] = 0;
}
}
return;
}
| d6dfec405b3f11e5043e9319f6e93b841595c46d.cu | //
// Created by fabian on 28.05.21.
//
#include <cuda_runtime.h>
#include <sys/time.h>
#include <assert.h>
#include "main.cuh"
#include "helper.cuh"
#include "GPUStream.cuh"
#include "StreamFunctions.cuh"
#include "device_information.cuh"
#include "StreamFunctionHelper.cuh"
#define MAX_STREAMS 10
// Internal function declarations
__global__ void time_cuda(int* input_timestamp, int* output_timestamps, int* output_values, int size, int*offs, int* resultOffse);
__global__ void last_cuda(int* input_timestamp, int* input_values,int*unit_stream_timestamps, int* output_timestamps, int* output_values,int IntStreamSize, int size, int* offsInt, int* offsUnit);
void delay_preliminary_prune(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, cudaStream_t stream);
__global__ void delay_cuda_preliminary_prune(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int size, int resetSize, int *offset, int *resetOffset, cudaStream_t stream);
__global__ void delay_iteration(int* reset_timestamps, int* reset_offset, int reset_size, int* inputInt_timestamps, int* inputInt_values, int* inputInt_offset, int inputInt_size, int* result_timestamps, int* result_offset, int result_size, cudaStream_t stream);
__global__ void delay_cuda(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int *results, int size, int inputSize, int *inputOffset, int *resetOffset, int* resultOffset, int maxTimestamp, cudaStream_t stream);
__global__ void lift_cuda( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int threads, int x_len, int y_len,
int op, int *valid, int *invalid,
int *out_ts_cpy, int *out_v_cpy, int *invalid_offset,
int *x_offset, int *y_offset);
__global__ void final_reduce(int* block_red,int size,int* offset);
__global__ void assign_vals(int *input, int *result, int *input_offset, int *result_offset, int size);
__device__ int lookUpElement(int size,int searchValue, int * input_timestamp);
__device__ int lookUpNextElement(int size,int searchValue, int * timestamps);
__global__ void calculate_offset(int* timestamps, int* offset, int size);
__global__ void remove_invalid( int threads, int *invalid, int *valid,
int x_len, int y_len,
int *out_ts_cpy, int *out_ts,
int *out_v_cpy, int *out_v,
int *x_offset_d, int *y_offset_d,
int *result_offset, int op);
__global__ void inval_multiples_merge( int op, int threads,
int x_len, int y_len,
int *x_offset_d, int *y_offset_d,
int *out_ts, int *invalid, int *valid);
__device__ void selection_sort( int *data, int left, int right );
__global__ void cdp_simple_quicksort(int *data, int left, int right, int depth );
__global__ void assign_vals(int *input, int *result_v, int *result_ts, int *input_offset, int *result_offset, int size);
/**
* Externally called functions
**/
// https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#numa-best-practices
// ADD stream argument to enable multiple kernels in parallel (10.5. Concurrent Kernel Execution)
// Note:Low Medium Priority: Use signed integers rather than unsigned integers as loop counters.
std::shared_ptr<GPUIntStream> time(std::shared_ptr<GPUIntStream> input, cudaStream_t stream){
int threads = input->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
// Create new stream on device the size of the input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size, true);
// Fire off the actual calculation
time_cuda<<<blocks,block_size,0,stream>>>(input->device_timestamp, result->device_timestamp, result->device_values, threads,input->device_offset,result->device_offset);
//printf("Scheduled time() with <<<%d,%d>>> \n",blocks,block_size);
return result;
};
std::shared_ptr<GPUIntStream> time(std::shared_ptr<GPUUnitStream> input, cudaStream_t stream){
int threads = input->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
// Create new stream on device the size of the input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size, true);
// Fire off the actual calculation
time_cuda<<<blocks,block_size,0,stream>>>(input->device_timestamp, result->device_timestamp, result->device_values, threads,input->device_offset,result->device_offset);
//printf("Scheduled time() with <<<%d,%d>>> \n",blocks,block_size);
return result;
};
std::shared_ptr<GPUIntStream> last(std::shared_ptr<GPUIntStream> inputInt, std::shared_ptr<GPUUnitStream> inputUnit, cudaStream_t stream) {
int threads = (int) inputUnit->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads, &block_size, &blocks);
// Create new stream on device with the size of the unit input stream
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(inputUnit->size, true);
result->copy_to_device();
// Fire off the CUDA calculation
last_cuda<<<blocks, block_size, 0, stream>>>(inputInt->device_timestamp, inputInt->device_values,
inputUnit->device_timestamp, result->device_timestamp,
result->device_values, inputInt->size, threads,
inputInt->device_offset, inputUnit->device_offset);
cudaDeviceSynchronize();
calculate_offset<<<blocks, block_size, 0, stream>>>(result->device_timestamp, result->device_offset, threads);
return result;
}
std::shared_ptr<GPUUnitStream> delay(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, cudaStream_t stream) {
std::shared_ptr<GPUIntStream> s_prune = std::make_shared<GPUIntStream>(*s, true);
std::shared_ptr<GPUUnitStream> r_prune = std::make_shared<GPUUnitStream>(*r, true);
// Prune GPUIntStream s, mark all events that can't possibly trigger due to a reset event with value -1
delay_preliminary_prune(s_prune, r_prune, stream);
// Prepare original input data and result output
std::shared_ptr<GPUUnitStream> prevResults = std::make_shared<GPUUnitStream>(*r_prune, true);
std::shared_ptr<GPUUnitStream> result = std::make_shared<GPUUnitStream>(s->size, true);
// Launch actual iterative algorithm on device
delay_iteration<<<1, 1, 0, stream>>>(prevResults->device_timestamp, prevResults->device_offset, prevResults->size, s_prune->device_timestamp, s_prune->device_values, s_prune->device_offset, s_prune->size, result->device_timestamp, result->device_offset, result->size, stream);
cudaDeviceSynchronize();
return result;
}
std::shared_ptr<GPUIntStream> slift(std::shared_ptr<GPUIntStream> x, std::shared_ptr<GPUIntStream> y, int op){
// Merge fast path
if (op == MRG){
std::shared_ptr<GPUIntStream> merge_result = lift(x, y, MRG);
cudaDeviceSynchronize();
return merge_result;
}
// Fast path for 1/2 empty stream(s)
if (x->size == 0 || y->size == 0){
int *e_ts = (int*)malloc(0);
int *e_v = (int*)malloc(0);
std::shared_ptr<GPUIntStream> empty(new GPUIntStream(e_ts, e_v, 0));
empty->size = 0;
empty->copy_to_device();
return empty;
}
int *x_ts = (int*)malloc(x->size*sizeof(int));
int *y_ts = (int*)malloc(y->size*sizeof(int));
memcpy(x_ts, x->host_timestamp, x->size*sizeof(int));
memcpy(y_ts, y->host_timestamp, y->size*sizeof(int));
// xy ... y is the unit stream
int *xy_ts = (int*)malloc(y->size*sizeof(int));
int *yx_ts = (int*)malloc(x->size*sizeof(int));
int *xy_v = (int*)malloc(y->size*sizeof(int));
int *yx_v = (int*)malloc(x->size*sizeof(int));
memset(xy_ts, -1, y->size*sizeof(int));
memset(yx_ts, -1, x->size*sizeof(int));
// Make Unit streams from Int streams for last()
std::shared_ptr<GPUUnitStream> x_unit(new GPUUnitStream(x_ts, x->size, *(x->host_offset)));
std::shared_ptr<GPUUnitStream> y_unit(new GPUUnitStream(y_ts, y->size, *(y->host_offset)));
x_unit->copy_to_device();
y_unit->copy_to_device();
std::shared_ptr<GPUIntStream> last_xy = last(x, y_unit, 0);
std::shared_ptr<GPUIntStream> last_yx = last(y, x_unit, 0);
cudaDeviceSynchronize();
last_yx->copy_to_host();
last_xy->copy_to_host();
std::shared_ptr<GPUIntStream> x_prime = lift(x, last_xy, MRG);
std::shared_ptr<GPUIntStream> y_prime = lift(y, last_yx, MRG);
cudaDeviceSynchronize();
std::shared_ptr<GPUIntStream> result = lift(x_prime, y_prime, op);
cudaDeviceSynchronize();
x_prime->free_device();
x_prime->free_host();
y_prime->free_device();
y_prime->free_host();
x_unit->free_device();
x_unit->free_host();
y_unit->free_device();
y_unit->free_host();
// MEMORY BUG WHEN FREEING LAST XY/YX
return result;
}
std::shared_ptr<GPUIntStream> lift(std::shared_ptr<GPUIntStream> x, std::shared_ptr<GPUIntStream> y, int op){
// Information about MergePath
    // https://stackoverflow.com/questions/30729106/merge-sort-using-cuda-efficient-implementation-for-small-input-arrays
/**
* See the following paper for parallel merging of sorted arrays:
 * O. Green, R. McColl, and D. Bader
* GPU merge path: a GPU merging algorithm
* International Conference on Supercomputing
* November 2014
* URL: https://www.researchgate.net/publication/254462662_GPU_merge_path_a_GPU_merging_algorithm
*
* The paper claims a runtime complexity of O(log n + n/p), p ... # of processors
*/
int block_size = 0;
int blocks = 0;
int x_offset = *(x->host_offset);
int y_offset = *(y->host_offset);
//int len_offset = x_offset+y_offset;
int threads = (x->size-x_offset) + (y->size-y_offset);
calcThreadsBlocks(threads, &block_size, &blocks);
threads = (blocks) * (block_size);
// Create Result
std::shared_ptr<GPUIntStream> result(new GPUIntStream());
result->size = x->size + y->size;
result->host_timestamp = (int*)malloc(result->size * sizeof(int));
result->host_values = (int*)malloc(result->size * sizeof(int));
result->host_offset = (int*)malloc(sizeof(int));
    memset(result->host_timestamp, -1, result->size * sizeof(int));
    memset(result->host_values, 0, result->size * sizeof(int));
*result->host_offset = 0;
cudaMalloc((int**)&result->device_timestamp, result->size*sizeof(int));
cudaMalloc((int**)&result->device_values, result->size*sizeof(int));
cudaMalloc((int**)&result->device_offset, sizeof(int));
result->copy_to_device(true);
// Array to count valid timestamps
int *valid_h = (int*)malloc(threads*sizeof(int));
int *valid_d;
memset(valid_h, 0, threads*sizeof(int));
cudaMalloc((int**)&valid_d, threads*sizeof(int));
// Array to count invalid timestamps
int *invalid_h = (int*)malloc(threads*sizeof(int));
int *invalid_d;
memset(invalid_h, 0, threads*sizeof(int));
cudaMalloc((int**)&invalid_d, threads*sizeof(int));
// Array to copy the result to ... needed for offset calculations
int *out_ts_cpy;
int *out_v_cpy;
cudaMalloc((int**)&out_ts_cpy, result->size*sizeof(int));
cudaMalloc((int**)&out_v_cpy, result->size*sizeof(int));
cudaMemset(invalid_d, 0, threads*sizeof(int));
cudaMemset(valid_d, 0, threads*sizeof(int));
cudaDeviceSynchronize();
// 3, 2, 1, go
lift_cuda<<<blocks, block_size>>>( x->device_timestamp, y->device_timestamp,
result->device_timestamp,
x->device_values, y->device_values,
result->device_values,
threads, (x->size), (y->size),
op, valid_d, invalid_d,
out_ts_cpy, out_v_cpy, result->device_offset,
x->device_offset, y->device_offset);
cudaDeviceSynchronize();
if (op == MRG){
inval_multiples_merge<<<blocks, block_size>>> ( op, threads,
x->size, y->size,
x->device_offset, y->device_offset,
out_ts_cpy, invalid_d, valid_d);
cudaDeviceSynchronize();
}
// Move invalid timestamps to front and set correct offset
remove_invalid<<<blocks, block_size>>>( threads, invalid_d, valid_d,
x->size, y->size,
out_ts_cpy, result->device_timestamp,
out_v_cpy, result->device_values,
x->device_offset, y->device_offset,
result->device_offset, op);
cudaDeviceSynchronize();
// Free arrays
// Something's not quite right yet
//cudaFree(out_ts_cpy);
//cudaFree(out_v_cpy);
//cudaFree(invalid_d);
//cudaFree(valid_d);
//free(valid_h);
//free(invalid_h);
return result;
}
std::shared_ptr<GPUIntStream> count(std::shared_ptr<GPUUnitStream> input){
int threads = input->size + 1;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
//std::shared_ptr<GPUIntStream> result(new GPUIntStream());
std::shared_ptr<GPUIntStream> result = std::make_shared<GPUIntStream>(input->size + 1, true);
assign_vals<<<blocks, block_size>>>( input->device_timestamp, result->device_values, result->device_timestamp,
input->device_offset, result->device_offset,
input->size+1);
cudaDeviceSynchronize();
return result;
}
// Internal functions
__device__ void calcThreadsBlocks_device(int threads, int *block_size, int*blocks){
*block_size = 0;
*blocks = 0;
if (MAX_BLOCKS*MAX_THREADS_PER_BLOCK<threads){
printf("Cannot schedule the whole stream! TODO! implement iterative scheduling \n");
//return;
}
//schedule in warp size
for (int bs = 32; bs <= MAX_THREADS_PER_BLOCK;bs +=32){
if (*block_size > threads){
break;
}
*block_size = bs;
}
//TODO! MAX_BLOCKS?
// the number of blocks per kernel launch should be in the thousands.
for (int bl=1; bl <= MAX_BLOCKS*1000; bl++){
*blocks = bl;
if (bl* (*block_size) > threads){
break;
}
}
//TODO! make iterative! see last for hints (code already there)
if (*blocks > 1024){
printf("Many Blocks");
return;
}
}
__global__ void delay_iteration(int* reset_timestamps, int* reset_offset, int reset_size, int* inputInt_timestamps, int* inputInt_values, int* inputInt_offset, int inputInt_size, int* result_timestamps, int* result_offset, int result_size, cudaStream_t stream) {
cudaError_t lastCudaError;
// Clear results
memset(result_timestamps, -1, result_size * sizeof(int));
// Allocate memory for temporary iteration results
int* tempResults_offset = 0;
lastCudaError = cudaMalloc((void**)&tempResults_offset, sizeof(int));
if (lastCudaError == cudaErrorMemoryAllocation) {
printf("Error allocating for tempResults_offset\n");
}
memset(tempResults_offset, 0, sizeof(int));
int* tempResults = 0;
lastCudaError = cudaMalloc((void**)&tempResults, reset_size * sizeof(int));
if (lastCudaError == cudaErrorMemoryAllocation) {
printf("Error allocating for tempResults\n");
}
memset(tempResults, -1, reset_size * sizeof(int));
int resultCount = 0;
int maxTimestamp = inputInt_timestamps[inputInt_size-1] > reset_timestamps[reset_size-1] ? inputInt_timestamps[inputInt_size-1] : reset_timestamps[reset_size-1];
int prevResultsCount = reset_size;
while (prevResultsCount > 0) {
int threads = prevResultsCount;
int block_size = 0;
int blocks = 0;
calcThreadsBlocks_device(threads, &block_size, &blocks);
delay_cuda<<<blocks, block_size, 0, stream>>>(inputInt_timestamps, inputInt_values, reset_timestamps, tempResults, threads, inputInt_size, inputInt_offset, reset_offset, tempResults_offset, maxTimestamp, stream);
cudaDeviceSynchronize();
cdp_simple_quicksort<<<1, 1, 0, stream>>>(tempResults, 0, threads - 1, 0);
cudaDeviceSynchronize();
calculate_offset<<<blocks, block_size, 0, stream>>>(tempResults + *tempResults_offset, tempResults_offset, threads);
cudaDeviceSynchronize();
prevResultsCount = threads - (*tempResults_offset - *reset_offset);
if (prevResultsCount > 0) {
memcpy(result_timestamps + resultCount, tempResults + *tempResults_offset, prevResultsCount * sizeof(int));
resultCount += prevResultsCount;
}
int* temp_timestamps = reset_timestamps;
int* temp_offset = reset_offset;
reset_timestamps = tempResults;
reset_offset = tempResults_offset;
tempResults = temp_timestamps;
tempResults_offset = temp_offset;
*tempResults_offset = *reset_offset;
}
cdp_simple_quicksort<<<1, 1, 0, stream>>>(result_timestamps, 0, result_size - 1, 0);
cudaDeviceSynchronize();
int threads = result_size;
int block_size = 0;
int blocks = 0;
calcThreadsBlocks_device(threads, &block_size, &blocks);
*result_offset = 0;
calculate_offset<<<blocks, block_size, 0, stream>>>(result_timestamps, result_offset, threads);
}
/**
* Removes all timestamps that cannot cause delay due to reset events. Caution: Has side effects on input streams.
* @param s Integer input stream
* @param r Unit input stream
* @param stream CUDA stream number
*/
void delay_preliminary_prune(std::shared_ptr<GPUIntStream> s, std::shared_ptr<GPUUnitStream> r, cudaStream_t stream) {
int threads = (int) s->size;
int block_size = 1;
int blocks = 1;
calcThreadsBlocks(threads,&block_size,&blocks);
printf("Scheduled delay_preliminary_prune() with <<<%d,%d>>>, %i threads \n",blocks,block_size, threads);
delay_cuda_preliminary_prune<<<blocks, block_size, 0, stream>>>(s->device_timestamp, s->device_values, r->device_timestamp, threads, r->size, s->device_offset, r->device_offset, stream);
cudaDeviceSynchronize();
}
__global__ void delay_cuda_preliminary_prune(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int size, int resetSize, int *offset, int *resetOffset, cudaStream_t stream) {
const int i = threadIdx.x + blockIdx.x * blockDim.x;
inputIntTimestamps += *offset;
inputIntValues += *offset;
resetTimestamps += *resetOffset;
if (i < size) {
int m = lookUpNextElement(resetSize, inputIntTimestamps[i], resetTimestamps);
if (m > -1 && inputIntTimestamps[i] + inputIntValues[i] > resetTimestamps[m]) {
inputIntValues[i] = -1;
}
}
}
// binary search
// on failure returns INT_MIN
// returns position of the Element with value x
__device__ int lookUpElement(int size,int searchValue, int * input_timestamp){
int L = 0;
int R = size;
int m = INT_MIN;
int out = INT_MIN;
while (L<=R) {
__syncthreads();
m = (int) (L+R)/2;
if (input_timestamp[m]<searchValue){
L = m + 1;
}
else if (input_timestamp[m]>searchValue){
R = m -1;
}
else{
out = m;
break;
}
}
return out;
}
// Binary search looking for next highest timestamp instead of exact match
__device__ int lookUpNextElement(int size, int searchValue, int *timestamps) {
int L = 0;
int R = size - 1;
int m = INT_MIN;
int out = INT_MIN;
if (timestamps[size-1] > searchValue) {
while (L<=R) {
m = (int) (L+R)/2;
if (timestamps[m] <= searchValue) {
L = m + 1;
} else {
out = m;
R = m - 1;
}
}
}
return out;
}
__global__ void delay_cuda(int *inputIntTimestamps, int *inputIntValues, int *resetTimestamps, int *results, int size, int inputSize, int *inputOffset, int *resetOffset, int* resultOffset, int maxTimestamp, cudaStream_t stream) {
const int i = threadIdx.x + blockIdx.x * blockDim.x;
inputIntTimestamps += *inputOffset;
inputIntValues += *inputOffset;
resetTimestamps += *resetOffset;
results += *resultOffset;
if (i < size) {
// For each tempEvent, check if there's a matching (valid) event in IntStream s
int index = lookUpElement(inputSize, resetTimestamps[i], inputIntTimestamps);
if (index != INT_MIN && inputIntValues[index] != -1) {
int outputTimestamp = inputIntTimestamps[index] + inputIntValues[index];
if (outputTimestamp <= maxTimestamp)
results[i] = outputTimestamp;
else
results[i] = -1;
} else {
results[i] = -1;
}
}
}
//reduction example followed from: https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
//calculates the number of non valid timestamps
__global__ void calculate_offset(int* timestamps, int* offset, int size){
    __shared__ int sdata[1024]; // each thread loads one element from global to shared memory
int tid = threadIdx.x;
unsigned int i= blockIdx.x*blockDim.x+ threadIdx.x;
int block_offset = 0;
sdata[tid] = 0;
if (i < size){
if (*(timestamps+i) < 0){
sdata[tid] = 1;
}
}
__syncthreads();
for(unsigned int s=1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
__syncthreads();
if(tid == 0){
block_offset = sdata[0];
atomicAdd(offset, block_offset);
}
}
__global__ void last_cuda(int* input_timestamp, int* input_values,int*unit_stream_timestamps, int* output_timestamps, int* output_values, int intStreamSize, int size, int* offsInt, int* offsUnit){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
//shift accordingly to offset
unit_stream_timestamps += *offsUnit;
input_timestamp += *offsInt;
input_values += *offsInt;
int local_unit_timestamp;
if (i < size){
output_timestamps[i] = -1;
local_unit_timestamp = unit_stream_timestamps[i];
}
size -= *offsUnit;
intStreamSize -= *offsInt;
output_timestamps += *offsUnit;
output_values += *offsUnit;
int out = -2;
//Search for the timestamp per thread
int L = 0;
int R = intStreamSize-1;
int m;
__syncthreads();
if (i<size) {
while (L<=R) {
m = (int) (L+R)/2;
if (input_timestamp[m]<local_unit_timestamp){
L = m + 1;
out = input_values[m];
output_timestamps[i] = unit_stream_timestamps[i];
}
else if (input_timestamp[m]>=local_unit_timestamp){
R = m -1;
}
else{
out = input_values[m];
output_timestamps[i] = unit_stream_timestamps[i];
break;
}
}
}
//all have their respective out values
//the output_values array has been successfully filled
//now the threads perform an and reduction starting at 0 going to size
__syncthreads();
if (i < size){
output_values[i] = out;
}
}
// working
__global__ void time_cuda(int* input_timestamp, int* output_timestamps, int* output_values,int size, int*offset, int* resultOffset){
int i = threadIdx.x + blockIdx.x * blockDim.x;
input_timestamp += *offset;
output_timestamps += *offset;
output_values += *offset;
if (i<size-*offset){
output_timestamps[i] = input_timestamp[i];
output_values[i] = input_timestamp[i];
}
if (i == 0) *resultOffset = *offset;
}
/**
* MergePath, also used for lift
*/
__device__ int merge_path(int *x, int *y, int diag, int x_len, int y_len) {
// Just using UnitStreams for now
//const int i = threadIdx.x + blockIdx.x * blockDim.x;
int begin = max(0, diag - y_len); // Start of search window
int end = min(diag, x_len); // End of search window
int mid;
// Binary search
while(begin < end){
mid = (end + begin) / 2;
int x_ts = x[mid];
int y_ts = y[diag - 1 - mid];
if (x_ts < y_ts) {
begin = mid + 1;
}
else{
end = mid;
}
}
return begin;
}
typedef void (*lift_op) (int*, int*, int*);
typedef void (*lift_func) ( int*, int*,
int*, int*,
int*, int*,
int*, int*,
bool, bool, lift_op);
__device__ void lift_add(int *a, int *b, int *result){
*result = *a + *b;
}
__device__ void lift_sub(int *a, int *b, int *result){
*result = *a - *b;
}
__device__ void lift_mul(int *a, int *b, int *result){
*result = *a * *b;
}
__device__ void lift_div(int *a, int *b, int *result){
//assert(*b != 0 && "Divide by zero error in lift_div");
if (*b == 0){
printf("DIVISION BY ZERO\n");
*result = 0;
return;
}
*result = *a / *b;
}
__device__ void lift_mod(int *a, int *b, int *result){
//assert(*b != 0 && "Divide by zero error in lift_mod");
if (*b == 0){
printf("MODULO BY ZERO\n");
*result = 0;
return;
}
*result = *a % *b;
}
__device__ void lift_value( int *x_ts, int *y_ts,
int *x_v, int *y_v,
int *x_i, int *y_i,
int *out_ts, int *out_v,
bool x_done, bool y_done, lift_op op){
if (x_ts[*x_i] != y_ts[*y_i]){
// If timestamps don't match, result timestamp is invalid (-1)
*out_ts = -1;
if (*x_i < *y_i || y_done){
(*x_i)++;
}
else{
(*y_i)++;
}
}
else{
        // If they match, result timestamp is x/y timestamp and result value is the result of the lift function
*out_ts = x_ts[*x_i];
// Specific value based lift operation
op(&x_v[*x_i], &y_v[*y_i], out_v);
if (!x_done){
(*x_i)++;
}
if (!y_done){
(*y_i)++;
}
}
}
__device__ void lift_merge( int *x_ts, int *y_ts,
int *x_v, int *y_v,
int *x_i, int *y_i,
int *out_ts, int *out_v,
bool x_done, bool y_done, lift_op op){
if (x_ts[*x_i] <= y_ts[*y_i] && !x_done || y_done){
*out_ts = x_ts[*x_i];
*out_v = x_v[*x_i];
(*x_i)++;
}
else{
*out_ts = y_ts[*y_i];
*out_v = y_v[*y_i];
(*y_i)++;
}
}
__device__ lift_func lift_funcs[] = { lift_value, lift_merge };
__device__ lift_op lift_ops[] = { lift_add, lift_sub, lift_mul, lift_div, lift_mod };
// Device internal sequential processing of small partitions
__device__ void lift_partition( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int x_start, int y_start,
int vpt, int tidx,
int x_len, int y_len,
lift_func fct, lift_op op,
int* valid, int *invalid){
int x_i = x_start;
int y_i = y_start;
//int size = vpt;
bool x_done = x_i >= x_len ? true : false;
bool y_done = y_i >= y_len ? true : false;
// Could possibly be optimized since only the last block needs range checks
// #pragma unroll is also an option according to https://moderngpu.github.io/merge.html
for(int i = 0; i < vpt; i++) {
// Break if last block doesn't fit
if (x_done && y_done){
break;
}
int offset = (tidx*vpt) + i;
fct(x_ts, y_ts,
x_v, y_v,
&x_i, &y_i,
out_ts+offset, out_v+offset,
x_done, y_done, op);
if (x_i >= x_len){
x_done = true;
}
if (y_i >= y_len){
y_done = true;
}
if ((x_i + y_i) - (x_start + y_start) >= vpt){
x_done = true;
y_done = true;
}
}
// Count valid/invalid timestamps per partition
for (int i = 0; i < vpt && tidx*vpt+i < x_len+y_len; i++){
if (out_ts[tidx*vpt+i] < 0){
invalid[tidx]++;
}
else{
valid[tidx]++;
}
}
}
__global__ void inval_multiples_merge( int op, int threads,
int x_len, int y_len,
int *x_offset_d, int *y_offset_d,
int *out_ts, int *invalid, int *valid){
// If op is merge, check for double timestamps
int x_offset = *x_offset_d;
int y_offset = *y_offset_d;
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int len_offset = x_offset + y_offset;
if (tidx >= x_len+y_len - len_offset){
return;
}
out_ts += len_offset;
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
for (int i = 0; i < vpt && tidx*vpt+i < (x_len+y_len)-len_offset; i++){
if (tidx*vpt+i == 0){
continue;
}
int l = tidx*vpt + i - 1;
int r = tidx*vpt + i;
if (out_ts[l] == out_ts[r]){
out_ts[r] = -1;
invalid[tidx]++;
// Decrement valid, since it is at maximum due to incrementations before
valid[tidx]--;
}
}
}
__global__ void remove_invalid( int threads, int *invalid, int *valid,
int x_len, int y_len,
int *out_ts_cpy, int *out_ts,
int *out_v_cpy, int *out_v,
int *x_offset_d, int *y_offset_d,
int *result_offset, int op){
int x_offset = *x_offset_d;
int y_offset = *y_offset_d;
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int len_offset = x_offset + y_offset;
if (tidx >= x_len+y_len - len_offset){
return;
}
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
// Each thread can now add up all valid/invalid timestamps and knows how to place their valid timestamps
int cuml_invalid = 0;
for (int i = 0; i < (x_len+y_len) - len_offset; i++){
cuml_invalid += invalid[i];
}
int cuml_valid_before = 0;
for (int i = 0; i < tidx; i++){
cuml_valid_before += valid[i];
}
int vals_before = cuml_invalid + cuml_valid_before;
int valid_cnt = 0;
for (int i = 0; i < vpt && tidx*vpt+i < (x_len+y_len)-len_offset; i++){
if (out_ts_cpy[tidx*vpt+i+len_offset] >= 0){
out_ts[vals_before+valid_cnt+len_offset] = out_ts_cpy[tidx*vpt+i+len_offset];
out_v[vals_before+valid_cnt+len_offset] = out_v_cpy[tidx*vpt+i+len_offset];
valid_cnt++;
}
}
// Only one thread does this
if (tidx == 0) {
(*result_offset) = cuml_invalid+len_offset;
}
}
__global__ void lift_cuda( int *x_ts, int *y_ts, int *out_ts,
int *x_v, int *y_v, int *out_v,
int threads, int x_len, int y_len,
int op, int *valid, int *invalid,
int *out_ts_cpy, int *out_v_cpy, int *invalid_offset,
int *x_offset, int *y_offset){
const int tidx = threadIdx.x + blockIdx.x * blockDim.x; // Thread ID
int xo = (*x_offset);
int yo = (*y_offset);
int len_offset = xo+yo;
int vpt = ceil((double)((x_len + y_len)-len_offset) / (double)threads); // Values per thread
int diag = tidx * vpt; // Binary search constraint
int intersect = merge_path(x_ts+xo, y_ts+yo, diag, x_len-xo, y_len-yo);
int x_start = intersect;
int y_start = diag - intersect;
// Split op into merge vs. value function and specific value operation
int fct = 0;
if (op == MRG){
op = 0;
fct = 1;
}
if (tidx*vpt < (x_len+y_len)-len_offset) {
int mems = min(vpt, ((x_len+y_len)-len_offset)-tidx*vpt);
memset(out_ts_cpy+len_offset+tidx*vpt, -1, mems*sizeof(int));
}
lift_partition( x_ts+xo, y_ts+yo, out_ts_cpy+len_offset,
x_v+xo, y_v+yo, out_v_cpy+len_offset,
x_start, y_start, vpt, tidx,
x_len-xo, y_len-yo, lift_funcs[fct], lift_ops[op],
valid, invalid);
}
// Scan from slides
__global__ void assign_vals(int *input, int *result_v, int *result_ts, int *input_offset, int *result_offset, int size){
const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
bool input_zero_ts = input[*input_offset] == 0;
if (tidx == 0) {
if (input_zero_ts) {
// stream has same size as input => increase offset by one since stream has larger size
*result_offset = *input_offset + 1;
} else {
// stream is 1 _larger_ than input => don't increase offset
*result_offset = *input_offset;
}
}
__syncthreads();
if (input_zero_ts && tidx < size){
// timestamp at 0, result has same event count as input
result_v[*result_offset + tidx - 1] = tidx;
result_ts[*result_offset + tidx] = input[*input_offset + tidx];
}
else if (tidx < size){
// no timestamp at 0, result has input event count + 1
result_v[*result_offset + tidx] = tidx;
result_ts[*result_offset + tidx] = input[*input_offset + tidx - 1];
//(*result_offset)++;
}
//__syncthreads();
if (tidx == 0){
//memcpy(result_ts+ 1, input, (size-1)*sizeof(int));
if (!input_zero_ts) {
result_v[*result_offset] = 0;
result_ts[*result_offset] = 0;
}
}
return;
}
|
c9fac91a28a791b00e036521137ad2f7cb87e6cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo, Pengfei Chen
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
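// The first num_anchors rows of workspace_proposals hold the base anchors at the origin;
// this kernel shifts each base anchor to its (w, h) position on the feature map by adding
// w * feature_stride / h * feature_stride to the corners and copies the matching
// objectness score into column 4.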
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
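// Standard R-CNN box decoding: (dx, dy) shift the anchor centre by a fraction of its width
// and height, (dw, dh) scale the size through exp(), and the decoded corners are clipped to
// the image; positions with h >= real_height or w >= real_width are invalidated by setting
// their score to -1.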
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter out boxes whose width or height is less than rpn_min_size
// filter: pad the box by min_size / 2 on each side and mark it by setting its score to -1
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be the number of output anchors (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
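// Illustrative example: for a = [0, 0, 9, 9] and b = [5, 5, 14, 14] the
// intersection is the 5 x 5 patch [5, 5, 9, 9], so interS = 25, Sa = Sb = 100
// and devIoU returns 25 / (100 + 100 - 25) = 1/7, roughly 0.143.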
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
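// Mask layout produced by nms_kernel: dev_mask holds n_boxes rows of col_blocks
// 64-bit words (col_blocks = DIVUP(n_boxes, 64)). Bit j of word (i, col_start)
// is set when IoU(box i, box 64 * col_start + j) exceeds the threshold; in the
// diagonal blocks only j > threadIdx.x is tested, so each unordered pair is
// evaluated exactly once. Illustrative example: with n_boxes = 100 the grid is
// 2 x 2 blocks and every box owns two mask words.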
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, 5); score (top_n, )
// count should be top_n (rpn_post_nms_top_n)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
const int batchIdx,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = batchIdx;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
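// Note: out is (count, 5) with column 0 holding the batch index. When NMS kept
// fewer proposals than count (out_size < count), the kept proposals are reused
// cyclically through keep[index % out_size] so the output is always fully
// populated.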
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<3> fg_scores_shape = Shape3(in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> out = out_data[proposal::kOut].get<xpu, 3, real_t>(s);
Tensor<xpu, 3> out_score = out_data[proposal::kScore].get<xpu, 3, real_t>(s);
int nbatch = scores.size(0);
int num_anchors = scores.size(1) / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// a non-positive rpn_pre_nms_top_n means keep all proposals (use count)
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr, sizeof(float) * nbatch * count * 5));
Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr, Shape3(nbatch, count, 5));
// im_info is small, so copy it to the CPU
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(hipMemcpy(&cpu_im_info[0], im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
hipMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Copy anchors for all images in batch
for (int i = 0; i < nbatch; i++) {
float* cur_batch_workspace_proposals_ptr = workspace_proposals.dptr_ + i * 5 * count;
FRCNN_CUDA_CHECK(hipMemcpy(cur_batch_workspace_proposals_ptr,
&anchors[0], sizeof(float) * anchors.size(),
hipMemcpyHostToDevice));
// get current batch foreground score
real_t* foreground_score_ptr = reinterpret_cast<real_t *>(scores.dptr_) + i * 2 * count
+ fg_scores_shape.Size();
Tensor<xpu, 3> fg_scores = Tensor<xpu, 3>(foreground_score_ptr, fg_scores_shape);
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride,
fg_scores.dptr_, cur_batch_workspace_proposals_ptr);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
cur_batch_workspace_proposals_ptr, bbox_deltas.dptr_ + i * 4 * count, cur_batch_workspace_proposals_ptr);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
cur_batch_workspace_proposals_ptr, bbox_deltas.dptr_ + i * 4 * count, cur_batch_workspace_proposals_ptr);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// filter out boxes smaller than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[2], cur_batch_workspace_proposals_ptr);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Copy scores to contiguous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, cur_batch_workspace_proposals_ptr, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr,
sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_pre_nms_top_n, cur_batch_workspace_proposals_ptr, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
FRCNN_CUDA_CHECK(hipFree(score_ptr));
FRCNN_CUDA_CHECK(hipFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
hipMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, i,
out.dptr_ + i * 5 * rpn_post_nms_top_n,
out_score.dptr_ + i * rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(hipFree(keep));
FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
}
FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// cannot assume the gradient is already zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| c9fac91a28a791b00e036521137ad2f7cb87e6cc.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo, Pengfei Chen
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be the total number of anchors, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be the total number of anchors, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be the total number of anchors, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter out boxes whose width or height is less than rpn_min_size
// filter: pad the box by min_size / 2 on each side and mark it by setting its score to -1
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be the number of output anchors (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(cudaFree(mask_dev));
}
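// Host-side greedy selection above: the boxes arrive sorted by score, and box i
// is kept only if no previously kept box has marked it in remv; when box i is
// kept, its whole mask row is OR-ed into remv. Illustrative 3-box example: if
// box 0 overlaps box 1 above the threshold but not box 2, the mask rows are
// {bit 1}, {}, {} and the loop keeps boxes 0 and 2 (num_out = 2).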
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, 5); score (top_n, )
// count should be top_n (rpn_post_nms_top_n)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
const int batchIdx,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = batchIdx;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<3> fg_scores_shape = Shape3(in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
Tensor<xpu, 4> scores = in_data[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> out = out_data[proposal::kOut].get<xpu, 3, real_t>(s);
Tensor<xpu, 3> out_score = out_data[proposal::kScore].get<xpu, 3, real_t>(s);
int nbatch = scores.size(0);
int num_anchors = scores.size(1) / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// a non-positive rpn_pre_nms_top_n means keep all proposals (use count)
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr, sizeof(float) * nbatch * count * 5));
Tensor<xpu, 3> workspace_proposals(workspace_proposals_ptr, Shape3(nbatch, count, 5));
// im_info is small, so copy it to the CPU
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(cudaMemcpy(&cpu_im_info[0], im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
cudaMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Copy anchors for all images in batch
for (int i = 0; i < nbatch; i++) {
float* cur_batch_workspace_proposals_ptr = workspace_proposals.dptr_ + i * 5 * count;
FRCNN_CUDA_CHECK(cudaMemcpy(cur_batch_workspace_proposals_ptr,
&anchors[0], sizeof(float) * anchors.size(),
cudaMemcpyHostToDevice));
// get current batch foreground score
real_t* foreground_score_ptr = reinterpret_cast<real_t *>(scores.dptr_) + i * 2 * count
+ fg_scores_shape.Size();
Tensor<xpu, 3> fg_scores = Tensor<xpu, 3>(foreground_score_ptr, fg_scores_shape);
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride,
fg_scores.dptr_, cur_batch_workspace_proposals_ptr);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
cur_batch_workspace_proposals_ptr, bbox_deltas.dptr_ + i * 4 * count, cur_batch_workspace_proposals_ptr);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
cur_batch_workspace_proposals_ptr, bbox_deltas.dptr_ + i * 4 * count, cur_batch_workspace_proposals_ptr);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// filter out boxes smaller than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[2], cur_batch_workspace_proposals_ptr);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Copy scores to contiguous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, cur_batch_workspace_proposals_ptr, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_ordered_proposals_ptr,
sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(
rpn_pre_nms_top_n, cur_batch_workspace_proposals_ptr, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
FRCNN_CUDA_CHECK(cudaFree(score_ptr));
FRCNN_CUDA_CHECK(cudaFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(cudaMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
cudaMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, i,
out.dptr_ + i * 5 * rpn_post_nms_top_n,
out_score.dptr_ + i * rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(cudaFree(keep));
FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr));
}
FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// cannot assume the gradient is already zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
0775ace83485be232869441dcaea1de3738210b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hipfft.h>
#include <cutil_inline.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void Convolve(const Complex*, int, const Complex*, int, Complex*);
// Padding functions
int PadData(const Complex*, Complex**, int,
const Complex*, Complex**, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// Allocate host memory for the signal
Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initalize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex* d_signal;
cutilSafeCall(hipMalloc((void**)&d_signal, mem_size));
// Copy host memory to device
cutilSafeCall(hipMemcpy(d_signal, h_padded_signal, mem_size,
hipMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex* d_filter_kernel;
cutilSafeCall(hipMalloc((void**)&d_filter_kernel, mem_size));
// Copy host memory to device
cutilSafeCall(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
hipMemcpyHostToDevice));
// CUFFT plan
hipfftHandle plan;
cufftSafeCall(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1));
// Transform signal and kernel
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));
// Multiply the coefficients together and normalize the result
hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, d_signal, d_filter_kernel, new_size, 1.0f / new_size);
// Check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
cufftSafeCall(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));
// Copy device memory to host
Complex* h_convolved_signal = h_padded_signal;
cutilSafeCall(hipMemcpy(h_convolved_signal, d_signal, mem_size,
hipMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
CUTBoolean res = cutCompareL2fe((float*)h_convolved_signal_ref, (float*)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
printf("%s\n", (1 == res) ? "PASSED" : "FAILED");
//Destroy CUFFT context
cufftSafeCall(hipfftDestroy(plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
cutilSafeCall(hipFree(d_signal));
cutilSafeCall(hipFree(d_filter_kernel));
hipDeviceReset();
}
// Pad data
int PadData(const Complex* signal, Complex** padded_signal, int signal_size,
const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
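// Illustrative example (not part of the original sample): with SIGNAL_SIZE = 8
// and FILTER_KERNEL_SIZE = 5, minRadius = 2, maxRadius = 3 and new_size = 11.
// The padded signal becomes [s0..s7, 0, 0, 0] and the filter is stored
// circularly as [k2, k3, k4, 0, 0, 0, 0, 0, 0, k0, k1], which centres the
// kernel for the FFT-based circular convolution.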
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex* signal, int signal_size,
const Complex* filter_kernel, int filter_kernel_size,
Complex* filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size)
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
| 0775ace83485be232869441dcaea1de3738210b0.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cufft.h>
#include <cutil_inline.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex*, const Complex*, int, float);
// Filtering functions
void Convolve(const Complex*, int, const Complex*, int, Complex*);
// Padding functions
int PadData(const Complex*, Complex**, int,
const Complex*, Complex**, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// Allocate host memory for the signal
Complex* h_signal = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initalize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) {
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex* h_filter_kernel = (Complex*)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initalize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) {
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex* h_padded_signal;
Complex* h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex* d_signal;
cutilSafeCall(cudaMalloc((void**)&d_signal, mem_size));
// Copy host memory to device
cutilSafeCall(cudaMemcpy(d_signal, h_padded_signal, mem_size,
cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex* d_filter_kernel;
cutilSafeCall(cudaMalloc((void**)&d_filter_kernel, mem_size));
// Copy host memory to device
cutilSafeCall(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
cudaMemcpyHostToDevice));
// CUFFT plan
cufftHandle plan;
cufftSafeCall(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1));
// Transform signal and kernel
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));
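// By the convolution theorem, the circular convolution of the padded signal and
// kernel equals the inverse FFT of their pointwise product. CUFFT's inverse
// transform is unnormalized, hence the 1.0f / new_size scale folded into
// ComplexPointwiseMulAndScale below.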
// Multiply the coefficients together and normalize the result
ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size);
// Check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
cufftSafeCall(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));
// Copy device memory to host
Complex* h_convolved_signal = h_padded_signal;
cutilSafeCall(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
cudaMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex* h_convolved_signal_ref = (Complex*)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
CUTBoolean res = cutCompareL2fe((float*)h_convolved_signal_ref, (float*)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
printf("%s\n", (1 == res) ? "PASSED" : "FAILED");
//Destroy CUFFT context
cufftSafeCall(cufftDestroy(plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
cutilSafeCall(cudaFree(d_signal));
cutilSafeCall(cudaFree(d_filter_kernel));
cudaThreadExit();
}
// Pad data
int PadData(const Complex* signal, Complex** padded_signal, int signal_size,
const Complex* filter_kernel, Complex** padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex* new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
new_data = (Complex*)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex* signal, int signal_size,
const Complex* filter_kernel, int filter_kernel_size,
Complex* filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i) {
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j) {
int k = i + j;
if (k >= 0 && k < signal_size)
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
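// Index convention in Convolve (illustrative): with filter_kernel_size = 5,
// minRadius = 2 and maxRadius = 3, j runs from -2 to 2 and output element i
// accumulates signal[i + j] * filter_kernel[minRadius - j], i.e. signal[i-2..i+2]
// weighted by kernel[4..0] (the kernel is applied reversed, as in a true
// convolution).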
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
|
687c2952ac2f85236c8455cca2fded1aaa07e8b5.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
//#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount)
#include "bubbles_cuda.h"
#include "streamcontainer.h"
#include "grid.h"
#include "bubbles_multiplier.h"
#include "function3d_multiplier.h"
#include "memory_leak_operators.h"
__host__ inline void check_multiplier_errors(const char *filename, const int line_number) {
#ifdef CUDA_DEBUG
hipDeviceSynchronize();
#endif
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(error));
exit(-1);
}
}
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
// the multiplying kernel functions
__device__ inline int product_identifier(const int idx1, const int idx2, const int lmax) {
int is=min(idx1,idx2)+1;
int il=max(idx1,idx2)+1;
int nmax = (lmax+1)*(lmax+1);
int result = (is-1)*(2*nmax-is)/2+il -1;
return result;
}
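// product_identifier linearizes the upper triangle of the symmetric
// (nmax x nmax) pair matrix row by row. Illustrative example with lmax = 2
// (nmax = 9): (idx1, idx2) = (0, 3) gives is = 1, il = 4 and index
// 0 * (18 - 1) / 2 + 4 - 1 = 3, while the diagonal pair (2, 2) gives
// 2 * (18 - 3) / 2 + 3 - 1 = 17.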
__global__ void Bubble_product_kernel(Bubble *bubble, Bubble *bubble1, Bubble *result_bubble,
const double* __restrict__ coefficients, const int* __restrict__ number_of_terms,
const int* __restrict__ result_lm, const int* __restrict__ positions,
const int offset, const int max_id, const size_t device_f_pitch, double factor) {
const int id = threadIdx.x + blockIdx.x * blockDim.x;
const int index= id + offset;
int result_index, prod_idx, i, lm_counter = 0, lm_counter1 = 0, term_id;
const int lmax0 = bubble->lmax, lmax1 = bubble1->lmax, lmax2 = bubble->lmax + bubble1->lmax;
const int nmax0 = (lmax0 +1) * (lmax0+1), nmax1 = (lmax1 +1) * (lmax1+1);
double value, value1, value12, value2;
if (id < max_id) {
// printf("begin b1[0], b2[0], br[0]: %f, %f, %f\n", bubble->f[0], bubble1->f[0], result_bubble->f[0]);
// go through all l, m values of input bubble 'bubble'
for (lm_counter = 0; lm_counter < nmax0; lm_counter++) {
// get the value for the point 'index' for 'bubble' with current l, m values
value = bubble->f[lm_counter * device_f_pitch / sizeof(double) + index];
value12 = (lm_counter < nmax1) ? bubble1->f[lm_counter * device_f_pitch / sizeof(double) + index] : 0.0;
for (lm_counter1 = lm_counter; lm_counter1 < nmax1 ; lm_counter1++) {
prod_idx = product_identifier(lm_counter, lm_counter1, lmax2);
term_id = ldg<int>(&positions[prod_idx])-1;
// get the value for the point 'index' for 'bubble' with current l1, m1 values
value1 = value * bubble1->f[lm_counter1 * device_f_pitch / sizeof(double)+index];
value2 = (lm_counter == lm_counter1 || lm_counter1 >= nmax0)
? 0.0 : value12 * bubble->f[lm_counter1 * device_f_pitch / sizeof(double)+index];
value1 += value2;
for (i = 0; i < ldg<int>(&number_of_terms[prod_idx]); i++) {
result_index = (ldg<int>(&result_lm[term_id]) - 1) * device_f_pitch / sizeof(double) + index;
result_bubble->f[result_index] += factor * ldg<double>(&coefficients[term_id]) * value1;
term_id ++;
}
}
}
// printf("end b1[0], b2[0], br[0]: %f, %f, %f\n", bubble->f[0], bubble1->f[0], result_bubble->f[0]);
}
}
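// Note on the loop structure above: each unordered pair (lm_counter, lm_counter1)
// is visited once; value1 carries bubble[lm] * bubble1[lm1] and value2 adds the
// mirrored bubble1[lm] * bubble[lm1] term when the indices differ and both
// channels exist, so the full symmetric product is accumulated. The coupling
// coefficients and target (l, m) channels for a pair start at
// positions[prod_idx] - 1 and span number_of_terms[prod_idx] entries.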
// BubblesMultiplier-class functions
BubblesMultiplier::BubblesMultiplier(Bubbles *bubbles1, Bubbles *bubbles2, Bubbles *result_bubbles,
Bubbles *taylor_series_bubbles1, Bubbles *taylor_series_bubbles2,
int lmax,
double *coefficients, int *number_of_terms, int *result_lm, int *positions,
int result_lm_size, int processor_order_number,
int number_of_processors, StreamContainer *streamContainer) {
this->bubbles1 = bubbles1;
this->bubbles2 = bubbles2;
this->result_bubbles = result_bubbles;
this->taylor_series_bubbles1 = taylor_series_bubbles1;
this->taylor_series_bubbles2 = taylor_series_bubbles2;
this->processor_order_number = processor_order_number;
this->number_of_processors = number_of_processors;
this->streamContainer = streamContainer;
this->result_bubbles->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
this->bubbles1->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
this->bubbles2->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
if (this->taylor_series_bubbles1) {
this->taylor_series_bubbles1->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
}
if (this->taylor_series_bubbles2) {
this->taylor_series_bubbles2->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
}
// allocate the arrays to contain device-wise pointers
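    // (hipHostMalloc with the hipHostMallocPortable flag returns page-locked host
    // memory that is usable from every device managed by the stream container)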
hipHostMalloc((void **)&this->device_number_of_terms, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), hipHostMallocPortable);
check_multiplier_errors(__FILE__, __LINE__);
hipHostMalloc((void **)&this->device_positions, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), hipHostMallocPortable);
check_multiplier_errors(__FILE__, __LINE__);
hipHostMalloc((void **)&this->device_result_lm, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), hipHostMallocPortable);
check_multiplier_errors(__FILE__, __LINE__);
hipHostMalloc((void **)&this->device_coefficients, sizeof(double * ) * this->streamContainer->getNumberOfDevices(), hipHostMallocPortable);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the number of result terms per l,m -pair: 'number_of_terms'
int nmax = (lmax + 1) * (lmax + 1);
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
hipSetDevice(device);
size_t size = nmax*(nmax+1)/2 * sizeof(int);
hipMalloc(&this->device_number_of_terms[device], size);
check_multiplier_errors(__FILE__, __LINE__);
hipMemcpy(this->device_number_of_terms[device], number_of_terms, size, hipMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
hipMalloc(&this->device_positions[device], size);
check_multiplier_errors(__FILE__, __LINE__);
hipMemcpy(this->device_positions[device], positions, size, hipMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the result 'ilm'-addresses (l, m pair) of the result bubbles
size = result_lm_size * sizeof(int);
hipMalloc(&this->device_result_lm[device], size);
check_multiplier_errors(__FILE__, __LINE__);
hipMemcpy(this->device_result_lm[device], result_lm, size, hipMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the result 'coefficients' (l, m pair) of the result bubbles
size = result_lm_size * sizeof(double);
hipMalloc(&this->device_coefficients[device], size);
check_multiplier_errors(__FILE__, __LINE__);
hipMemcpy(this->device_coefficients[device], coefficients, size, hipMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
}
}
void BubblesMultiplier::multiplyBubble(int ibub, Bubbles* bubbles1, Bubbles* bubbles2, Bubbles* result_bubbles, double factor, int first_cell, int last_cell) {
    // calculate the total number of points per l,m -pair in the bubble
int total_point_count;
if (first_cell >= 0 && last_cell >= 0 ) {
int ncell = last_cell - first_cell;
total_point_count = ncell * (bubbles1->getBubble(ibub)->grid->nlip - 1) +1;
}
else {
total_point_count = bubbles1->getBubble(ibub)->grid->ncell * (bubbles1->getBubble(ibub)->grid->nlip - 1) +1;
}
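    // (each cell contributes nlip-1 points and neighbouring cells share an endpoint,
    //  hence the ncell * (nlip - 1) + 1 point count above)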
check_multiplier_errors(__FILE__, __LINE__);
// determine how many of the points belong to the current mpi-node
int remainder = total_point_count % this->number_of_processors;
int processor_point_count = total_point_count / this->number_of_processors
+ ( remainder > this->processor_order_number);
// get the offset to the f-array caused by other processors
    // the offset must equal the prefix sum of the per-processor point counts:
    // rank * (total/nproc) plus one extra point for every lower rank that received
    // one of the remainder points
    int offset = processor_order_number * (total_point_count / this->number_of_processors) +
                 ((remainder < processor_order_number) ? remainder : processor_order_number);
int block_size = 256;
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
hipSetDevice(device);
        // determine how many of the mpi-node's points belong to this device (gpu)
int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
            // determine the number of the points handled by this stream
int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
int grid_size = (stream_point_count + block_size - 1) / block_size;
// call the kernel
// printf("before b1[0], b2[0], br[0]: %f, %f, %f\n", bubbles1->getBubble(ibub)->f[0], bubbles2->getBubble(ibub)->f[0], result_bubbles->getBubble(ibub)->f[0]);
hipLaunchKernelGGL(( Bubble_product_kernel) , dim3(grid_size), dim3(block_size), 0, *this->streamContainer->getStream(device, stream),
bubbles1->getBubble(ibub)->device_copies[device], bubbles2->getBubble(ibub)->device_copies[device],
result_bubbles->getBubble(ibub)->device_copies[device], this->device_coefficients[device],
this->device_number_of_terms[device], this->device_result_lm[device], this->device_positions[device], offset, stream_point_count,
bubbles1->getBubble(ibub)->device_f_pitch[device], factor);
check_multiplier_errors(__FILE__, __LINE__);
// printf("after b1[0], b2[0], br[0]: %f, %f, %f\n", bubbles1->getBubble(ibub)->f[0], bubbles2->getBubble(ibub)->f[0], result_bubbles->getBubble(ibub)->f[0]);
// increase the offset for the next calls.
offset += stream_point_count;
}
}
}
void BubblesMultiplier::multiplyBubble(int ibub, double *bubble1_bf, double *bubble2_bf, double *result_bubble_bf,
double *taylor_series_bubble1_bf, double *taylor_series_bubble2_bf,
int lmax1, int lmax2, int tlmax1, int tlmax2
) {
this->complex_multiplication = true;
Bubble *result_bubble = this->result_bubbles->getBubble(ibub);
Bubble *bubble1 = this->bubbles1->getBubble(ibub);
Bubble *bubble2 = this->bubbles2->getBubble(ibub);
// register the host target of result array and set the values of the device array to zero
if (result_bubble && result_bubble_bf) {
result_bubble->registerHost(result_bubble_bf);
result_bubble->setToZero();
bubble1->setToZero();
bubble2->setToZero();
}
else {
printf("ERROR: the result_bubble or result_bubble_bf should not be NULL");
}
if (bubble1_bf) {
// upload the diagonal and off-diagonal parts of bubble 1 and add them together
bubble1->upload(bubble1_bf, lmax1);
}
else {
bubble1->setToZero();
}
check_multiplier_errors(__FILE__, __LINE__);
if (taylor_series_bubble1_bf) {
Bubble *taylor_bubble1 = this->taylor_series_bubbles1->getBubble(ibub);
taylor_bubble1->setToZero();
taylor_bubble1->upload(taylor_series_bubble1_bf, tlmax1);
bubble1->add(taylor_bubble1);
check_multiplier_errors(__FILE__, __LINE__);
}
if (bubble2_bf) {
// upload the diagonal and off-diagonal parts of bubble 2 and add them together
bubble2->upload(bubble2_bf, lmax2, bubble1_bf != bubble2_bf);
}
else {
bubble2->setToZero();
}
check_multiplier_errors(__FILE__, __LINE__);
if (taylor_series_bubble2_bf) {
Bubble *taylor_bubble2= this->taylor_series_bubbles2->getBubble(ibub);
taylor_bubble2->setToZero();
taylor_bubble2->upload(taylor_series_bubble2_bf, tlmax2, taylor_series_bubble1_bf != taylor_series_bubble2_bf);
bubble2->add(taylor_bubble2);
check_multiplier_errors(__FILE__, __LINE__);
}
// multiply the bubble1 with bubble2
this->multiplyBubble(ibub, this->bubbles1, this->bubbles2, this->result_bubbles, 1.0);
// deduct the taylor bubble1 * taylor_bubble2 from the result, if both are present
if (taylor_series_bubble1_bf && taylor_series_bubble2_bf) {
this->multiplyBubble(ibub, this->taylor_series_bubbles1, this->taylor_series_bubbles2, this->result_bubbles, -1.0);
}
check_multiplier_errors(__FILE__, __LINE__);
}
void BubblesMultiplier::downloadResult(int lmax, int *ibubs, int nbub) {
if (nbub > 0) {
for (int i = 0; i < nbub; i++) {
this->result_bubbles->getBubble(ibubs[i])->download(lmax);
}
check_multiplier_errors(__FILE__, __LINE__);
// as we are done with all uploading and downloading,
// unregister the host arrays of bubbles
this->result_bubbles->unregister();
check_multiplier_errors(__FILE__, __LINE__);
this->bubbles1->unregister();
check_multiplier_errors(__FILE__, __LINE__);
// if the multiplied bubbles are different, then unregister the second
// bubbles also
if (this->bubbles2->getBubbleWithLocalOrderNumber(0)->f != this->bubbles1->getBubbleWithLocalOrderNumber(0)->f) {
this->bubbles2->unregister();
check_multiplier_errors(__FILE__, __LINE__);
}
if (this->complex_multiplication && this->taylor_series_bubbles1 && this->taylor_series_bubbles2) {
this->taylor_series_bubbles1->unregister();
check_multiplier_errors(__FILE__, __LINE__);
// if the multiplied taylor series bubbles are different, then unregister the second
// taylor series bubbles also
if (this->taylor_series_bubbles1->getBubbleWithLocalOrderNumber(0)->f != this->taylor_series_bubbles2->getBubbleWithLocalOrderNumber(0)->f) {
this->taylor_series_bubbles2->unregister();
check_multiplier_errors(__FILE__, __LINE__);
}
}
}
}
void BubblesMultiplier::setK(int bubble1_k, int bubble2_k, int result_bubble_k, int taylor_series_bubble1_k, int taylor_series_bubble2_k) {
for (int i = 0; i < this->bubbles1->getBubbleCount(); i ++) {
this->bubbles1->getBubbleWithLocalOrderNumber(i)->k = bubble1_k;
this->bubbles2->getBubbleWithLocalOrderNumber(i)->k = bubble2_k;
this->result_bubbles->getBubbleWithLocalOrderNumber(i)->k = result_bubble_k;
if (this->taylor_series_bubbles1) this->taylor_series_bubbles1->getBubbleWithLocalOrderNumber(i)->k = taylor_series_bubble1_k;
if (this->taylor_series_bubbles2) this->taylor_series_bubbles2->getBubbleWithLocalOrderNumber(i)->k = taylor_series_bubble2_k;
}
check_multiplier_errors(__FILE__, __LINE__);
}
Bubbles *BubblesMultiplier::getBubbles1() {
return this->bubbles1;
}
Bubbles *BubblesMultiplier::getBubbles2() {
return this->bubbles2;
}
Bubbles *BubblesMultiplier::getResultBubbles() {
return this->result_bubbles;
}
/*
* Destroy all cuda related objects owned by this, i.e.,
* only the arrays
*/
void BubblesMultiplier::destroy() {
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
hipSetDevice(device);
hipFree(this->device_number_of_terms[device]);
hipFree(this->device_positions[device]);
hipFree(this->device_result_lm[device]);
hipFree(this->device_coefficients[device]);
}
hipHostFree(this->device_number_of_terms);
hipHostFree(this->device_result_lm);
hipHostFree(this->device_positions);
hipHostFree(this->device_coefficients);
}
/***********************************************************
* The Fortran Interfaces *
***********************************************************/
extern "C" void bubblesmultiplier_destroy_cuda(BubblesMultiplier *multiplier) {
multiplier->destroy();
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" BubblesMultiplier *bubblesmultiplier_init_cuda(Bubbles *bubbles1, Bubbles *bubbles2, Bubbles *result_bubbles,
Bubbles *taylor_series_bubbles1, Bubbles *taylor_series_bubbles2, int lmax,
double *coefficients, int *number_of_terms, int *result_lm, int *positions,
int result_lm_size, int processor_order_number,
int number_of_processors, StreamContainer *streamContainer) {
BubblesMultiplier *new_multiplier = new BubblesMultiplier(bubbles1, bubbles2, result_bubbles,
taylor_series_bubbles1, taylor_series_bubbles2, lmax, coefficients,
number_of_terms, result_lm, positions, result_lm_size,
processor_order_number, number_of_processors, streamContainer);
check_multiplier_errors(__FILE__, __LINE__);
return new_multiplier;
}
extern "C" void bubblesmultiplier_download_result_cuda(BubblesMultiplier *multiplier, int lmax, int *ibubs, int nbub) {
multiplier->downloadResult(lmax, ibubs, nbub);
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" void bubblesmultiplier_multiply_bubble_cuda(BubblesMultiplier *multiplier, int ibub, double *bubble1_bf,
double *bubble2_bf, double *result_bubble_bf,
double *taylor_series_bubble1_bf, double *taylor_series_bubble2_bf, int lmax1, int lmax2, int tlmax1, int tlmax2) {
check_multiplier_errors(__FILE__, __LINE__);
multiplier->multiplyBubble(ibub, bubble1_bf, bubble2_bf, result_bubble_bf, taylor_series_bubble1_bf, taylor_series_bubble2_bf, lmax1, lmax2, tlmax1, tlmax2);
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" void bubblesmultiplier_set_ks(BubblesMultiplier *multiplier, int bubble1_k,
int bubble2_k, int result_bubble_k, int taylor_series_bubble1_k, int taylor_series_bubble2_k) {
multiplier->setK(bubble1_k, bubble2_k, result_bubble_k, taylor_series_bubble1_k, taylor_series_bubble2_k);
check_multiplier_errors(__FILE__, __LINE__);
}
| 687c2952ac2f85236c8455cca2fded1aaa07e8b5.cu | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
//#include <algorithm> *std::max_element(result_cube, result_cube + totalPointCount)
#include "bubbles_cuda.h"
#include "streamcontainer.h"
#include "grid.h"
#include "bubbles_multiplier.h"
#include "function3d_multiplier.h"
#include "memory_leak_operators.h"
__host__ inline void check_multiplier_errors(const char *filename, const int line_number) {
#ifdef CUDA_DEBUG
cudaDeviceSynchronize();
#endif
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error));
exit(-1);
}
}
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
// the multiplying kernel functions
__device__ inline int product_identifier(const int idx1, const int idx2, const int lmax) {
int is=min(idx1,idx2)+1;
int il=max(idx1,idx2)+1;
int nmax = (lmax+1)*(lmax+1);
int result = (is-1)*(2*nmax-is)/2+il -1;
return result;
}
__global__ void Bubble_product_kernel(Bubble *bubble, Bubble *bubble1, Bubble *result_bubble,
const double* __restrict__ coefficients, const int* __restrict__ number_of_terms,
const int* __restrict__ result_lm, const int* __restrict__ positions,
const int offset, const int max_id, const size_t device_f_pitch, double factor) {
const int id = threadIdx.x + blockIdx.x * blockDim.x;
const int index= id + offset;
int result_index, prod_idx, i, lm_counter = 0, lm_counter1 = 0, term_id;
const int lmax0 = bubble->lmax, lmax1 = bubble1->lmax, lmax2 = bubble->lmax + bubble1->lmax;
const int nmax0 = (lmax0 +1) * (lmax0+1), nmax1 = (lmax1 +1) * (lmax1+1);
double value, value1, value12, value2;
if (id < max_id) {
// printf("begin b1[0], b2[0], br[0]: %f, %f, %f\n", bubble->f[0], bubble1->f[0], result_bubble->f[0]);
// go through all l, m values of input bubble 'bubble'
for (lm_counter = 0; lm_counter < nmax0; lm_counter++) {
// get the value for the point 'index' for 'bubble' with current l, m values
value = bubble->f[lm_counter * device_f_pitch / sizeof(double) + index];
value12 = (lm_counter < nmax1) ? bubble1->f[lm_counter * device_f_pitch / sizeof(double) + index] : 0.0;
for (lm_counter1 = lm_counter; lm_counter1 < nmax1 ; lm_counter1++) {
prod_idx = product_identifier(lm_counter, lm_counter1, lmax2);
term_id = ldg<int>(&positions[prod_idx])-1;
// get the value for the point 'index' for 'bubble' with current l1, m1 values
value1 = value * bubble1->f[lm_counter1 * device_f_pitch / sizeof(double)+index];
value2 = (lm_counter == lm_counter1 || lm_counter1 >= nmax0)
? 0.0 : value12 * bubble->f[lm_counter1 * device_f_pitch / sizeof(double)+index];
value1 += value2;
for (i = 0; i < ldg<int>(&number_of_terms[prod_idx]); i++) {
result_index = (ldg<int>(&result_lm[term_id]) - 1) * device_f_pitch / sizeof(double) + index;
result_bubble->f[result_index] += factor * ldg<double>(&coefficients[term_id]) * value1;
term_id ++;
}
}
}
// printf("end b1[0], b2[0], br[0]: %f, %f, %f\n", bubble->f[0], bubble1->f[0], result_bubble->f[0]);
}
}
// BubblesMultiplier-class functions
BubblesMultiplier::BubblesMultiplier(Bubbles *bubbles1, Bubbles *bubbles2, Bubbles *result_bubbles,
Bubbles *taylor_series_bubbles1, Bubbles *taylor_series_bubbles2,
int lmax,
double *coefficients, int *number_of_terms, int *result_lm, int *positions,
int result_lm_size, int processor_order_number,
int number_of_processors, StreamContainer *streamContainer) {
this->bubbles1 = bubbles1;
this->bubbles2 = bubbles2;
this->result_bubbles = result_bubbles;
this->taylor_series_bubbles1 = taylor_series_bubbles1;
this->taylor_series_bubbles2 = taylor_series_bubbles2;
this->processor_order_number = processor_order_number;
this->number_of_processors = number_of_processors;
this->streamContainer = streamContainer;
this->result_bubbles->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
this->bubbles1->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
this->bubbles2->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
if (this->taylor_series_bubbles1) {
this->taylor_series_bubbles1->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
}
if (this->taylor_series_bubbles2) {
this->taylor_series_bubbles2->setProcessorConfiguration(this->processor_order_number, this->number_of_processors);
}
// allocate the arrays to contain device-wise pointers
cudaHostAlloc((void **)&this->device_number_of_terms, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), cudaHostAllocPortable);
check_multiplier_errors(__FILE__, __LINE__);
cudaHostAlloc((void **)&this->device_positions, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), cudaHostAllocPortable);
check_multiplier_errors(__FILE__, __LINE__);
cudaHostAlloc((void **)&this->device_result_lm, sizeof(int * ) * this->streamContainer->getNumberOfDevices(), cudaHostAllocPortable);
check_multiplier_errors(__FILE__, __LINE__);
cudaHostAlloc((void **)&this->device_coefficients, sizeof(double * ) * this->streamContainer->getNumberOfDevices(), cudaHostAllocPortable);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the number of result terms per l,m -pair: 'number_of_terms'
int nmax = (lmax + 1) * (lmax + 1);
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
cudaSetDevice(device);
size_t size = nmax*(nmax+1)/2 * sizeof(int);
cudaMalloc(&this->device_number_of_terms[device], size);
check_multiplier_errors(__FILE__, __LINE__);
cudaMemcpy(this->device_number_of_terms[device], number_of_terms, size, cudaMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
cudaMalloc(&this->device_positions[device], size);
check_multiplier_errors(__FILE__, __LINE__);
cudaMemcpy(this->device_positions[device], positions, size, cudaMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the result 'ilm'-addresses (l, m pair) of the result bubbles
size = result_lm_size * sizeof(int);
cudaMalloc(&this->device_result_lm[device], size);
check_multiplier_errors(__FILE__, __LINE__);
cudaMemcpy(this->device_result_lm[device], result_lm, size, cudaMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
// allocate & copy the array containing the result 'coefficients' (l, m pair) of the result bubbles
size = result_lm_size * sizeof(double);
cudaMalloc(&this->device_coefficients[device], size);
check_multiplier_errors(__FILE__, __LINE__);
cudaMemcpy(this->device_coefficients[device], coefficients, size, cudaMemcpyHostToDevice);
check_multiplier_errors(__FILE__, __LINE__);
}
}
void BubblesMultiplier::multiplyBubble(int ibub, Bubbles* bubbles1, Bubbles* bubbles2, Bubbles* result_bubbles, double factor, int first_cell, int last_cell) {
// calculate the total number of points in the bubbles each l,m -pair,
int total_point_count;
if (first_cell >= 0 && last_cell >= 0 ) {
int ncell = last_cell - first_cell;
total_point_count = ncell * (bubbles1->getBubble(ibub)->grid->nlip - 1) +1;
}
else {
total_point_count = bubbles1->getBubble(ibub)->grid->ncell * (bubbles1->getBubble(ibub)->grid->nlip - 1) +1;
}
check_multiplier_errors(__FILE__, __LINE__);
// determine how many of the points belong to the current mpi-node
int remainder = total_point_count % this->number_of_processors;
int processor_point_count = total_point_count / this->number_of_processors
+ ( remainder > this->processor_order_number);
// get the offset to the f-array caused by other processors
int offset = processor_order_number * total_point_count / this->number_of_processors +
((remainder < processor_order_number) ? remainder : processor_order_number);
int block_size = 256;
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
cudaSetDevice(device);
// detemine how many of the mpi-node's points belong to this device (gpu)
int device_point_count = processor_point_count / this->streamContainer->getNumberOfDevices() +
((processor_point_count % this->streamContainer->getNumberOfDevices()) > device);
for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) {
// detemine the number of the points handled by this stream
int stream_point_count = device_point_count / this->streamContainer->getStreamsPerDevice() +
((device_point_count % this->streamContainer->getStreamsPerDevice()) > stream);
int grid_size = (stream_point_count + block_size - 1) / block_size;
// call the kernel
// printf("before b1[0], b2[0], br[0]: %f, %f, %f\n", bubbles1->getBubble(ibub)->f[0], bubbles2->getBubble(ibub)->f[0], result_bubbles->getBubble(ibub)->f[0]);
Bubble_product_kernel <<<grid_size, block_size, 0, *this->streamContainer->getStream(device, stream)>>>
(bubbles1->getBubble(ibub)->device_copies[device], bubbles2->getBubble(ibub)->device_copies[device],
result_bubbles->getBubble(ibub)->device_copies[device], this->device_coefficients[device],
this->device_number_of_terms[device], this->device_result_lm[device], this->device_positions[device], offset, stream_point_count,
bubbles1->getBubble(ibub)->device_f_pitch[device], factor);
check_multiplier_errors(__FILE__, __LINE__);
// printf("after b1[0], b2[0], br[0]: %f, %f, %f\n", bubbles1->getBubble(ibub)->f[0], bubbles2->getBubble(ibub)->f[0], result_bubbles->getBubble(ibub)->f[0]);
// increase the offset for the next calls.
offset += stream_point_count;
}
}
}
void BubblesMultiplier::multiplyBubble(int ibub, double *bubble1_bf, double *bubble2_bf, double *result_bubble_bf,
double *taylor_series_bubble1_bf, double *taylor_series_bubble2_bf,
int lmax1, int lmax2, int tlmax1, int tlmax2
) {
this->complex_multiplication = true;
Bubble *result_bubble = this->result_bubbles->getBubble(ibub);
Bubble *bubble1 = this->bubbles1->getBubble(ibub);
Bubble *bubble2 = this->bubbles2->getBubble(ibub);
// register the host target of result array and set the values of the device array to zero
if (result_bubble && result_bubble_bf) {
result_bubble->registerHost(result_bubble_bf);
result_bubble->setToZero();
bubble1->setToZero();
bubble2->setToZero();
}
else {
printf("ERROR: the result_bubble or result_bubble_bf should not be NULL");
}
if (bubble1_bf) {
// upload the diagonal and off-diagonal parts of bubble 1 and add them together
bubble1->upload(bubble1_bf, lmax1);
}
else {
bubble1->setToZero();
}
check_multiplier_errors(__FILE__, __LINE__);
if (taylor_series_bubble1_bf) {
Bubble *taylor_bubble1 = this->taylor_series_bubbles1->getBubble(ibub);
taylor_bubble1->setToZero();
taylor_bubble1->upload(taylor_series_bubble1_bf, tlmax1);
bubble1->add(taylor_bubble1);
check_multiplier_errors(__FILE__, __LINE__);
}
if (bubble2_bf) {
// upload the diagonal and off-diagonal parts of bubble 2 and add them together
bubble2->upload(bubble2_bf, lmax2, bubble1_bf != bubble2_bf);
}
else {
bubble2->setToZero();
}
check_multiplier_errors(__FILE__, __LINE__);
if (taylor_series_bubble2_bf) {
Bubble *taylor_bubble2= this->taylor_series_bubbles2->getBubble(ibub);
taylor_bubble2->setToZero();
taylor_bubble2->upload(taylor_series_bubble2_bf, tlmax2, taylor_series_bubble1_bf != taylor_series_bubble2_bf);
bubble2->add(taylor_bubble2);
check_multiplier_errors(__FILE__, __LINE__);
}
// multiply the bubble1 with bubble2
this->multiplyBubble(ibub, this->bubbles1, this->bubbles2, this->result_bubbles, 1.0);
// deduct the taylor bubble1 * taylor_bubble2 from the result, if both are present
if (taylor_series_bubble1_bf && taylor_series_bubble2_bf) {
this->multiplyBubble(ibub, this->taylor_series_bubbles1, this->taylor_series_bubbles2, this->result_bubbles, -1.0);
}
check_multiplier_errors(__FILE__, __LINE__);
}
void BubblesMultiplier::downloadResult(int lmax, int *ibubs, int nbub) {
if (nbub > 0) {
for (int i = 0; i < nbub; i++) {
this->result_bubbles->getBubble(ibubs[i])->download(lmax);
}
check_multiplier_errors(__FILE__, __LINE__);
// as we are done with all uploading and downloading,
// unregister the host arrays of bubbles
this->result_bubbles->unregister();
check_multiplier_errors(__FILE__, __LINE__);
this->bubbles1->unregister();
check_multiplier_errors(__FILE__, __LINE__);
// if the multiplied bubbles are different, then unregister the second
// bubbles also
if (this->bubbles2->getBubbleWithLocalOrderNumber(0)->f != this->bubbles1->getBubbleWithLocalOrderNumber(0)->f) {
this->bubbles2->unregister();
check_multiplier_errors(__FILE__, __LINE__);
}
if (this->complex_multiplication && this->taylor_series_bubbles1 && this->taylor_series_bubbles2) {
this->taylor_series_bubbles1->unregister();
check_multiplier_errors(__FILE__, __LINE__);
// if the multiplied taylor series bubbles are different, then unregister the second
// taylor series bubbles also
if (this->taylor_series_bubbles1->getBubbleWithLocalOrderNumber(0)->f != this->taylor_series_bubbles2->getBubbleWithLocalOrderNumber(0)->f) {
this->taylor_series_bubbles2->unregister();
check_multiplier_errors(__FILE__, __LINE__);
}
}
}
}
void BubblesMultiplier::setK(int bubble1_k, int bubble2_k, int result_bubble_k, int taylor_series_bubble1_k, int taylor_series_bubble2_k) {
for (int i = 0; i < this->bubbles1->getBubbleCount(); i ++) {
this->bubbles1->getBubbleWithLocalOrderNumber(i)->k = bubble1_k;
this->bubbles2->getBubbleWithLocalOrderNumber(i)->k = bubble2_k;
this->result_bubbles->getBubbleWithLocalOrderNumber(i)->k = result_bubble_k;
if (this->taylor_series_bubbles1) this->taylor_series_bubbles1->getBubbleWithLocalOrderNumber(i)->k = taylor_series_bubble1_k;
if (this->taylor_series_bubbles2) this->taylor_series_bubbles2->getBubbleWithLocalOrderNumber(i)->k = taylor_series_bubble2_k;
}
check_multiplier_errors(__FILE__, __LINE__);
}
Bubbles *BubblesMultiplier::getBubbles1() {
return this->bubbles1;
}
Bubbles *BubblesMultiplier::getBubbles2() {
return this->bubbles2;
}
Bubbles *BubblesMultiplier::getResultBubbles() {
return this->result_bubbles;
}
/*
* Destroy all cuda related objects owned by this, i.e.,
* only the arrays
*/
void BubblesMultiplier::destroy() {
for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) {
cudaSetDevice(device);
cudaFree(this->device_number_of_terms[device]);
cudaFree(this->device_positions[device]);
cudaFree(this->device_result_lm[device]);
cudaFree(this->device_coefficients[device]);
}
cudaFreeHost(this->device_number_of_terms);
cudaFreeHost(this->device_result_lm);
cudaFreeHost(this->device_positions);
cudaFreeHost(this->device_coefficients);
}
/***********************************************************
* The Fortran Interfaces *
***********************************************************/
extern "C" void bubblesmultiplier_destroy_cuda(BubblesMultiplier *multiplier) {
multiplier->destroy();
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" BubblesMultiplier *bubblesmultiplier_init_cuda(Bubbles *bubbles1, Bubbles *bubbles2, Bubbles *result_bubbles,
Bubbles *taylor_series_bubbles1, Bubbles *taylor_series_bubbles2, int lmax,
double *coefficients, int *number_of_terms, int *result_lm, int *positions,
int result_lm_size, int processor_order_number,
int number_of_processors, StreamContainer *streamContainer) {
BubblesMultiplier *new_multiplier = new BubblesMultiplier(bubbles1, bubbles2, result_bubbles,
taylor_series_bubbles1, taylor_series_bubbles2, lmax, coefficients,
number_of_terms, result_lm, positions, result_lm_size,
processor_order_number, number_of_processors, streamContainer);
check_multiplier_errors(__FILE__, __LINE__);
return new_multiplier;
}
extern "C" void bubblesmultiplier_download_result_cuda(BubblesMultiplier *multiplier, int lmax, int *ibubs, int nbub) {
multiplier->downloadResult(lmax, ibubs, nbub);
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" void bubblesmultiplier_multiply_bubble_cuda(BubblesMultiplier *multiplier, int ibub, double *bubble1_bf,
double *bubble2_bf, double *result_bubble_bf,
double *taylor_series_bubble1_bf, double *taylor_series_bubble2_bf, int lmax1, int lmax2, int tlmax1, int tlmax2) {
check_multiplier_errors(__FILE__, __LINE__);
multiplier->multiplyBubble(ibub, bubble1_bf, bubble2_bf, result_bubble_bf, taylor_series_bubble1_bf, taylor_series_bubble2_bf, lmax1, lmax2, tlmax1, tlmax2);
check_multiplier_errors(__FILE__, __LINE__);
}
extern "C" void bubblesmultiplier_set_ks(BubblesMultiplier *multiplier, int bubble1_k,
int bubble2_k, int result_bubble_k, int taylor_series_bubble1_k, int taylor_series_bubble2_k) {
multiplier->setK(bubble1_k, bubble2_k, result_bubble_k, taylor_series_bubble1_k, taylor_series_bubble2_k);
check_multiplier_errors(__FILE__, __LINE__);
}
|
0eefd5a954f24a5930d5daa5d7e3f0c2db2e01f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GPU_mt_info.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
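// Benchmark harness: for every (matrix size, block shape) pair the problem size is
// rounded up to a multiple of the block, the kernel is warmed up, and 1000 launches
// are timed. Note that no device synchronize is issued before 'end' is taken, so the
// figure may not include the full execution time of the queued kernels.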
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GPU_mt_info), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GPU_mt_info), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GPU_mt_info), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0eefd5a954f24a5930d5daa5d7e3f0c2db2e01f9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GPU_mt_info.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GPU_mt_info<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GPU_mt_info<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GPU_mt_info<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cd5a992e4b0dd586eafdbb1763b050e97034e04a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 512
// Global function means it will be executed on the device (GPU)
__global__ void add(int *in1, int *in2, int *out)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
out[index] = in1[index] + in2[index];
}
void random_ints(int *i, int size)
{
for(int k=0; k<size; k++)
{
i[k]=rand()%50;
}
}
int *testmain(int num)
{
int *in1, *in2, *out; // host copies of inputs and output
int *d_in1, *d_in2, *d_out; // device copies of inputs and output
int size = num * sizeof(int);
// Alloc space for device copies of three vectors
hipMalloc((void **)&d_in1, size);
hipMalloc((void **)&d_in2, size);
hipMalloc((void **)&d_out, size);
// Alloc space for host copies of the three vectors and setup input values
in1 = (int *)malloc(size); random_ints(in1, num);
in2 = (int *)malloc(size); random_ints(in2, num);
out = (int *)malloc(size);
// Copy inputs to device
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(num/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, d_in1, d_in2, d_out);
// Wait for the GPU to finish
hipDeviceSynchronize();
// Copy result back to host
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
// Cleanup
free(in1); free(in2); free(out);
hipFree(d_in1); hipFree(d_in2); hipFree(d_out);
return out;
}
| cd5a992e4b0dd586eafdbb1763b050e97034e04a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 512
// Global function means it will be executed on the device (GPU)
__global__ void add(int *in1, int *in2, int *out)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
out[index] = in1[index] + in2[index];
}
void random_ints(int *i, int size)
{
for(int k=0; k<size; k++)
{
i[k]=rand()%50;
}
}
int *testmain(int num)
{
int *in1, *in2, *out; // host copies of inputs and output
int *d_in1, *d_in2, *d_out; // device copies of inputs and output
int size = num * sizeof(int);
// Alloc space for device copies of three vectors
cudaMalloc((void **)&d_in1, size);
cudaMalloc((void **)&d_in2, size);
cudaMalloc((void **)&d_out, size);
// Alloc space for host copies of the three vectors and setup input values
in1 = (int *)malloc(size); random_ints(in1, num);
in2 = (int *)malloc(size); random_ints(in2, num);
out = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<num/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_in1, d_in2, d_out);
// Wait for the GPU to finish
cudaDeviceSynchronize();
// Copy result back to host
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
// Cleanup
free(in1); free(in2); free(out);
cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out);
return out;
}
|
b738df05f49a7d7b257f3f96c189ad2f964c1f11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
using namespace std;
const int tread_size=1024;
#define CSC(call) do { \
hipError_t e = call; \
if (e != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, hipGetErrorString(e)); \
exit(0); \
} \
} while(0)
#define f(n) (n)+((n)>>5)
#define f1(n,step) (((n)/step-1)*step+step-1)
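// Counting sort over the 2^24 possible key values: DTH builds a histogram of the keys
// with atomicAdd, kernel1/kernel2 turn the histogram into a prefix sum (a hierarchical
// Blelloch up-sweep/down-sweep scan in shared memory, with the f() padding macro
// spreading indices across banks to reduce shared-memory bank conflicts), and HTD
// then scatters every key back to its final sorted position.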
__global__ void kernel1(int* height, long long sz,long long step) {
long long idx = threadIdx.x + blockIdx.x * blockDim.x;
long long offsetx = gridDim.x * blockDim.x;
long long j,i,k;
__shared__ int data[f(tread_size)];
for(i=idx*step+step-1;i<sz;i+=offsetx*step){
__syncthreads();
data[f(threadIdx.x)]=height[i];
for(k=1;k<tread_size;k*=2){
__syncthreads();
for( j=threadIdx.x*k*2+k-1;j+k<tread_size;j+=blockDim.x*k*2){
data[f(j+k)]+=data[f(j)];
}
__syncthreads();
}
data[f(tread_size-1)]=0;
__syncthreads();
for( k=tread_size;k>1;k/=2){
__syncthreads();
for(j=k-1+threadIdx.x*k;j<tread_size;j+=blockDim.x*k){
int tmp=data[f(j-k/2)];
data[f(j-k/2)]=data[f(j)];
data[f(j)]=tmp+data[f(j)];
}
__syncthreads();
}
__syncthreads();
height[i]+=data[f(threadIdx.x)];
}
/*for (int j = i - 1; j + i < sz; j += i * 2) {
height[j + i] += height[j];
    }*/
}
__global__ void kernel2(int* height, long long sz,long long step) {
long long idx = threadIdx.x + blockIdx.x * blockDim.x;
long long offsetx = gridDim.x * blockDim.x;
long long i;
for(i=idx*step/tread_size+step+step/tread_size-1;i<sz;i+=offsetx*step/tread_size){
if(i%step!=step-1){
height[i]+=height[f1(i,step)];
}
}
}
__global__ void DTH(int* data, int n,int* height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = gridDim.x * blockDim.x;
int i;
for(i = idx; i < n; i += offsetx){
//__threadfence_system();
atomicAdd(height+data[i],1);
//printf("%d %d\n",height[data[i]],data[i]);
}
}
__global__ void HTD(int* data,int* in, int n,int* height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = gridDim.x * blockDim.x;
int i;
for(i = idx; i < n; i += offsetx){
//__threadfence_system();
//__threadfence();
//printf("%d %d\n",height[in[i]],in[i]);
data[atomicSub(height + in[i],1)-1]=in[i];
//__threadfence();
}
}
int main() {
long long sz=1<<24 ;
int n;
//cin >> n;
fread(&n, sizeof(int), 1, stdin);
//int* in=(int*) malloc( sizeof(int) * n);
int* data=(int*) malloc( sizeof(int) * n);
//int* height=(int* ) malloc( sizeof(int) * n);
fread(data, sizeof(int), n, stdin);
/*for(int i=0;i<n;i++){
cin >> data[i];
}*/
int *gpu_in,*gpu_data,*gpu_height;
fprintf(stderr, "n=%d\n",n);
for(int i=0;i<min(n,100);i++){
fprintf(stderr, "%d ",data[i]);
}
fprintf(stderr, "\n");
CSC( hipMalloc( &gpu_in, n*sizeof(int) ) );
CSC( hipMalloc( &gpu_data, n*sizeof(int) ) );
CSC( hipMemcpy( gpu_in, data, n*sizeof(int), hipMemcpyHostToDevice ) );
CSC( hipMalloc( &gpu_height, sz*sizeof(int)));
CSC( hipMemset(gpu_height,0,sz*sizeof(int)));
dim3 threads = tread_size;
dim3 blocks = tread_size;
/*for(i = 0; i < n; i++){
height[data[i]]++;
}*/
hipLaunchKernelGGL(( DTH), dim3(blocks),dim3(threads), 0, 0, gpu_in,n,gpu_height);
long long i=1;
for(;i<sz;i*=tread_size)
hipLaunchKernelGGL(( kernel1), dim3(blocks),dim3(threads), 0, 0, gpu_height,sz,i);
/*for (int j = i - 1; j + i < sz; j += i * 2) {
height[j + i] += height[j];
}*/
//__threadfence_system();
for(;i>1;i/=tread_size)
hipLaunchKernelGGL(( kernel2), dim3(blocks),dim3(threads), 0, 0, gpu_height,sz,i);
/*for(j=i-1;j+i/2<sz-1;j+=i){
height[j+i/2]+=height[j];
}*/
//__threadfence_system();
/*for(i = idx; i < n; i += offsetx){
in[--height[in[i]]]=data[i];
}*/
hipLaunchKernelGGL(( HTD), dim3(blocks),dim3(threads), 0, 0, gpu_data,gpu_in,n,gpu_height);
CSC( hipMemcpy( data,gpu_data, n*sizeof(int), hipMemcpyDeviceToHost ) );
/*for(int i=0;i<n;i++){
cout << data[i]<<" ";
}
cout << endl;*/
fwrite(data, sizeof(int), n, stdout);
/*for(int i=0;i<n;i++){
fprintf(stderr, "%d ",data[i]);
}
fprintf(stderr, "\n");*/
CSC(hipFree ( gpu_height ));
CSC(hipFree ( gpu_in ));
CSC(hipFree ( gpu_data ));
free(data);
    return 0;
}
| b738df05f49a7d7b257f3f96c189ad2f964c1f11.cu | #include <stdio.h>
#include <stdlib.h>
using namespace std;
const int tread_size=1024;
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
#define f(n) (n)+((n)>>5)
#define f1(n,step) (((n)/step-1)*step+step-1)
__global__ void kernel1(int* height, long long sz,long long step) {
long long idx = threadIdx.x + blockIdx.x * blockDim.x;
long long offsetx = gridDim.x * blockDim.x;
long long j,i,k;
__shared__ int data[f(tread_size)];
for(i=idx*step+step-1;i<sz;i+=offsetx*step){
__syncthreads();
data[f(threadIdx.x)]=height[i];
for(k=1;k<tread_size;k*=2){
__syncthreads();
for( j=threadIdx.x*k*2+k-1;j+k<tread_size;j+=blockDim.x*k*2){
data[f(j+k)]+=data[f(j)];
}
__syncthreads();
}
data[f(tread_size-1)]=0;
__syncthreads();
for( k=tread_size;k>1;k/=2){
__syncthreads();
for(j=k-1+threadIdx.x*k;j<tread_size;j+=blockDim.x*k){
int tmp=data[f(j-k/2)];
data[f(j-k/2)]=data[f(j)];
data[f(j)]=tmp+data[f(j)];
}
__syncthreads();
}
__syncthreads();
height[i]+=data[f(threadIdx.x)];
}
/*for (int j = i - 1; j + i < sz; j += i * 2) {
height[j + i] += height[j];
    }*/
}
__global__ void kernel2(int* height, long long sz,long long step) {
long long idx = threadIdx.x + blockIdx.x * blockDim.x;
long long offsetx = gridDim.x * blockDim.x;
long long i;
for(i=idx*step/tread_size+step+step/tread_size-1;i<sz;i+=offsetx*step/tread_size){
if(i%step!=step-1){
height[i]+=height[f1(i,step)];
}
}
}
__global__ void DTH(int* data, int n,int* height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = gridDim.x * blockDim.x;
int i;
for(i = idx; i < n; i += offsetx){
//__threadfence_system();
atomicAdd(height+data[i],1);
//printf("%d %d\n",height[data[i]],data[i]);
}
}
__global__ void HTD(int* data,int* in, int n,int* height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int offsetx = gridDim.x * blockDim.x;
int i;
for(i = idx; i < n; i += offsetx){
//__threadfence_system();
//__threadfence();
//printf("%d %d\n",height[in[i]],in[i]);
data[atomicSub(height + in[i],1)-1]=in[i];
//__threadfence();
}
}
int main() {
long long sz=1<<24 ;
int n;
//cin >> n;
fread(&n, sizeof(int), 1, stdin);
//int* in=(int*) malloc( sizeof(int) * n);
int* data=(int*) malloc( sizeof(int) * n);
//int* height=(int* ) malloc( sizeof(int) * n);
fread(data, sizeof(int), n, stdin);
/*for(int i=0;i<n;i++){
cin >> data[i];
}*/
int *gpu_in,*gpu_data,*gpu_height;
fprintf(stderr, "n=%d\n",n);
for(int i=0;i<min(n,100);i++){
fprintf(stderr, "%d ",data[i]);
}
fprintf(stderr, "\n");
CSC( cudaMalloc( &gpu_in, n*sizeof(int) ) );
CSC( cudaMalloc( &gpu_data, n*sizeof(int) ) );
CSC( cudaMemcpy( gpu_in, data, n*sizeof(int), cudaMemcpyHostToDevice ) );
CSC( cudaMalloc( &gpu_height, sz*sizeof(int)));
CSC( cudaMemset(gpu_height,0,sz*sizeof(int)));
dim3 threads = tread_size;
dim3 blocks = tread_size;
/*for(i = 0; i < n; i++){
height[data[i]]++;
}*/
DTH<<<blocks,threads>>>(gpu_in,n,gpu_height);
long long i=1;
for(;i<sz;i*=tread_size)
kernel1<<<blocks,threads>>>(gpu_height,sz,i);
/*for (int j = i - 1; j + i < sz; j += i * 2) {
height[j + i] += height[j];
}*/
//__threadfence_system();
for(;i>1;i/=tread_size)
kernel2<<<blocks,threads>>>(gpu_height,sz,i);
/*for(j=i-1;j+i/2<sz-1;j+=i){
height[j+i/2]+=height[j];
}*/
//__threadfence_system();
/*for(i = idx; i < n; i += offsetx){
in[--height[in[i]]]=data[i];
}*/
HTD<<<blocks,threads>>>(gpu_data,gpu_in,n,gpu_height);
CSC( cudaMemcpy( data,gpu_data, n*sizeof(int), cudaMemcpyDeviceToHost ) );
/*for(int i=0;i<n;i++){
cout << data[i]<<" ";
}
cout << endl;*/
fwrite(data, sizeof(int), n, stdout);
/*for(int i=0;i<n;i++){
fprintf(stderr, "%d ",data[i]);
}
fprintf(stderr, "\n");*/
CSC(cudaFree ( gpu_height ));
CSC(cudaFree ( gpu_in ));
CSC(cudaFree ( gpu_data ));
free(data);
    return 0;
}
|
0e1f45815320b1ca02e6cd1c6aece33b9191c5e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
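// kernel5: block-level tree reduction in shared memory with the last warp unrolled.
// The unrolled tail relies on implicit warp-synchronous execution (no __syncthreads
// between the final six steps), which is only safe on pre-Volta hardware; newer
// architectures would need volatile accesses or __syncwarp().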
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
unsigned int c = 0;
unsigned int s = 256*64;
  i = 0;
c = n/2;
while(i < 512){
scratch[threadIdx.x] = g_idata[i] + g_idata[i + c];
      c += s;
i++;
}
//scratch[threadIdx.x] = g_idata[i] + g_idata[i + c];
__syncthreads ();
c = blockDim.x/2;
// unsigned int start = threadIdx.x * 32;
for(unsigned int s = c; s > 32 ; s = s >> 1){
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
// the last warp
// SIMD architecture, each instruction applies the same operation in parallel across many data elements
if(threadIdx.x < 32){
scratch[threadIdx.x] += scratch[threadIdx.x + 32];
scratch[threadIdx.x] += scratch[threadIdx.x + 16];
scratch[threadIdx.x] += scratch[threadIdx.x + 8];
scratch[threadIdx.x] += scratch[threadIdx.x + 4];
scratch[threadIdx.x] += scratch[threadIdx.x + 2];
scratch[threadIdx.x] += scratch[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel5) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 0e1f45815320b1ca02e6cd1c6aece33b9191c5e5.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
unsigned int c = 0;
unsigned int s = 256*64;
  i = 0;
c = n/2;
while(i < 512){
scratch[threadIdx.x] = g_idata[i] + g_idata[i + c];
      c += s;
i++;
}
//scratch[threadIdx.x] = g_idata[i] + g_idata[i + c];
__syncthreads ();
c = blockDim.x/2;
// unsigned int start = threadIdx.x * 32;
for(unsigned int s = c; s > 32 ; s = s >> 1){
if(threadIdx.x < s){
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
// the last warp
// SIMD architecture, each instruction applies the same operation in parallel across many data elements
if(threadIdx.x < 32){
scratch[threadIdx.x] += scratch[threadIdx.x + 32];
scratch[threadIdx.x] += scratch[threadIdx.x + 16];
scratch[threadIdx.x] += scratch[threadIdx.x + 8];
scratch[threadIdx.x] += scratch[threadIdx.x + 4];
scratch[threadIdx.x] += scratch[threadIdx.x + 2];
scratch[threadIdx.x] += scratch[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_5, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 5;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel5 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
kernel5 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_5 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute multiple add GPU reduction kernel: %Lg secs\n", t_kernel_5);
double bw = (N * sizeof(dtype)) / (t_kernel_5 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
4af8f81fbf50a6e1d0c058e251b262208ea8ae05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__ void SumaColMatrizKernel(int f,int c,float*Md,float*Nd){
float Pvalue=0;
for(int k=threadIdx.x;k<f*c;k+=c){
Pvalue=Pvalue+Md[k];
}
Nd[threadIdx.x]=Pvalue;
}
void SumaColMatrizHost(int f,int c,float*Mh){
float *P;
P=new float[c];
for (int j=0;j<c;j++){
P[j]=0;
for (int i=0;i<f;i++){
P[j]+=Mh[i*c+j];
}
}
cout<<"\nResultados HOST:"<<endl;
for (int j=0;j<c;j++) cout<<P[j]<<" ";
}
int main(){
int f=10,c=2;
cout<<"Filas: "<<f<<endl;
cout<<"Columnas: "<<c<<endl;
int size=f*c*sizeof(float);
int size2=c*sizeof(float);
//Guardando memoria en el host
float *Mh=(float*)malloc(size);
float *Nh=(float*)malloc(size2);
cout<<"Matriz: ";
for (int i=0;i<f*c;i++){
Mh[i]=i+1;
cout<<Mh[i]<<" ";
}
//Guardando memoria en el GPU
float *Md,*Nd;
hipMalloc(&Md,size);
hipMalloc(&Nd,size2);
hipMemcpy(Md, Mh, size, hipMemcpyHostToDevice);
hipMemset(Nd, 0, size2);
//Suma columnas en GPU
int bloques=f/128+1;
dim3 tamGrid(bloques,1);
dim3 tamBlock(128,1,1);
hipLaunchKernelGGL(( SumaColMatrizKernel), dim3(tamGrid), dim3(tamBlock), 0, 0, f,c,Md,Nd);
hipMemcpy(Nh, Nd, size2, hipMemcpyDeviceToHost);
//Suma columnas en HOST
SumaColMatrizHost(f,c,Mh);
hipFree(Md); hipFree(Nd);
cout<<"\nResultados GPU: "<<endl;
for(int i=0;i<c;i++){
cout<<Nh[i]<<" ";
}
} | 4af8f81fbf50a6e1d0c058e251b262208ea8ae05.cu | #include <iostream>
using namespace std;
__global__ void SumaColMatrizKernel(int f,int c,float*Md,float*Nd){
if(threadIdx.x>=c) return; // only the first c threads own a column; extra threads would write past Nd
float Pvalue=0;
for(int k=threadIdx.x;k<f*c;k+=c){
Pvalue=Pvalue+Md[k];
}
Nd[threadIdx.x]=Pvalue;
}
void SumaColMatrizHost(int f,int c,float*Mh){
float *P;
P=new float[c];
for (int j=0;j<c;j++){
P[j]=0;
for (int i=0;i<f;i++){
P[j]+=Mh[i*c+j];
}
}
cout<<"\nResultados HOST:"<<endl;
for (int j=0;j<c;j++) cout<<P[j]<<" ";
}
int main(){
int f=10,c=2;
cout<<"Filas: "<<f<<endl;
cout<<"Columnas: "<<c<<endl;
int size=f*c*sizeof(float);
int size2=c*sizeof(float);
//Allocating memory on the host
float *Mh=(float*)malloc(size);
float *Nh=(float*)malloc(size2);
cout<<"Matriz: ";
for (int i=0;i<f*c;i++){
Mh[i]=i+1;
cout<<Mh[i]<<" ";
}
//Allocating memory on the GPU
float *Md,*Nd;
cudaMalloc(&Md,size);
cudaMalloc(&Nd,size2);
cudaMemcpy(Md, Mh, size, cudaMemcpyHostToDevice);
cudaMemset(Nd, 0, size2);
//Column sums on the GPU
int bloques=f/128+1;
dim3 tamGrid(bloques,1);
dim3 tamBlock(128,1,1);
SumaColMatrizKernel<<<tamGrid, tamBlock>>>(f,c,Md,Nd);
cudaMemcpy(Nh, Nd, size2, cudaMemcpyDeviceToHost);
//Column sums on the HOST
SumaColMatrizHost(f,c,Mh);
cudaFree(Md); cudaFree(Nd);
cout<<"\nResultados GPU: "<<endl;
for(int i=0;i<c;i++){
cout<<Nh[i]<<" ";
}
} |
e0c6a772d80019896a0f90dbeffe2e654a02852a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <builtin_types.h>
//no need for extern c according to stackoverflow answer by nvidia employee
extern "C"
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
int ii = blockDim.x * blockIdx.x + threadIdx.x;
if (ii < n)
c[ii] = a[ii] + b[ii];
}
void vectorAddition(const float* a, const float* b, float* c, int n) {
float *a_cuda, *b_cuda, *c_cuda;
unsigned int nBytes = sizeof(float) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
// allocate and copy memory into the device
hipMalloc((void **)& a_cuda, nBytes);
hipMalloc((void **)& b_cuda, nBytes);
hipMalloc((void **)& c_cuda, nBytes);
hipMemcpy(a_cuda, a, nBytes, hipMemcpyHostToDevice);
hipMemcpy(b_cuda, b, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectorAdditionCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a_cuda, b_cuda, c_cuda, n);
// load the answer back into the host
hipMemcpy(c, c_cuda, nBytes, hipMemcpyDeviceToHost);
hipFree(a_cuda);
hipFree(b_cuda);
hipFree(c_cuda);
}
| e0c6a772d80019896a0f90dbeffe2e654a02852a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <builtin_types.h>
//no need for extern c according to stackoverflow answer by nvidia employee
extern "C"
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
int ii = blockDim.x * blockIdx.x + threadIdx.x;
if (ii < n)
c[ii] = a[ii] + b[ii];
}
void vectorAddition(const float* a, const float* b, float* c, int n) {
float *a_cuda, *b_cuda, *c_cuda;
unsigned int nBytes = sizeof(float) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
// allocate and copy memory into the device
cudaMalloc((void **)& a_cuda, nBytes);
cudaMalloc((void **)& b_cuda, nBytes);
cudaMalloc((void **)& c_cuda, nBytes);
cudaMemcpy(a_cuda, a, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(b_cuda, b, nBytes, cudaMemcpyHostToDevice);
vectorAdditionCUDA<<<blocksPerGrid, threadsPerBlock>>>(a_cuda, b_cuda, c_cuda, n);
// load the answer back into the host
cudaMemcpy(c, c_cuda, nBytes, cudaMemcpyDeviceToHost);
cudaFree(a_cuda);
cudaFree(b_cuda);
cudaFree(c_cuda);
}
|
002c770003c4822fc2c24cb5fbfc30d9c2785bd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../SDDK/GPU/cuda_common.h"
__global__ void spline_inner_product_gpu_kernel_v3(int num_points__,
int const* idx_ri__,
double const* x__,
double const* dx__,
double const* f__,
double const* g__,
double* result__)
{
int nb = num_blocks(num_points__, blockDim.x);
int idx_f = idx_ri__[array2D_offset(0, blockIdx.x, 2)];
int idx_g = idx_ri__[array2D_offset(1, blockIdx.x, 2)];
extern __shared__ char sdata_ptr[];
double* sdata = (double*)&sdata_ptr[0];
int a_offs_f = array3D_offset(0, 0, idx_f, num_points__, 4);
int b_offs_f = array3D_offset(0, 1, idx_f, num_points__, 4);
int c_offs_f = array3D_offset(0, 2, idx_f, num_points__, 4);
int d_offs_f = array3D_offset(0, 3, idx_f, num_points__, 4);
int a_offs_g = array3D_offset(0, 0, idx_g, num_points__, 4);
int b_offs_g = array3D_offset(0, 1, idx_g, num_points__, 4);
int c_offs_g = array3D_offset(0, 2, idx_g, num_points__, 4);
int d_offs_g = array3D_offset(0, 3, idx_g, num_points__, 4);
sdata[threadIdx.x] = 0;
for (int ib = 0; ib < nb; ib++)
{
int i = ib * blockDim.x + threadIdx.x;
if (i < num_points__ - 1)
{
double xi = x__[i];
double dxi = dx__[i];
double a1 = f__[a_offs_f + i];
double b1 = f__[b_offs_f + i];
double c1 = f__[c_offs_f + i];
double d1 = f__[d_offs_f + i];
double a2 = g__[a_offs_g + i];
double b2 = g__[b_offs_g + i];
double c2 = g__[c_offs_g + i];
double d2 = g__[d_offs_g + i];
double k0 = a1 * a2;
double k1 = d1 * b2 + c1 * c2 + b1 * d2;
double k2 = d1 * a2 + c1 * b2 + b1 * c2 + a1 * d2;
double k3 = c1 * a2 + b1 * b2 + a1 * c2;
double k4 = d1 * c2 + c1 * d2;
double k5 = b1 * a2 + a1 * b2;
double k6 = d1 * d2; // 25 flop in total
//double v1 = dxi * k6 * (1.0 / 9.0);
//double r = (k4 + 2.0 * k6 * xi) * 0.125;
//double v2 = dxi * (r + v1);
//double v3 = dxi * ((k1 + xi * (2.0 * k4 + k6 * xi)) * (1.0 / 7.0) + v2);
//double v4 = dxi * ((k2 + xi * (2.0 * k1 + k4 * xi)) * (1.0 / 6.0) + v3);
//double v5 = dxi * ((k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2 + v4);
//double v6 = dxi * ((k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25 + v5);
//double v7 = dxi * ((k0 + xi * (2.0 * k5 + k3 * xi)) / 3.0 + v6);
//double v8 = dxi * ((xi * (2.0 * k0 + xi * k5)) * 0.5 + v7);
double v = dxi * k6 * 0.11111111111111111111;
double r1 = k4 * 0.125 + k6 * xi * 0.25;
v = dxi * (r1 + v);
double r2 = (k1 + xi * (2.0 * k4 + k6 * xi)) * 0.14285714285714285714;
v = dxi * (r2 + v);
double r3 = (k2 + xi * (2.0 * k1 + k4 * xi)) * 0.16666666666666666667;
v = dxi * (r3 + v);
double r4 = (k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2;
v = dxi * (r4 + v);
double r5 = (k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25;
v = dxi * (r5 + v);
double r6 = (k0 + xi * (2.0 * k5 + k3 * xi)) * 0.33333333333333333333;
v = dxi * (r6 + v);
double r7 = (xi * (2.0 * k0 + xi * k5)) * 0.5;
v = dxi * (r7 + v);
sdata[threadIdx.x] += dxi * (k0 * xi * xi + v);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2)
{
if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] += sdata[threadIdx.x + s];
__syncthreads();
}
result__[blockIdx.x] = sdata[0];
}
extern "C" void spline_inner_product_gpu_v3(int const* idx_ri__,
int num_ri__,
int num_points__,
double const* x__,
double const* dx__,
double const* f__,
double const* g__,
double* result__)
{
dim3 grid_t(64);
dim3 grid_b(num_ri__);
hipLaunchKernelGGL(( spline_inner_product_gpu_kernel_v3) , dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0,
num_points__,
idx_ri__,
x__,
dx__,
f__,
g__,
result__
);
}
| 002c770003c4822fc2c24cb5fbfc30d9c2785bd8.cu | #include "../SDDK/GPU/cuda_common.h"
__global__ void spline_inner_product_gpu_kernel_v3(int num_points__,
int const* idx_ri__,
double const* x__,
double const* dx__,
double const* f__,
double const* g__,
double* result__)
{
int nb = num_blocks(num_points__, blockDim.x);
int idx_f = idx_ri__[array2D_offset(0, blockIdx.x, 2)];
int idx_g = idx_ri__[array2D_offset(1, blockIdx.x, 2)];
extern __shared__ char sdata_ptr[];
double* sdata = (double*)&sdata_ptr[0];
int a_offs_f = array3D_offset(0, 0, idx_f, num_points__, 4);
int b_offs_f = array3D_offset(0, 1, idx_f, num_points__, 4);
int c_offs_f = array3D_offset(0, 2, idx_f, num_points__, 4);
int d_offs_f = array3D_offset(0, 3, idx_f, num_points__, 4);
int a_offs_g = array3D_offset(0, 0, idx_g, num_points__, 4);
int b_offs_g = array3D_offset(0, 1, idx_g, num_points__, 4);
int c_offs_g = array3D_offset(0, 2, idx_g, num_points__, 4);
int d_offs_g = array3D_offset(0, 3, idx_g, num_points__, 4);
sdata[threadIdx.x] = 0;
for (int ib = 0; ib < nb; ib++)
{
int i = ib * blockDim.x + threadIdx.x;
if (i < num_points__ - 1)
{
double xi = x__[i];
double dxi = dx__[i];
double a1 = f__[a_offs_f + i];
double b1 = f__[b_offs_f + i];
double c1 = f__[c_offs_f + i];
double d1 = f__[d_offs_f + i];
double a2 = g__[a_offs_g + i];
double b2 = g__[b_offs_g + i];
double c2 = g__[c_offs_g + i];
double d2 = g__[d_offs_g + i];
double k0 = a1 * a2;
double k1 = d1 * b2 + c1 * c2 + b1 * d2;
double k2 = d1 * a2 + c1 * b2 + b1 * c2 + a1 * d2;
double k3 = c1 * a2 + b1 * b2 + a1 * c2;
double k4 = d1 * c2 + c1 * d2;
double k5 = b1 * a2 + a1 * b2;
double k6 = d1 * d2; // 25 flop in total
//double v1 = dxi * k6 * (1.0 / 9.0);
//double r = (k4 + 2.0 * k6 * xi) * 0.125;
//double v2 = dxi * (r + v1);
//double v3 = dxi * ((k1 + xi * (2.0 * k4 + k6 * xi)) * (1.0 / 7.0) + v2);
//double v4 = dxi * ((k2 + xi * (2.0 * k1 + k4 * xi)) * (1.0 / 6.0) + v3);
//double v5 = dxi * ((k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2 + v4);
//double v6 = dxi * ((k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25 + v5);
//double v7 = dxi * ((k0 + xi * (2.0 * k5 + k3 * xi)) / 3.0 + v6);
//double v8 = dxi * ((xi * (2.0 * k0 + xi * k5)) * 0.5 + v7);
double v = dxi * k6 * 0.11111111111111111111;
double r1 = k4 * 0.125 + k6 * xi * 0.25;
v = dxi * (r1 + v);
double r2 = (k1 + xi * (2.0 * k4 + k6 * xi)) * 0.14285714285714285714;
v = dxi * (r2 + v);
double r3 = (k2 + xi * (2.0 * k1 + k4 * xi)) * 0.16666666666666666667;
v = dxi * (r3 + v);
double r4 = (k3 + xi * (2.0 * k2 + k1 * xi)) * 0.2;
v = dxi * (r4 + v);
double r5 = (k5 + xi * (2.0 * k3 + k2 * xi)) * 0.25;
v = dxi * (r5 + v);
double r6 = (k0 + xi * (2.0 * k5 + k3 * xi)) * 0.33333333333333333333;
v = dxi * (r6 + v);
double r7 = (xi * (2.0 * k0 + xi * k5)) * 0.5;
v = dxi * (r7 + v);
sdata[threadIdx.x] += dxi * (k0 * xi * xi + v);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2)
{
if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] += sdata[threadIdx.x + s];
__syncthreads();
}
result__[blockIdx.x] = sdata[0];
}
extern "C" void spline_inner_product_gpu_v3(int const* idx_ri__,
int num_ri__,
int num_points__,
double const* x__,
double const* dx__,
double const* f__,
double const* g__,
double* result__)
{
dim3 grid_t(64);
dim3 grid_b(num_ri__);
spline_inner_product_gpu_kernel_v3 <<<grid_b, grid_t, grid_t.x * sizeof(double)>>>
(
num_points__,
idx_ri__,
x__,
dx__,
f__,
g__,
result__
);
}
|
f5b6590fc18a12fc63dc62147626df35a91a6c8f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pointwise_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_res = NULL;
hipMalloc(&d_res, XSIZE*YSIZE);
const float *d_op1 = NULL;
hipMalloc(&d_op1, XSIZE*YSIZE);
const float *d_op2 = NULL;
hipMalloc(&d_op2, XSIZE*YSIZE);
const int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((pointwise_add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_res,d_op1,d_op2,len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((pointwise_add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_res,d_op1,d_op2,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((pointwise_add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_res,d_op1,d_op2,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f5b6590fc18a12fc63dc62147626df35a91a6c8f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pointwise_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_res = NULL;
cudaMalloc(&d_res, XSIZE*YSIZE);
const float *d_op1 = NULL;
cudaMalloc(&d_op1, XSIZE*YSIZE);
const float *d_op2 = NULL;
cudaMalloc(&d_op2, XSIZE*YSIZE);
const int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pointwise_add<<<gridBlock,threadBlock>>>(d_res,d_op1,d_op2,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pointwise_add<<<gridBlock,threadBlock>>>(d_res,d_op1,d_op2,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pointwise_add<<<gridBlock,threadBlock>>>(d_res,d_op1,d_op2,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ae52b482233dd243bd93dca968d360366751d073.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h1 __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h2 __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha, hInf, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
_pp_var_h1[tid_] = hInf;
_pp_var_h2[tid_] = hInf;
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
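// blockIdx.y selects which state variable (m, h1, h2) is scaled by the instance multiplicity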
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_2_, a_0_, ll4_, qt, mRat, mBeta, hInf, ba_0_, ba_1_, ll3_, ll6_, h1Rat, ll1_, ba_2_, ll2_, mAlpha, h2Rat, ll5_, ll0_, ll7_;
ll7_ = 0.;
ll6_ = 0.;
ll5_ = 0.;
ll4_ = 0.;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
mRat = 0.40000000000000002*qt*(mAlpha+mBeta);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
h1Rat = qt/( 360.0+( 1010.0+ 23.699999999999999*(v+ 54.0))*exp(pow( -((v+ 75.0)* 0.020833333333333332), 2.0)));
h2Rat = qt/( 2350.0+ 1380.0*exp( -0.010999999999999999*v)- 210.0*exp( -0.029999999999999999*v));
if (h2Rat< 0.) {
h2Rat = 0.001;
}
a_0_ = -mRat;
ba_0_ = 0.40000000000000002*qt*mAlpha/a_0_;
ll2_ = a_0_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll3_;
a_1_ = -1.0*h1Rat;
ba_1_ = hInf*h1Rat/a_1_;
ll4_ = a_1_*dt;
ll5_ = ( 1.0+ 0.5*ll4_)/( 1.0- 0.5*ll4_);
_pp_var_h1[tid_] = -ba_1_+(_pp_var_h1[tid_]+ba_1_)*ll5_;
a_2_ = -1.0*h2Rat;
ba_2_ = hInf*h2Rat/a_2_;
ll6_ = a_2_*dt;
ll7_ = ( 1.0+ 0.5*ll6_)/( 1.0- 0.5*ll6_);
_pp_var_h2[tid_] = -ba_2_+(_pp_var_h2[tid_]+ba_2_)*ll7_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_])*(v-ek);
current_ = ik;
conductivity_ = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_]);
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_Kv2like_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(( multiply), dim3(grid_dim, 3), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_Kv2like_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
| ae52b482233dd243bd93dca968d360366751d073.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h1 __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h2 __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha, hInf, ll0_, ll1_;
ll1_ = 0.;
ll0_ = 0.;
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
_pp_var_h1[tid_] = hInf;
_pp_var_h2[tid_] = hInf;
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type a_1_, a_2_, a_0_, ll4_, qt, mRat, mBeta, hInf, ba_0_, ba_1_, ll3_, ll6_, h1Rat, ll1_, ba_2_, ll2_, mAlpha, h2Rat, ll5_, ll0_, ll7_;
ll7_ = 0.;
ll6_ = 0.;
ll5_ = 0.;
ll4_ = 0.;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
ll0_ = 43.0-v;
ll1_ = 11.0*exprelr(ll0_* 0.090909090909090912);
mAlpha = 0.12*ll1_;
mBeta = 0.02*exp( -(v+ 1.27)* 0.0083333333333333332);
mRat = 0.40000000000000002*qt*(mAlpha+mBeta);
hInf = 1.0/( 1.0+exp((v+ 58.0)* 0.090909090909090912));
h1Rat = qt/( 360.0+( 1010.0+ 23.699999999999999*(v+ 54.0))*exp(pow( -((v+ 75.0)* 0.020833333333333332), 2.0)));
h2Rat = qt/( 2350.0+ 1380.0*exp( -0.010999999999999999*v)- 210.0*exp( -0.029999999999999999*v));
if (h2Rat< 0.) {
h2Rat = 0.001;
}
a_0_ = -mRat;
ba_0_ = 0.40000000000000002*qt*mAlpha/a_0_;
ll2_ = a_0_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll3_;
a_1_ = -1.0*h1Rat;
ba_1_ = hInf*h1Rat/a_1_;
ll4_ = a_1_*dt;
ll5_ = ( 1.0+ 0.5*ll4_)/( 1.0- 0.5*ll4_);
_pp_var_h1[tid_] = -ba_1_+(_pp_var_h1[tid_]+ba_1_)*ll5_;
a_2_ = -1.0*h2Rat;
ba_2_ = hInf*h2Rat/a_2_;
ll6_ = a_2_*dt;
ll7_ = ( 1.0+ 0.5*ll6_)/( 1.0- 0.5*ll6_);
_pp_var_h2[tid_] = -ba_2_+(_pp_var_h2[tid_]+ba_2_)*ll7_;
}
}
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_])*(v-ek);
current_ = ik;
conductivity_ = 0.5*_pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*(_pp_var_h1[tid_]+_pp_var_h2[tid_]);
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
void mechanism_Kv2like_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 3}, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
compute_currents<<<grid_dim, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_Kv2like_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Kv2like_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
a3a94d7f123cdc5ba29f43af257f3b6cbed87455.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common_hip.cuh"
#include <hip/hip_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>
__global__ void symm_unit_clip_fwd_f32_kernel(
uint32_t dim,
const float *clip,
const float *x,
float *y)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float a = c / max(fabs(c), 1.0f);*/
float a = clip[0];
float x_i = x[idx];
y[idx] = x_i * ((x_i > 0.0f) + a * (x_i < 0.0f));
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_fwd_f32(
size_t dim,
const float *clip,
const float *x,
float *y,
hipStream_t stream)
{
hipLaunchKernelGGL(( symm_unit_clip_fwd_f32_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
dim, clip, x, y);
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_naive_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float u = max(fabs(c), 1.0f);
float du = 1.0f * (c > 1.0f) - 1.0f * (c < -1.0f);
float x_i = x[idx];
atomicAdd(&grad[0], (1.0f / u) * (1.0f - du * c / u) * dy[idx] * x_i * (x_i < 0.0f));*/
float x_i = x[idx];
atomicAdd(&grad[0], dy[idx] * x_i * (x_i < 0.0f));
}
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_fast_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad)
{
__shared__ float cache[1024];
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float u = max(fabs(c), 1.0f);
float du = 1.0f * (c > 1.0f) - 1.0f * (c < -1.0f);
float x_i = x[idx];
cache[threadIdx.x] = (1.0f / u) * (1.0f - du * c / u) * dy[idx] * x_i * (x_i < 0.0f);*/
float x_i = x[idx];
cache[threadIdx.x] = dy[idx] * x_i * (x_i < 0.0f);
} else {
cache[threadIdx.x] = 0.0f;
}
__syncthreads();
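// block-wide sum of the 1024 per-thread partials held in shared memory; only thread 0
// then commits the block total with a single atomicAdd (hence the "nondeterministic" accumulation order)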
threadblock1024_reduce_sum_f32(cache);
if (idx < dim) {
if (threadIdx.x == 0) {
atomicAdd(&grad[0], cache[0]);
}
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_param_bwd_nondeterministic_f32(
size_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad,
hipStream_t stream)
{
hipLaunchKernelGGL(( symm_unit_clip_param_bwd_f32_atomic_fast_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
dim, clip, x, dy, grad);
}
__global__ void symm_unit_clip_input_bwd_f32_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *dx)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float a = c / max(fabs(c), 1.0f);*/
float a = clip[0];
float x_i = x[idx];
dx[idx] += dy[idx] * ((x_i > 0.0f) + a * (x_i < 0.0f));
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_input_bwd_f32(
size_t dim,
const float *clip,
const float *x,
const float *dy,
float *dx,
hipStream_t stream)
{
hipLaunchKernelGGL(( symm_unit_clip_input_bwd_f32_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
dim, clip, x, dy, dx);
}
| a3a94d7f123cdc5ba29f43af257f3b6cbed87455.cu | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>
__global__ void symm_unit_clip_fwd_f32_kernel(
uint32_t dim,
const float *clip,
const float *x,
float *y)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float a = c / max(fabs(c), 1.0f);*/
float a = clip[0];
float x_i = x[idx];
y[idx] = x_i * ((x_i > 0.0f) + a * (x_i < 0.0f));
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_fwd_f32(
size_t dim,
const float *clip,
const float *x,
float *y,
cudaStream_t stream)
{
symm_unit_clip_fwd_f32_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
dim, clip, x, y);
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_naive_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float u = max(fabs(c), 1.0f);
float du = 1.0f * (c > 1.0f) - 1.0f * (c < -1.0f);
float x_i = x[idx];
atomicAdd(&grad[0], (1.0f / u) * (1.0f - du * c / u) * dy[idx] * x_i * (x_i < 0.0f));*/
float x_i = x[idx];
atomicAdd(&grad[0], dy[idx] * x_i * (x_i < 0.0f));
}
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_fast_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad)
{
__shared__ float cache[1024];
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float u = max(fabs(c), 1.0f);
float du = 1.0f * (c > 1.0f) - 1.0f * (c < -1.0f);
float x_i = x[idx];
cache[threadIdx.x] = (1.0f / u) * (1.0f - du * c / u) * dy[idx] * x_i * (x_i < 0.0f);*/
float x_i = x[idx];
cache[threadIdx.x] = dy[idx] * x_i * (x_i < 0.0f);
} else {
cache[threadIdx.x] = 0.0f;
}
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (idx < dim) {
if (threadIdx.x == 0) {
atomicAdd(&grad[0], cache[0]);
}
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_param_bwd_nondeterministic_f32(
size_t dim,
const float *clip,
const float *x,
const float *dy,
float *grad,
cudaStream_t stream)
{
symm_unit_clip_param_bwd_f32_atomic_fast_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
dim, clip, x, dy, grad);
}
__global__ void symm_unit_clip_input_bwd_f32_kernel(
uint32_t dim,
const float *clip,
const float *x,
const float *dy,
float *dx)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
/*float c = clip[0];
float a = c / max(fabs(c), 1.0f);*/
float a = clip[0];
float x_i = x[idx];
dx[idx] += dy[idx] * ((x_i > 0.0f) + a * (x_i < 0.0f));
}
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_input_bwd_f32(
size_t dim,
const float *clip,
const float *x,
const float *dy,
float *dx,
cudaStream_t stream)
{
symm_unit_clip_input_bwd_f32_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
dim, clip, x, dy, dx);
}
|
54ea1ac608a60981d7ab5fa448acc0e23157f78a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with COMPLEX, so use PRECISION_z
#define PRECISION_z
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlat2c and zlaset.
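Example: with m = 1000, n = 100 and BLK_X = 64, BLK_Y = 32, the grid is
ceil(1000/64) x ceil(100/32) = 16 x 4 blocks of 64 threads, each thread
covering up to 32 entries of its row.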
*/
__global__
void zlag2c_kernel(
int m, int n,
const magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
double rmax )
{
magmaDoubleComplex tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZLAG2C converts a double-complex matrix, A,
to a single-complex matrix, SA.
RMAX is the overflow for the single-complex arithmetic.
ZLAG2C checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A COMPLEX_16 array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA COMPLEX array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_zlag2c(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
hipLaunchKernelGGL(( zlag2c_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A, lda, SA, ldsa, rmax );
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
| 54ea1ac608a60981d7ab5fa448acc0e23157f78a.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with COMPLEX, so use PRECISION_z
#define PRECISION_z
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlat2c and zlaset.
*/
__global__
void zlag2c_kernel(
int m, int n,
const magmaDoubleComplex *A, int lda,
magmaFloatComplex *SA, int ldsa,
double rmax )
{
magmaDoubleComplex tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) );
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZLAG2C converts a double-complex matrix, A,
to a single-complex matrix, SA.
RMAX is the overflow for the single-complex arithmetic.
ZLAG2C checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. m >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A COMPLEX_16 array, dimension (LDA,n)
On entry, the m-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,m).
@param[out]
SA COMPLEX array, dimension (LDSA,n)
On exit, if INFO=0, the m-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,m).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_zlag2c(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,m) )
*info = -4;
else if ( ldsa < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
zlag2c_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, A, lda, SA, ldsa, rmax );
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
fc52ad9f1a0f42719d8f9a17e6033d96ded88a53.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/imageMatrix.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
//ImMatG function definition
ImMatG::ImMatG(){
rows = 0;
cols = 0;
}
ImMatG::ImMatG(size_t rows, size_t cols, double * data, bool onDeviceMemory){
this->rows = rows;
this->cols = cols;
if (onDeviceMemory){
this->data_d = data;
} else{
hipMalloc(&(this->data_d), rows*cols*sizeof(double));
hipError_t cuerror = hipMemcpy(this->data_d, data, rows*cols*sizeof(double), hipMemcpyHostToDevice);
}
}
ImMatG::ImMatG(size_t rows, size_t cols){
this->rows = rows;
this->cols = cols;
hipMalloc(&(this->data_d), rows*cols*sizeof(double));
}
ImMatG::~ImMatG(){
hipFree((this->data_d));
}
size_t ImMatG::getLength(void){
return rows*cols;
}
// GPU KERNELS
__global__ void transposeKernel(const double *input, double *output, int height, int width){
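// each thread stages one element in the dynamically sized shared-memory tile and
// writes it back at its transposed coordinates; the bounds checks handle edge blocks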
extern __shared__ double temp[];
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)){
int id_in = yIndex*width + xIndex;
temp[threadIdx.x+threadIdx.y*(blockDim.x)] = input[id_in];
}
__syncthreads();
int tempXIndex = xIndex;
xIndex = yIndex;
yIndex = tempXIndex;
if ((xIndex < height) && (yIndex < width)){
int id_out = xIndex+yIndex*height;
output[id_out] = temp[threadIdx.x+threadIdx.y*(blockDim.x)];
}
}
ImMatG* ImMatG::transpose(){
ImMatG *result= new ImMatG(cols, rows);
int numThreads = 16;
int blocksX = ceil(((float)cols) / numThreads);
int blocksY = ceil(((float)rows) / numThreads);
hipLaunchKernelGGL(( transposeKernel), dim3(dim3(blocksX, blocksY, 1)), dim3(dim3(numThreads, numThreads, 1)), (numThreads)*(numThreads)*sizeof(double), 0, data_d, result->data_d, rows, cols);
return result;
}
__global__ void fillRowKernel(double *data, size_t cols, size_t row, double value){
int Xidx = threadIdx.x + blockIdx.x*blockDim.x;
if (Xidx < cols){
data[Xidx + row*cols] = value;
}
}
void ImMatG::fillRow(size_t row, double value){
if ((row >= this->rows) || (row < 0)){
std::cout << "Index doesn't agree with image size" << std::endl;
return;
}
int threadNum = 128;
fillRowKernel << <dim3(ceil(cols / threadNum), 1, 1), dim3(threadNum, 1, 1) >> >(data_d, cols, row, value);
}
// creates im mat object from csv file
// parameter:
// filename - filename of the files
// returns: image matrix allocated on gpu
ImMatG* readCSV(std::string fileName){
std::ifstream fileStream(fileName);
std::string line;
double val;
std::vector<double> values;
int rows = 0, cols = 0;
while (getline(fileStream, line)){
std::stringstream ss(line);
cols = 0;
while (ss >> val){
values.push_back(val);
cols++;
if (ss.peek() == ','){
ss.ignore();
}
}
rows++;
}
ImMatG * result = new ImMatG(rows, cols, values.data(), false);
return result;
}
double *ImMatG::getData(){
double * data = new double[getLength()];
hipMemcpy(data, data_d, sizeof(double)*getLength(), hipMemcpyDeviceToHost);
return data;
}
__global__ void getColumnKernel(double *image, size_t rows, size_t cols, double *column){
int xIdx = threadIdx.x + blockIdx.x*blockDim.x;
int yIdx = threadIdx.y + blockIdx.y*blockDim.y;
if ((xIdx > cols) && (yIdx < cols)){
}
}
| fc52ad9f1a0f42719d8f9a17e6033d96ded88a53.cu | #include "../include/imageMatrix.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
//ImMatG function definition
ImMatG::ImMatG(){
rows = 0;
cols = 0;
}
ImMatG::ImMatG(size_t rows, size_t cols, double * data, bool onDeviceMemory){
this->rows = rows;
this->cols = cols;
if (onDeviceMemory){
this->data_d = data;
} else{
cudaMalloc(&(this->data_d), rows*cols*sizeof(double));
cudaError_t cuerror = cudaMemcpy(this->data_d, data, rows*cols*sizeof(double), cudaMemcpyHostToDevice);
}
}
ImMatG::ImMatG(size_t rows, size_t cols){
this->rows = rows;
this->cols = cols;
cudaMalloc(&(this->data_d), rows*cols*sizeof(double));
}
ImMatG::~ImMatG(){
cudaFree((this->data_d));
}
size_t ImMatG::getLength(void){
return rows*cols;
}
// GPU KERNELS
__global__ void transposeKernel(const double *input, double *output, int height, int width){
extern __shared__ double temp[];
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)){
int id_in = yIndex*width + xIndex;
temp[threadIdx.x+threadIdx.y*(blockDim.x)] = input[id_in];
}
__syncthreads();
int tempXIndex = xIndex;
xIndex = yIndex;
yIndex = tempXIndex;
if ((xIndex < height) && (yIndex < width)){
int id_out = xIndex+yIndex*height;
output[id_out] = temp[threadIdx.x+threadIdx.y*(blockDim.x)];
}
}
ImMatG* ImMatG::transpose(){
ImMatG *result= new ImMatG(cols, rows);
int numThreads = 16;
int blocksX = ceil(((float)cols) / numThreads);
int blocksY = ceil(((float)rows) / numThreads);
transposeKernel<<<dim3(blocksX, blocksY, 1), dim3(numThreads, numThreads, 1), (numThreads)*(numThreads)*sizeof(double)>>>(data_d, result->data_d, rows, cols);
return result;
}
__global__ void fillRowKernel(double *data, size_t cols, size_t row, double value){
int Xidx = threadIdx.x + blockIdx.x*blockDim.x;
if (Xidx < cols){
data[Xidx + row*cols] = value;
}
}
void ImMatG::fillRow(size_t row, double value){
if ((row >= this->rows) || (row < 0)){
std::cout << "Index doesn't agree with image size" << std::endl;
return;
}
int threadNum = 128;
fillRowKernel << <dim3(ceil(cols / threadNum), 1, 1), dim3(threadNum, 1, 1) >> >(data_d, cols, row, value);
}
// creates im mat object from csv file
// parameter:
// filename - filename of the files
// returns: image matrix allocated on gpu
ImMatG* readCSV(std::string fileName){
std::ifstream fileStream(fileName);
std::string line;
double val;
std::vector<double> values;
int rows = 0, cols = 0;
while (getline(fileStream, line)){
std::stringstream ss(line);
cols = 0;
while (ss >> val){
values.push_back(val);
cols++;
if (ss.peek() == ','){
ss.ignore();
}
}
rows++;
}
ImMatG * result = new ImMatG(rows, cols, values.data(), false);
return result;
}
double *ImMatG::getData(){
double * data = new double[getLength()];
cudaMemcpy(data, data_d, sizeof(double)*getLength(), cudaMemcpyDeviceToHost);
return data;
}
__global__ void getColumnKernel(double *image, size_t rows, size_t cols, double *column){
int xIdx = threadIdx.x + blockIdx.x*blockDim.x;
int yIdx = threadIdx.y + blockIdx.y*blockDim.y;
if ((xIdx > cols) && (yIdx < cols)){
}
}
|
2a03ae6203c81abd3b5fbf96c2ad6c36b3b20666.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int N = ...;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
// Initialize input vectors
...
// Allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(N + threadsPerBlock - 1) / threadsPerBlock;
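// blocksPerGrid rounds up so blocksPerGrid * threadsPerBlock >= N; the kernel's bounds check covers the overhang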
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Free host memory
...
} | 2a03ae6203c81abd3b5fbf96c2ad6c36b3b20666.cu | // Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int N = ...;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
// Initialize input vectors
...
// Allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
...
} |
b8085a78023ffee6dd140c93505d316bebfeb889.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "p2pPingPongLatencyTest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
void *_pLocal = NULL;
hipMalloc(&_pLocal, XSIZE*YSIZE);
void *_pRemote = NULL;
hipMalloc(&_pRemote, XSIZE*YSIZE);
uint64_t *pTimestamps = NULL;
hipMalloc(&pTimestamps, XSIZE*YSIZE);
int bWait = 1;
int cIterations = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((p2pPingPongLatencyTest), dim3(gridBlock),dim3(threadBlock), 0, 0, _pLocal,_pRemote,pTimestamps,bWait,cIterations);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((p2pPingPongLatencyTest), dim3(gridBlock),dim3(threadBlock), 0, 0, _pLocal,_pRemote,pTimestamps,bWait,cIterations);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((p2pPingPongLatencyTest), dim3(gridBlock),dim3(threadBlock), 0, 0, _pLocal,_pRemote,pTimestamps,bWait,cIterations);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b8085a78023ffee6dd140c93505d316bebfeb889.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "p2pPingPongLatencyTest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
void *_pLocal = NULL;
cudaMalloc(&_pLocal, XSIZE*YSIZE);
void *_pRemote = NULL;
cudaMalloc(&_pRemote, XSIZE*YSIZE);
uint64_t *pTimestamps = NULL;
cudaMalloc(&pTimestamps, XSIZE*YSIZE);
int bWait = 1;
int cIterations = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
p2pPingPongLatencyTest<<<gridBlock,threadBlock>>>(_pLocal,_pRemote,pTimestamps,bWait,cIterations);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
p2pPingPongLatencyTest<<<gridBlock,threadBlock>>>(_pLocal,_pRemote,pTimestamps,bWait,cIterations);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
p2pPingPongLatencyTest<<<gridBlock,threadBlock>>>(_pLocal,_pRemote,pTimestamps,bWait,cIterations);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6fa3b3b54e20a5f457b0a3d1aedb681e9f34fc85.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// remove the target characters from the beginning of each string
NVStrings* NVStrings::lstrip( const char* to_strip )
{
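    // Two-pass pattern: first measure the stripped size of every string so a single
    // packed output buffer can be allocated (offsets come from an exclusive scan of
    // the per-string sizes), then write each stripped string at its offset.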
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->lstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view** d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->lstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target character from the beginning and the end of each string
NVStrings* NVStrings::strip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->strip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->strip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target character from the end of each string
NVStrings* NVStrings::rstrip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( hipMemcpyAsync(d_strip,to_strip,len,hipMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->rstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->rstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
| 6fa3b3b54e20a5f457b0a3d1aedb681e9f34fc85.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// remove the target characters from the beginning of each string
NVStrings* NVStrings::lstrip( const char* to_strip )
{
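    // Two-pass pattern: first measure the stripped size of every string so a single
    // packed output buffer can be allocated (offsets come from an exclusive scan of
    // the per-string sizes), then write each stripped string at its offset.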
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->lstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view** d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->lstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target character from the beginning and the end of each string
NVStrings* NVStrings::strip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->strip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->strip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
// remove the target character from the end of each string
NVStrings* NVStrings::rstrip( const char* to_strip )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
char* d_strip = nullptr;
if( to_strip )
{
int len = (int)strlen(to_strip) + 1; // include null
d_strip = device_alloc<char>(len,0);
CUDA_TRY( cudaMemcpyAsync(d_strip,to_strip,len,cudaMemcpyHostToDevice))
}
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
unsigned int len = dstr->rstrip_size(d_strip);
len = ALIGN_SIZE(len);
d_lengths[idx] = (size_t)len;
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
{
if( d_strip )
RMM_FREE(d_strip,0);
return rtn; // all strings are null
}
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the strip
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = dstr->rstrip(d_strip,buffer);
});
//
if( d_strip )
RMM_FREE(d_strip,0);
return rtn;
}
|
c45711c41457143ef093b781bdc8f4caaf96a6d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
__global__ void absorb_kernel(unsigned N, unsigned* active, unsigned * rxn , unsigned* done){
//PLACEHOLDER FOR FISSIONS, NEED TO READ NU TABLES LATER
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;} //return if out of bounds
//remap to active
//tid=active[tid];
if(done[tid]){return;}
if (rxn[tid] < 102 | rxn[tid] > 800 ){return;} //return if not some sort of absorption, ie (n,not-n)
//printf("in abs, rxn=%u\n",rxn[tid]);
done[tid] = 1;
}
void absorb( hipStream_t stream, unsigned NUM_THREADS, unsigned N, unsigned* active, unsigned * rxn , unsigned* done){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
//absorb_kernel <<< blks, NUM_THREADS >>> ( N, active, rxn , done);
hipLaunchKernelGGL(( absorb_kernel) , dim3(blks), dim3(NUM_THREADS) , 0 , stream , N, active, rxn , done);
hipDeviceSynchronize();
}
| c45711c41457143ef093b781bdc8f4caaf96a6d6.cu | #include <cuda.h>
#include <stdio.h>
#include "datadef.h"
__global__ void absorb_kernel(unsigned N, unsigned* active, unsigned * rxn , unsigned* done){
//PLACEHOLDER FOR FISSIONS, NEED TO READ NU TABLES LATER
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;} //return if out of bounds
//remap to active
//tid=active[tid];
if(done[tid]){return;}
if (rxn[tid] < 102 | rxn[tid] > 800 ){return;} //return if not some sort of absorption, ie (n,not-n)
//printf("in abs, rxn=%u\n",rxn[tid]);
done[tid] = 1;
}
void absorb( cudaStream_t stream, unsigned NUM_THREADS, unsigned N, unsigned* active, unsigned * rxn , unsigned* done){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
//absorb_kernel <<< blks, NUM_THREADS >>> ( N, active, rxn , done);
absorb_kernel <<< blks, NUM_THREADS , 0 , stream >>> ( N, active, rxn , done);
cudaThreadSynchronize();
}
|
b0fd91a2ac2f231607f434026e6fe3be8803428f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Find BLANK and replace your own code.
* And submit report why do you replace the blank that way.
*/
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#define TILE_WIDTH 16 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) {
// input : input_matrix address
// output : output buffer address
// input_size : width, height of input matrix
// filter_size : filter size of maxpooling
// all input, output matrices are vectorized
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
float large = 0.0f;
__shared__ float s_large[TILE_WIDTH];
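// Reduction scheme: the first thread of each tile row scans TILE_WIDTH elements of
// its row and writes the row maximum to shared memory; thread (0,0) then reduces
// the per-row maxima into this block's single output element.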
if(threadIdx.x == 0){
large = input[input_size*row];
for(int i = 1; i < TILE_WIDTH; i++){
if (large < input[input_size*row+i]){
large = input[input_size*row+i];
}
}
s_large[threadIdx.y] = large;
}else{
return;
}
__syncthreads();
if(threadIdx.x == 0 && threadIdx.y == 0){
for(int i = 1; i < TILE_WIDTH; i++){
if(large < s_large[i]){
large = s_large[i];
}
}
output[blockIdx.y*TILE_WIDTH + blockIdx.x] = large;
}else{
return;
}
}
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_s){
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
int input_size = input_s;
int a_default = input_size*row +tx;
int b_default = input_size*ty + col;
//if(row>=input_size ||col>=input_size) { return; }
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
float result = 0;
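// Tiled multiplication: each iteration stages one TILE_WIDTH-wide strip of A and B
// in shared memory (zero-padding elements outside the matrix), then accumulates
// this thread's partial dot product before moving on to the next strip.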
for(int p = 0; p < input_size / TILE_WIDTH + 1; ++p){
s_a[ty][tx] = a[a_default + p*TILE_WIDTH];
s_b[ty][tx] = b[b_default + p*input_size*TILE_WIDTH];
if(col >= input_size || p*TILE_WIDTH + ty >= input_size){
s_b[ty][tx] = 0;
}
if(row >= input_size || p*TILE_WIDTH + tx >= input_size){
s_a[ty][tx] = 0;
}
__syncthreads();
for(int i = 0; i < TILE_WIDTH;i++){
result += (s_a[ty][i]*s_b[i][tx]);
}
__syncthreads();
}
if(col < input_size && row < input_size){
output[row*input_size + col] = alpha * result + beta * c[row*input_size + col];
}
}
int main(int argc, char **argv) {
if(argc < 5) {
cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const int filter_size = stoi(argv[2]); // used for maxpooling
const float alpha = stof(argv[3]);
const float beta = stof(argv[4]);
const int maxpool_output_size = input_size/filter_size;
// check input_size is power of 2
if(input_size == 0 || (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
if(filter_size == 0){
cout << "filter_size cannot be 0\n";
return 1;
}
float maxpool_input[input_size*input_size];
float a[input_size*input_size];
float b[input_size*input_size];
float c[input_size*input_size];
// read input matrices
ifstream input_in(MAXPOOL_INPUT_FILENAME);
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
input_in >> maxpool_input[i];
a_in >> a[i];
b_in >> b[i];
c_in >> c[i];
}
// prints inputs for debugging.
cout<<"filter size : "<<filter_size;
cout<<"\n========== MAXPOOL_INPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<maxpool_input[i]<<" ";
}
cout<<"\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<" ";
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<" ";
}
cout<<"\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<" ";
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
cout<<block_size.x;
const dim3 num_of_maxpool_blocks(input_size / block_size.x, input_size/ block_size.y);
const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1);
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output;
hipMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
hipMalloc(&gemm_output, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_input, sizeof(float) * input_size * input_size);
hipMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size);
// copy variable to device memory
hipMemcpy(dev_mem_a, &a, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_b, &b, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_c, &c, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_input, &maxpool_input, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipEvent_t gemm_start, gemm_stop, maxpool_start, maxpool_stop;
hipEventCreate(&gemm_start);
hipEventCreate(&gemm_stop);
hipEventCreate(&maxpool_start);
hipEventCreate(&maxpool_stop);
// launch CUDA kernels
// First launch gemm kernel
hipEventRecord(gemm_start);
hipLaunchKernelGGL(( gemm), dim3(num_of_blocks), dim3(block_size), 0, 0, dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
hipEventRecord(gemm_stop);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr, "ERROR %s\n", hipGetErrorString(error));
return 1;
}
hipEventSynchronize(gemm_stop);
float gemm_t = 0;
hipEventElapsedTime(&gemm_t,gemm_start,gemm_stop);
// Then run maxpooling
hipEventRecord(maxpool_start);
hipLaunchKernelGGL(( maxpool), dim3(num_of_maxpool_blocks), dim3(block_size), 0, 0, dev_mem_input, maxpool_output, input_size, filter_size);
hipEventRecord(maxpool_stop);
hipDeviceSynchronize();
//hipError_t error = hipGetLastError();
error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr, "ERROR %s\n", hipGetErrorString(error));
return 1;
}
hipEventSynchronize(maxpool_stop);
float maxpool_t = 0;
hipEventElapsedTime(&maxpool_t, maxpool_start, maxpool_stop);
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size);
// copy results from device to host
hipMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, hipMemcpyDeviceToHost);
hipMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, hipMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
}
cout<<'\n';
cout<<"gemm time: " << gemm_t;
cout<<"\n========== MAXPOOL OUTPUT ==========\n";
for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) {
if(i%maxpool_output_size==0) cout<<"\n";
cout<<maxpool_output_buf[i]<<" ";
}
cout<<'\n';
cout <<"maxpool time: " <<maxpool_t;
hipFree(dev_mem_a);
hipFree(dev_mem_b);
hipFree(dev_mem_c);
hipFree(gemm_output);
hipFree(dev_mem_input);
hipFree(maxpool_output);
free(gemm_output_buf);
free(maxpool_output_buf);
return 0;
}
| b0fd91a2ac2f231607f434026e6fe3be8803428f.cu | /*
* Find BLANK and replace your own code.
* And submit report why do you replace the blank that way.
*/
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#define TILE_WIDTH 16 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) {
// input : input_matrix address
// output : output buffer address
// input_size : width, height of input matrix
// filter_size : filter size of maxpooling
// all input, output matrices are vectorized
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
float large = 0.0f;
__shared__ float s_large[TILE_WIDTH];
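// Reduction scheme: the first thread of each tile row scans TILE_WIDTH elements of
// its row and writes the row maximum to shared memory; thread (0,0) then reduces
// the per-row maxima into this block's single output element.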
if(threadIdx.x == 0){
large = input[input_size*row];
for(int i = 1; i < TILE_WIDTH; i++){
if (large < input[input_size*row+i]){
large = input[input_size*row+i];
}
}
s_large[threadIdx.y] = large;
}else{
return;
}
__syncthreads();
if(threadIdx.x == 0 && threadIdx.y == 0){
for(int i = 1; i < TILE_WIDTH; i++){
if(large < s_large[i]){
large = s_large[i];
}
}
output[blockIdx.y*TILE_WIDTH + blockIdx.x] = large;
}else{
return;
}
}
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_s){
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
int input_size = input_s;
int a_default = input_size*row +tx;
int b_default = input_size*ty + col;
//if(row>=input_size ||col>=input_size) { return; }
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
float result = 0;
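// Tiled multiplication: each iteration stages one TILE_WIDTH-wide strip of A and B
// in shared memory (zero-padding elements outside the matrix), then accumulates
// this thread's partial dot product before moving on to the next strip.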
for(int p = 0; p < input_size / TILE_WIDTH + 1; ++p){
s_a[ty][tx] = a[a_default + p*TILE_WIDTH];
s_b[ty][tx] = b[b_default + p*input_size*TILE_WIDTH];
if(col >= input_size || p*TILE_WIDTH + ty >= input_size){
s_b[ty][tx] = 0;
}
if(row >= input_size || p*TILE_WIDTH + tx >= input_size){
s_a[ty][tx] = 0;
}
__syncthreads();
for(int i = 0; i < TILE_WIDTH;i++){
result += (s_a[ty][i]*s_b[i][tx]);
}
__syncthreads();
}
if(col < input_size && row < input_size){
output[row*input_size + col] = alpha * result + beta * c[row*input_size + col];
}
}
int main(int argc, char **argv) {
if(argc < 5) {
cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const int filter_size = stoi(argv[2]); // used for maxpooling
const float alpha = stof(argv[3]);
const float beta = stof(argv[4]);
const int maxpool_output_size = input_size/filter_size;
// check input_size is power of 2
if(input_size == 0 || (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
if(filter_size == 0){
cout << "filter_size cannot be 0\n";
return 1;
}
float maxpool_input[input_size*input_size];
float a[input_size*input_size];
float b[input_size*input_size];
float c[input_size*input_size];
// read input matrices
ifstream input_in(MAXPOOL_INPUT_FILENAME);
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
input_in >> maxpool_input[i];
a_in >> a[i];
b_in >> b[i];
c_in >> c[i];
}
// prints inputs for debugging.
cout<<"filter size : "<<filter_size;
cout<<"\n========== MAXPOOL_INPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<maxpool_input[i]<<" ";
}
cout<<"\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<" ";
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<" ";
}
cout<<"\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<" ";
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
cout<<block_size.x;
const dim3 num_of_maxpool_blocks(input_size / block_size.x, input_size/ block_size.y);
const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1);
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output;
cudaMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
cudaMalloc(&gemm_output, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_input, sizeof(float) * input_size * input_size);
cudaMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size);
// copy variable to device memory
cudaMemcpy(dev_mem_a, &a, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_b, &b, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_c, &c, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_input, &maxpool_input, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaEvent_t gemm_start, gemm_stop, maxpool_start, maxpool_stop;
cudaEventCreate(&gemm_start);
cudaEventCreate(&gemm_stop);
cudaEventCreate(&maxpool_start);
cudaEventCreate(&maxpool_stop);
// launch CUDA kernels
// First launch gemm kernel
cudaEventRecord(gemm_start);
gemm<<<num_of_blocks, block_size>>>(dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
cudaEventRecord(gemm_stop);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
return 1;
}
cudaEventSynchronize(gemm_stop);
float gemm_t = 0;
cudaEventElapsedTime(&gemm_t,gemm_start,gemm_stop);
// Then run maxpooling
cudaEventRecord(maxpool_start);
maxpool<<<num_of_maxpool_blocks, block_size>>>(dev_mem_input, maxpool_output, input_size, filter_size);
cudaEventRecord(maxpool_stop);
cudaDeviceSynchronize();
//cudaError_t error = cudaGetLastError();
error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
return 1;
}
cudaEventSynchronize(maxpool_stop);
float maxpool_t = 0;
cudaEventElapsedTime(&maxpool_t, maxpool_start, maxpool_stop);
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size);
// copy results from device to host
cudaMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, cudaMemcpyDeviceToHost);
cudaMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, cudaMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
}
cout<<'\n';
cout<<"gemm time: " << gemm_t;
cout<<"\n========== MAXPOOL OUTPUT ==========\n";
for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) {
if(i%maxpool_output_size==0) cout<<"\n";
cout<<maxpool_output_buf[i]<<" ";
}
cout<<'\n';
cout <<"maxpool time: " <<maxpool_t;
cudaFree(dev_mem_a);
cudaFree(dev_mem_b);
cudaFree(dev_mem_c);
cudaFree(gemm_output);
cudaFree(dev_mem_input);
cudaFree(maxpool_output);
free(gemm_output_buf);
free(maxpool_output_buf);
return 0;
}
|
9ed83b7e597825d060a4456d60adc7fdc6c8bfb4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <getopt.h>
#include <cstdio>
#define checkError(err) \
if ((err) != hipSuccess) { \
printf("ERROR: %s in %s, line %d\n",hipGetErrorString(err), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}
#define BLOCK_SIZE 32
#define KERNEL_ORDER 3
#define KERNEL_SIDE 1
using namespace cv;
using namespace std;
__device__
bool inside_image(int row, int col, int width, int height) {
return row >= 0 && row < height && col >= 0 && col < width;
}
__device__
unsigned char bound_to_image(unsigned char* image, int row, int col, int width, int height) {
if (inside_image(row, col, width, height))
return image[row * width + col];
else
return 0;
}
__device__
float Q_rsqrt(float number) {
// Fast inverse square root, as implemented in 1999 in the Quake III Arena source code
// see https://en.wikipedia.org/wiki/Fast_inverse_square_root
int i;
float x2, y;
const float threehalfs = 1.5F;
x2 = number * 0.5F;
y = number;
i = *(int*) &y; // evil floating point bit level hacking
i = 0x5f3759df - (i >> 1); // what the fuck?
y = *(float*) &i;
y = y * (threehalfs - (x2 * y * y)); // 1st iteration
return y;
}
__device__
float Q_sqrt(float x) {
return x * Q_rsqrt(x);
}
__device__
unsigned char saturate_uchar(float x) {
return (unsigned char) min(max(round(x), 0.0f), 255.0f);
}
__constant__ float sobel_x[KERNEL_ORDER * KERNEL_ORDER];
__constant__ float sobel_y[KERNEL_ORDER * KERNEL_ORDER];
__global__
void sobelOperatorKernel(unsigned char* image, unsigned char* out_image, int width, int height) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tile[BLOCK_SIZE + 2 * KERNEL_SIDE][BLOCK_SIZE + 2 * KERNEL_SIDE];
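// Shared-memory tile with a one-pixel halo on every side, so each 3x3 Sobel
// stencil read after __syncthreads() is served from shared memory.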
if (row < height && col < width) {
// Loading tile
int y_centre = threadIdx.y + 1;
int x_centre = threadIdx.x + 1;
tile[y_centre][x_centre] = image[row * width + col];
// Check corners
if (threadIdx.y == 0 && threadIdx.x == 0) {
tile[y_centre - 1][x_centre - 1] = bound_to_image(image, row - 1, col - 1, width, height);
} else if (threadIdx.y == 0 && threadIdx.x == blockDim.x - 1) {
tile[y_centre - 1][x_centre + 1] = bound_to_image(image, row - 1, col + 1, width, height);
} else if (threadIdx.y == blockDim.y - 1 && threadIdx.x == 0) {
tile[y_centre + 1][x_centre - 1] = bound_to_image(image, row + 1, col - 1, width, height);
} else if (threadIdx.y == blockDim.y - 1 && threadIdx.x == blockDim.x - 1) {
tile[y_centre + 1][x_centre + 1] = bound_to_image(image, row + 1, col + 1, width, height);
}
// Check sides
if (threadIdx.y == 0) {
tile[y_centre - 1][x_centre] = bound_to_image(image, row - 1, col, width, height);
} else if (threadIdx.y == blockDim.y - 1) {
tile[y_centre + 1][x_centre] = bound_to_image(image, row + 1, col, width, height);
}
if (threadIdx.x == 0) {
tile[y_centre][x_centre - 1] = bound_to_image(image, row, col - 1, width, height);
} else if (threadIdx.x == blockDim.x - 1) {
tile[y_centre][x_centre + 1] = bound_to_image(image, row, col + 1, width, height);
}
__syncthreads();
// Calculate gradient in x-direction
float grad_x = 0;
for (int i = -KERNEL_SIDE; i <= KERNEL_SIDE; i++) {
for (int j = -KERNEL_SIDE; j <= KERNEL_SIDE; j++) {
grad_x += tile[y_centre + i][x_centre + j] * sobel_x[(KERNEL_SIDE + i) * KERNEL_ORDER + (KERNEL_SIDE + j)];
}
}
// Calculate gradient in y-direction
float grad_y = 0;
for (int i = -KERNEL_SIDE; i <= KERNEL_SIDE; i++) {
for (int j = -KERNEL_SIDE; j <= KERNEL_SIDE; j++) {
grad_y += tile[y_centre + i][x_centre + j] * sobel_y[(KERNEL_SIDE + i) * KERNEL_ORDER + (KERNEL_SIDE + j)];
}
}
// Calculate gradient magnitude
out_image[row * width + col] = saturate_uchar(Q_sqrt(grad_x * grad_x + grad_y * grad_y));
}
}
void sobel(unsigned char *h_img, unsigned char *h_img_sobel, int width, int height, bool measure) {
unsigned char *d_img, *d_img_sobel;
long long size = width * height;
hipError_t err;
hipEvent_t start, stop;
err = hipMalloc((void**) &d_img, size * sizeof(unsigned char)); checkError(err);
err = hipMalloc((void**) &d_img_sobel, size * sizeof(unsigned char)); checkError(err);
err = hipMemcpy(d_img, h_img, size * sizeof(unsigned char), hipMemcpyHostToDevice); checkError(err);
float h_sobel_x[] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
float h_sobel_y[] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
err = hipMemcpyToSymbol(sobel_x, h_sobel_x, 9 * sizeof(float)); checkError(err);
err = hipMemcpyToSymbol(sobel_y, h_sobel_y, 9 * sizeof(float)); checkError(err);
dim3 dim_grid(ceil((double) width / BLOCK_SIZE), ceil((double) height / BLOCK_SIZE), 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
if (measure) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
hipLaunchKernelGGL(( sobelOperatorKernel), dim3(dim_grid), dim3(dim_block), 0, 0, d_img, d_img_sobel, width, height);
hipDeviceSynchronize();
if (measure) {
hipEventRecord(stop);
hipEventSynchronize(stop);
float seconds = 0;
hipEventElapsedTime(&seconds, start, stop);
seconds *= 1E-3;
int num_arrays = 2;
int bytes = num_arrays * sizeof(unsigned char);
int num_ops = 2 * 9 * 2 + 6;
float bw = size * bytes / seconds * 1E-9;
float th = size * num_ops / seconds * 1E-9;
printf("Effective bandwidth & Computational throughput\n");
printf("%2.5f (GB/s) & %2.5f (GFLOPS/s)\n", bw, th);
}
err = hipMemcpy(h_img_sobel, d_img_sobel, size * sizeof(unsigned char), hipMemcpyDeviceToHost); checkError(err);
err = hipFree(d_img); checkError(err);
err = hipFree(d_img_sobel); checkError(err);
}
void runProgram(Mat& image, bool show, bool measure) {
int height = image.rows;
int width = image.cols;
unsigned char *img_sobel = (unsigned char*) malloc(width * height * sizeof(unsigned char));
unsigned char *img = (unsigned char*) image.data;
sobel(img, img_sobel, width, height, measure);
if (show) {
imshow("Input", Mat(height, width, CV_8UC1, img));
waitKey(0);
imshow("Sobel operator", Mat(height, width, CV_8UC1, img_sobel));
waitKey(0);
}
free(img_sobel);
}
void usage(char* program_name) {
int n = 2;
string opts[] = {"-s, --show", "-m, --measure"};
string description[] = {
"Show original image and result",
"Permormance measures"
};
cout << "Usage: " << program_name << " [options ...] img1" << endl;
cout << endl;
cout << "Options" << endl;
for (int i = 0; i < n; i++) {
cout << " " << opts[i] << ": " << description[i] << endl;
}
exit(EXIT_FAILURE);
}
int main(int argc, char** argv) {
int opt, opt_index = 0;
static struct option options[] = {
{"show", no_argument, 0, 's'},
{"measure", no_argument, 0, 'm'},
{0, 0, 0, 0}
};
bool show = false;
bool measure = false;
while ((opt = getopt_long(argc, argv, "sm", options, &opt_index)) != -1) {
switch (opt) {
case 's':
show = true;
break;
case 'm':
measure = true;
break;
default:
usage(argv[0]);
break;
}
}
if (argc - optind != 1) {
cout << "Error: You must provide an image" << endl << endl;
usage(argv[0]);
}
Mat image = imread(argv[optind], CV_LOAD_IMAGE_GRAYSCALE);
if (!image.data) {
printf("Could not open or find %s\n", argv[optind]);
exit(EXIT_FAILURE);
}
runProgram(image, show, measure);
return 0;
}
| 9ed83b7e597825d060a4456d60adc7fdc6c8bfb4.cu | #include <cuda.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <getopt.h>
#include <cstdio>
#define checkError(err) \
if ((err) != cudaSuccess) { \
printf("ERROR: %s in %s, line %d\n",cudaGetErrorString(err), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}
#define BLOCK_SIZE 32
#define KERNEL_ORDER 3
#define KERNEL_SIDE 1
using namespace cv;
using namespace std;
__device__
bool inside_image(int row, int col, int width, int height) {
return row >= 0 && row < height && col >= 0 && col < width;
}
__device__
unsigned char bound_to_image(unsigned char* image, int row, int col, int width, int height) {
if (inside_image(row, col, width, height))
return image[row * width + col];
else
return 0;
}
__device__
float Q_rsqrt(float number) {
// Fast inverse square root, as implemented in 1999 in the Quake III Arena source code
// see https://en.wikipedia.org/wiki/Fast_inverse_square_root
int i;
float x2, y;
const float threehalfs = 1.5F;
x2 = number * 0.5F;
y = number;
i = *(int*) &y; // evil floating point bit level hacking
i = 0x5f3759df - (i >> 1); // what the fuck?
y = *(float*) &i;
y = y * (threehalfs - (x2 * y * y)); // 1st iteration
return y;
}
__device__
float Q_sqrt(float x) {
return x * Q_rsqrt(x);
}
__device__
unsigned char saturate_uchar(float x) {
return (unsigned char) min(max(round(x), 0.0f), 255.0f);
}
__constant__ float sobel_x[KERNEL_ORDER * KERNEL_ORDER];
__constant__ float sobel_y[KERNEL_ORDER * KERNEL_ORDER];
__global__
void sobelOperatorKernel(unsigned char* image, unsigned char* out_image, int width, int height) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tile[BLOCK_SIZE + 2 * KERNEL_SIDE][BLOCK_SIZE + 2 * KERNEL_SIDE];
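// Shared-memory tile with a one-pixel halo on every side, so each 3x3 Sobel
// stencil read after __syncthreads() is served from shared memory.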
if (row < height && col < width) {
// Loading tile
int y_centre = threadIdx.y + 1;
int x_centre = threadIdx.x + 1;
tile[y_centre][x_centre] = image[row * width + col];
// Check corners
if (threadIdx.y == 0 && threadIdx.x == 0) {
tile[y_centre - 1][x_centre - 1] = bound_to_image(image, row - 1, col - 1, width, height);
} else if (threadIdx.y == 0 && threadIdx.x == blockDim.x - 1) {
tile[y_centre - 1][x_centre + 1] = bound_to_image(image, row - 1, col + 1, width, height);
} else if (threadIdx.y == blockDim.y - 1 && threadIdx.x == 0) {
tile[y_centre + 1][x_centre - 1] = bound_to_image(image, row + 1, col - 1, width, height);
} else if (threadIdx.y == blockDim.y - 1 && threadIdx.x == blockDim.x - 1) {
tile[y_centre + 1][x_centre + 1] = bound_to_image(image, row + 1, col + 1, width, height);
}
// Check sides
if (threadIdx.y == 0) {
tile[y_centre - 1][x_centre] = bound_to_image(image, row - 1, col, width, height);
} else if (threadIdx.y == blockDim.y - 1) {
tile[y_centre + 1][x_centre] = bound_to_image(image, row + 1, col, width, height);
}
if (threadIdx.x == 0) {
tile[y_centre][x_centre - 1] = bound_to_image(image, row, col - 1, width, height);
} else if (threadIdx.x == blockDim.x - 1) {
tile[y_centre][x_centre + 1] = bound_to_image(image, row, col + 1, width, height);
}
__syncthreads();
// Calculate gradient in x-direction
float grad_x = 0;
for (int i = -KERNEL_SIDE; i <= KERNEL_SIDE; i++) {
for (int j = -KERNEL_SIDE; j <= KERNEL_SIDE; j++) {
grad_x += tile[y_centre + i][x_centre + j] * sobel_x[(KERNEL_SIDE + i) * KERNEL_ORDER + (KERNEL_SIDE + j)];
}
}
// Calculate gradient in y-direction
float grad_y = 0;
for (int i = -KERNEL_SIDE; i <= KERNEL_SIDE; i++) {
for (int j = -KERNEL_SIDE; j <= KERNEL_SIDE; j++) {
grad_y += tile[y_centre + i][x_centre + j] * sobel_y[(KERNEL_SIDE + i) * KERNEL_ORDER + (KERNEL_SIDE + j)];
}
}
// Calculate gradient magnitude
out_image[row * width + col] = saturate_uchar(Q_sqrt(grad_x * grad_x + grad_y * grad_y));
}
}
void sobel(unsigned char *h_img, unsigned char *h_img_sobel, int width, int height, bool measure) {
unsigned char *d_img, *d_img_sobel;
long long size = width * height;
cudaError_t err;
cudaEvent_t start, stop;
err = cudaMalloc((void**) &d_img, size * sizeof(unsigned char)); checkError(err);
err = cudaMalloc((void**) &d_img_sobel, size * sizeof(unsigned char)); checkError(err);
err = cudaMemcpy(d_img, h_img, size * sizeof(unsigned char), cudaMemcpyHostToDevice); checkError(err);
float h_sobel_x[] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
float h_sobel_y[] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
err = cudaMemcpyToSymbol(sobel_x, h_sobel_x, 9 * sizeof(float)); checkError(err);
err = cudaMemcpyToSymbol(sobel_y, h_sobel_y, 9 * sizeof(float)); checkError(err);
dim3 dim_grid(ceil((double) width / BLOCK_SIZE), ceil((double) height / BLOCK_SIZE), 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
if (measure) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
sobelOperatorKernel<<<dim_grid, dim_block>>>(d_img, d_img_sobel, width, height);
cudaDeviceSynchronize();
if (measure) {
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float seconds = 0;
cudaEventElapsedTime(&seconds, start, stop);
seconds *= 1E-3;
int num_arrays = 2;
int bytes = num_arrays * sizeof(unsigned char);
int num_ops = 2 * 9 * 2 + 6;
float bw = size * bytes / seconds * 1E-9;
float th = size * num_ops / seconds * 1E-9;
printf("Effective bandwidth & Computational throughput\n");
printf("%2.5f (GB/s) & %2.5f (GFLOPS/s)\n", bw, th);
}
err = cudaMemcpy(h_img_sobel, d_img_sobel, size * sizeof(unsigned char), cudaMemcpyDeviceToHost); checkError(err);
err = cudaFree(d_img); checkError(err);
err = cudaFree(d_img_sobel); checkError(err);
}
void runProgram(Mat& image, bool show, bool measure) {
int height = image.rows;
int width = image.cols;
unsigned char *img_sobel = (unsigned char*) malloc(width * height * sizeof(unsigned char));
unsigned char *img = (unsigned char*) image.data;
sobel(img, img_sobel, width, height, measure);
if (show) {
imshow("Input", Mat(height, width, CV_8UC1, img));
waitKey(0);
imshow("Sobel operator", Mat(height, width, CV_8UC1, img_sobel));
waitKey(0);
}
free(img_sobel);
}
void usage(char* program_name) {
int n = 2;
string opts[] = {"-s, --show", "-m, --measure"};
string description[] = {
"Show original image and result",
"Permormance measures"
};
cout << "Usage: " << program_name << " [options ...] img1" << endl;
cout << endl;
cout << "Options" << endl;
for (int i = 0; i < n; i++) {
cout << " " << opts[i] << ": " << description[i] << endl;
}
exit(EXIT_FAILURE);
}
int main(int argc, char** argv) {
int opt, opt_index = 0;
static struct option options[] = {
{"show", no_argument, 0, 's'},
{"measure", no_argument, 0, 'm'},
{0, 0, 0, 0}
};
bool show = false;
bool measure = false;
while ((opt = getopt_long(argc, argv, "sm", options, &opt_index)) != -1) {
switch (opt) {
case 's':
show = true;
break;
case 'm':
measure = true;
break;
default:
usage(argv[0]);
break;
}
}
if (argc - optind != 1) {
cout << "Error: You must provide an image" << endl << endl;
usage(argv[0]);
}
Mat image = imread(argv[optind], CV_LOAD_IMAGE_GRAYSCALE);
if (!image.data) {
printf("Could not open or find %s\n", argv[optind]);
exit(EXIT_FAILURE);
}
runProgram(image, show, measure);
return 0;
}
|
3cdd13b06ed94a16c027008e3a3466cf383f78c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void loop()
{
/*
* The following idiom gives each thread a unique index across the entire grid.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\n", i);
}
int main()
{
/*
* Additional execution configurations that work within the constraints of the
* exercise are:
*
* <<<5, 2>>>
* <<<10, 1>>>
*/
hipLaunchKernelGGL(( loop), dim3(2), dim3(5), 0, 0, );
hipDeviceSynchronize();
}
| 3cdd13b06ed94a16c027008e3a3466cf383f78c9.cu | #include <stdio.h>
__global__ void loop()
{
/*
* The following idiom gives each thread a unique index across the entire grid.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\n", i);
}
int main()
{
/*
* Additional execution configurations that work within the constraints of the
* exercise are:
*
* <<<5, 2>>>
* <<<10, 1>>>
*/
loop<<<2, 5>>>();
cudaDeviceSynchronize();
}
|
5cd10a605d299c2ec70a7526941f35b2690d2e66.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "shared4R1W1G.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((shared4R1W1G), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((shared4R1W1G), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((shared4R1W1G), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, C, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5cd10a605d299c2ec70a7526941f35b2690d2e66.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "shared4R1W1G.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
shared4R1W1G<<<gridBlock,threadBlock>>>(A,B,C,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
shared4R1W1G<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
shared4R1W1G<<<gridBlock,threadBlock>>>(A,B,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
135e6048230f9dd9ada8efff607d4d472712b54b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
/// note: count how many points this warp filled into the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0)
{
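// Warp-level compaction: an exclusive scan of each lane's local_count gives
// per-lane write offsets, lane 0 reserves a contiguous range in the global output
// buffer with a single atomicAdd, and the staged coordinates are then written out.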
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//hipFuncSetCacheConfig(extractKernel, hipFuncCachePreferL1);
//printFuncAttrib(extractKernel);
hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, fs);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
namespace pcl
{
namespace device
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
constexpr float qnan = std::numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
| 135e6048230f9dd9ada8efff607d4d472712b54b.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
///note how many entries of the local points array were filled by this warp in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0)
{
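// Warp-aggregated output: stage this warp's points in shared memory, let lane 0 reserve space in the global buffer with one atomicAdd, then have all lanes copy the staged points to the output array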
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//cudaFuncSetCacheConfig(extractKernel, cudaFuncCachePreferL1);
//printFuncAttrib(extractKernel);
extractKernel<<<grid, block>>>(fs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
namespace pcl
{
namespace device
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
constexpr float qnan = std::numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
extractNormalsKernel<<<grid, block>>>(en);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
|
edd7bb68d5dc3eba9bb89d56aec531465bf91b59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_T_T_64_16_16_16_4(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
__shared__ double Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
C += __mul24(ibx + idt, ldc) + iby;
B += tx + __mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory
accesses in dimension N
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid
memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1 = 0;
const double *Bend = B + k - k % 16;
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
double Ab[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[12][0], Cb );
daxpy( Ab[1], &Bb[13][0], Cb );
daxpy( Ab[2], &Bb[14][0], Cb );
daxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory accesses
may still be invalid, so take care of them now by
setting s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimensions M, N that don't fit into whole blocks
*/
if ( (iby + 16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
C[15] = alpha * Cb[15] + beta * C[15];
break;
case 15:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
break;
case 14:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
break;
case 13:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
break;
case 12:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
break;
case 11:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
break;
case 10:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
break;
case 9:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
break;
case 8:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
break;
case 7:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
break;
case 6:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
break;
case 5:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
break;
case 4:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
break;
case 3:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
break;
case 2:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
break;
case 1:
C[0] = alpha * Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_T_T_64_16_16_16_4(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( dgemm_kernel_T_T_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| edd7bb68d5dc3eba9bb89d56aec531465bf91b59.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_T_T_64_16_16_16_4(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
__shared__ double Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx + idt >= m )
A += ibx + 0;
else
A += ibx + idt;
C += __mul24(ibx + idt, ldc) + iby;
B += tx + __mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory
accesses in dimension N
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid
memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1 = 0;
const double *Bend = B + k - k % 16;
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
double Ab[4] = { A[0], A[lda], A[2*lda], A[3*lda] };
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
daxpy( Ab[0], &Bb[12][0], Cb );
daxpy( Ab[1], &Bb[13][0], Cb );
daxpy( Ab[2], &Bb[14][0], Cb );
daxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory accesses
may still be invalid, so take care of them now by
setting s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimensions M, N that don't fit into whole blocks
*/
if ( (iby + 16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
C[15] = alpha * Cb[15] + beta * C[15];
break;
case 15:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
C[14] = alpha * Cb[14] + beta * C[14];
break;
case 14:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
C[13] = alpha * Cb[13] + beta * C[13];
break;
case 13:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
C[12] = alpha * Cb[12] + beta * C[12];
break;
case 12:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
C[11] = alpha * Cb[11] + beta * C[11];
break;
case 11:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
C[10] = alpha * Cb[10] + beta * C[10];
break;
case 10:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
C[9] = alpha * Cb[9] + beta * C[9];
break;
case 9:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
C[8] = alpha * Cb[8] + beta * C[8];
break;
case 8:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
C[7] = alpha * Cb[7] + beta * C[7];
break;
case 7:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
C[6] = alpha * Cb[6] + beta * C[6];
break;
case 6:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
C[5] = alpha * Cb[5] + beta * C[5];
break;
case 5:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
C[4] = alpha * Cb[4] + beta * C[4];
break;
case 4:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
C[3] = alpha * Cb[3] + beta * C[3];
break;
case 3:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
C[2] = alpha * Cb[2] + beta * C[2];
break;
case 2:
C[0] = alpha * Cb[0] + beta * C[0];
C[1] = alpha * Cb[1] + beta * C[1];
break;
case 1:
C[0] = alpha * Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_T_T_64_16_16_16_4(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
dgemm_kernel_T_T_64_16_16_16_4<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
205572ca765497d70a807fbb80008ca506c24fa3.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=8 --gridDim=1 --no-inline
// The statically given values for A are not preserved when we translate CUDA
// since the host is free to change the contents of A.
// cf. testsuite/OpenCL/globalarray/pass2
#include <call_kernel.h>
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define N 2//8
#define THREAD_CHANGE 1
__constant__ float A[8] = {0,1,2,3,4,5,6,7};
__global__ void globalarray(float* p) {
int i = threadIdx.x;
A[THREAD_CHANGE] = 0; // forcing the conditional below to be entered, by altering a constant!
int a = A[i];
if(a != threadIdx.x) {
p[0] = threadIdx.x; // only thread 1 reaches this point, so there is no data race
}
}
int main(){
float *a;
float *c;
float *dev_a;
int size = N*sizeof(float);
hipMalloc((void**)&dev_a, size);
a = (float*)malloc(size);
c = (float*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 5;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
// printf("a: ");
// for (int i = 0; i < N; i++)
// printf("%f ", a[i]);
// globalarray<<<1,N>>>(dev_a);
ESBMC_verify_kernel(globalarray,1,N,dev_a);
hipMemcpy(c,dev_a,size,hipMemcpyDeviceToHost);
//assert(c[0]!=THREAD_CHANGE); // force the ERROR
free(a);
free(c);
hipFree(dev_a);
return 0;
}
| 205572ca765497d70a807fbb80008ca506c24fa3.cu | //xfail:BOOGIE_ERROR
//--blockDim=8 --gridDim=1 --no-inline
// The statically given values for A are not preserved when we translate CUDA
// since the host is free to change the contents of A.
// cf. testsuite/OpenCL/globalarray/pass2
#include <call_kernel.h>
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define N 2//8
#define THREAD_CHANGE 1
__constant__ float A[8] = {0,1,2,3,4,5,6,7};
__global__ void globalarray(float* p) {
int i = threadIdx.x;
A[THREAD_CHANGE] = 0; // forcing the conditional below to be entered, by altering a constant!
int a = A[i];
if(a != threadIdx.x) {
p[0] = threadIdx.x; // only thread 1 reaches this point, so there is no data race
}
}
int main(){
float *a;
float *c;
float *dev_a;
int size = N*sizeof(float);
cudaMalloc((void**)&dev_a, size);
a = (float*)malloc(size);
c = (float*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 5;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
// printf("a: ");
// for (int i = 0; i < N; i++)
// printf("%f ", a[i]);
// globalarray<<<1,N>>>(dev_a);
ESBMC_verify_kernel(globalarray,1,N,dev_a);
cudaMemcpy(c,dev_a,size,cudaMemcpyDeviceToHost);
//assert(c[0]!=THREAD_CHANGE); // force the ERROR
free(a);
free(c);
cudaFree(dev_a);
return 0;
}
|
f72cd2df6454d5db826eb563c44188e04cadb047.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include "SamplingPoint.cuh"
#include "CUDAHelper.cuh"
SamplingPoint::SamplingPoint(const SamplingPoint &other) : _x(other._x), _y(other._y), _i (other._i),
_s(other._s), _t(other._t), _fo(other._fo), _kernelSize(other._kernelSize), _kernel(nullptr), d_kernel(nullptr) {
if (other._kernel != nullptr) {
_kernel = new double[_kernelSize * _kernelSize];
memcpy(_kernel, other._kernel, sizeof(double) * _kernelSize * _kernelSize);
}
}
SamplingPoint::~SamplingPoint() {
if (_kernel != nullptr)
delete [] _kernel;
}
double* SamplingPoint::setKernel(std::vector<double> kernel, bool overrideSize) {
if (!overrideSize && kernel.size() != _kernelSize * _kernelSize) {
return nullptr;
} else {
_kernelSize = sqrt(kernel.size());
if (_kernel != nullptr)
delete [] _kernel;
_kernel = new double[_kernelSize * _kernelSize];
for (int i = 0; i != _kernelSize * _kernelSize; ++i) {
_kernel[i] = kernel.at(i);
}
if (d_kernel != nullptr) {
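// a device copy already exists: free it and upload the new coefficients at the (possibly resized) kernel size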
hipFree(d_kernel);
hipMalloc((void**)&d_kernel, sizeof(double) * _kernelSize * _kernelSize);
hipMemcpy(d_kernel, _kernel, sizeof(double) * _kernelSize * _kernelSize, hipMemcpyHostToDevice);
}
return _kernel;
}
}
void SamplingPoint::copyToDevice() {
if (_kernel == nullptr)
return;
if (d_kernel != nullptr) {
hipFree(d_kernel);
}
hipMalloc((void**)&d_kernel, sizeof(double) * _kernelSize * _kernelSize);
hipMemcpy(d_kernel, _kernel, sizeof(double) * _kernelSize * _kernelSize, hipMemcpyHostToDevice);
cudaCheckErrors("ERROR");
}
void SamplingPoint::removeFromDevice() {
if (d_kernel == nullptr)
return;
hipFree(d_kernel);
d_kernel = nullptr;
cudaCheckErrors("ERROR");
}
| f72cd2df6454d5db826eb563c44188e04cadb047.cu | #include <cmath>
#include "SamplingPoint.cuh"
#include "CUDAHelper.cuh"
SamplingPoint::SamplingPoint(const SamplingPoint &other) : _x(other._x), _y(other._y), _i (other._i),
_s(other._s), _t(other._t), _fo(other._fo), _kernelSize(other._kernelSize), _kernel(nullptr), d_kernel(nullptr) {
if (other._kernel != nullptr) {
_kernel = new double[_kernelSize * _kernelSize];
memcpy(_kernel, other._kernel, sizeof(double) * _kernelSize * _kernelSize);
}
}
SamplingPoint::~SamplingPoint() {
if (_kernel != nullptr)
delete [] _kernel;
}
double* SamplingPoint::setKernel(std::vector<double> kernel, bool overrideSize) {
if (!overrideSize && kernel.size() != _kernelSize * _kernelSize) {
return nullptr;
} else {
_kernelSize = sqrt(kernel.size());
if (_kernel != nullptr)
delete [] _kernel;
_kernel = new double[_kernelSize * _kernelSize];
for (int i = 0; i != _kernelSize * _kernelSize; ++i) {
_kernel[i] = kernel.at(i);
}
if (d_kernel != nullptr) {
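// a device copy already exists: free it and upload the new coefficients at the (possibly resized) kernel size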
cudaFree(d_kernel);
cudaMalloc((void**)&d_kernel, sizeof(double) * _kernelSize * _kernelSize);
cudaMemcpy(d_kernel, _kernel, sizeof(double) * _kernelSize * _kernelSize, cudaMemcpyHostToDevice);
}
return _kernel;
}
}
void SamplingPoint::copyToDevice() {
if (_kernel == nullptr)
return;
if (d_kernel != nullptr) {
cudaFree(d_kernel);
}
cudaMalloc((void**)&d_kernel, sizeof(double) * _kernelSize * _kernelSize);
cudaMemcpy(d_kernel, _kernel, sizeof(double) * _kernelSize * _kernelSize, cudaMemcpyHostToDevice);
cudaCheckErrors("ERROR");
}
void SamplingPoint::removeFromDevice() {
if (d_kernel == nullptr)
return;
cudaFree(d_kernel);
d_kernel = nullptr;
cudaCheckErrors("ERROR");
}
|
3a135182ae79615c82b9e43b64d2f038fd244236.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
ZPRBT_MVT computes B = U^T*B to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mtv_batched(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t max_batchCount = queue->get_maxBatch();
for(int i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( n, 4*block_length ), ibatch);
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db_array+i, 0);
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db_array+i, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
hipLaunchKernelGGL(( magmablas_zapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db_array+i, 0);
}
}
/***************************************************************************//**
Purpose
-------
ZPRBT_MV computes B = V*B to obtain the non-randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mv_batched(
magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid ( magma_ceildiv( n, 2*block_length ), ibatch);
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db_array+i, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db_array+i, 0);
hipLaunchKernelGGL(( magmablas_zapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db_array+i, n/2);
}
}
/***************************************************************************//**
Purpose
-------
ZPRBT randomizes a square general matrix using a partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magmaDoubleComplex *du, magmaDoubleComplex *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 threads2(block_height, block_width);
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), ibatch );
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array+i, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array+i, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array+i, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array+i, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), ibatch );
hipLaunchKernelGGL(( magmablas_zelementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA_array+i, 0, ldda, du, -ldda, dv, -ldda);
}
}
| 3a135182ae79615c82b9e43b64d2f038fd244236.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Adrien Remy
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "zgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
ZPRBT_MVT computes B = U^T*B to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mtv_batched(
magma_int_t n,
magmaDoubleComplex *du, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t max_batchCount = queue->get_maxBatch();
for(int i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( n, 4*block_length ), ibatch);
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db_array+i, 0);
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db_array+i, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
magmablas_zapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db_array+i, 0);
}
}
/***************************************************************************//**
Purpose
-------
ZPRBT_MV computes B = V*B to obtain the non-randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX_16 array, dimension (n)
The n vector db computed by ZGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_mv_batched(
magma_int_t n,
magmaDoubleComplex *dv, magmaDoubleComplex **db_array,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid ( magma_ceildiv( n, 2*block_length ), ibatch);
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db_array+i, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db_array+i, 0);
magmablas_zapply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db_array+i, n/2);
}
}
/***************************************************************************//**
Purpose
-------
ZPRBT randomizes a square general matrix using a partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX_16 array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_zprbt_batched(
magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t ldda,
magmaDoubleComplex *du, magmaDoubleComplex *dv,
magma_int_t batchCount, magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 threads2(block_height, block_width);
magma_int_t max_batchCount = queue->get_maxBatch();
for(magma_int_t i = 0; i < batchCount; i+=max_batchCount) {
magma_int_t ibatch = min(max_batchCount, batchCount-i);
dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), ibatch );
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array+i, 0, ldda, du, 0, dv, 0);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array+i, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array+i, n/2, ldda, du, n/2, dv, 0);
magmablas_zelementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array+i, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), ibatch );
magmablas_zelementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA_array+i, 0, ldda, du, -ldda, dv, -ldda);
}
}
|
62f027a32795fa1dfa9bec4f0d64f079b5835096.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#define CURAND_CALL(x) do { \
if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[]) {
int n = 100;
int i;
hiprandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
hipMalloc(&devData, n*sizeof(float));
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, 1234567));
/* Generate n floats on device */
CURAND_CALL(hiprandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
hipMemcpy(hostData, devData, n * sizeof(float), hipMemcpyDeviceToHost);
printf("hello world\n");
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]); }
printf("\n");
/* Cleanup */
CURAND_CALL(hiprandDestroyGenerator(gen));
hipFree(devData);
free(hostData);
return 0;
}
| 62f027a32795fa1dfa9bec4f0d64f079b5835096.cu | #include <stdio.h>
#include <curand_kernel.h>
#define CURAND_CALL(x) do { \
if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[]) {
int n = 100;
int i;
curandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
cudaMalloc(&devData, n*sizeof(float));
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, 1234567));
/* Generate n floats on device */
CURAND_CALL(curandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
cudaMemcpy(hostData, devData, n * sizeof(float), cudaMemcpyDeviceToHost);
printf("hello world\n");
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]); }
printf("\n");
/* Cleanup */
CURAND_CALL(curandDestroyGenerator(gen));
cudaFree(devData);
free(hostData);
return 0;
}
|
ebac250dd19d04af66f5e407ad6ea622092302f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"stdio.h"
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include"math.h"
#include <ctype.h>
#include <assert.h>
/* Euclidean distance calculation */
__host__ __device__ long distD(int i,int j,float *x,float*y)
{
float dx=x[i]-x[j];
float dy=y[i]-y[j];
return(sqrtf( (dx*dx) + (dy*dy) ));
}
// All of these thread-mapping strategies evaluate 2-opt moves.
/* A kernel function that finds a minimal weighted neighbor using the TPR (threads per row) mapping strategy */
__global__ void tsp_tpr(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit)
{
//threads per row strategy
long id,j;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{
for(j=i+1;j<cit;j++)
{ // pox and poy are arrays that store the x and y coordinates of each city
change = 0; cost=initcost;
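// 2-opt gain: add edges (i,j) and (i+1,j+1), remove edges (i,i+1) and (j,j+1); a negative change means a shorter tour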
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(cost < mincost)
{
mincost = cost;
id = i * (cit-1)+(j-1)-i*(i+1)/2;
}
}
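// pack (cost, move id) into one 64-bit word so a single atomicMin keeps the globally lowest-cost move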
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
/* A kernel function that finds a minimal weighted neighbor using the TPRED mapping strategy */
__global__ void tsp_tpred(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit,long itr)
{
long id,j,k;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{
//itr bounds how many candidate moves each thread evaluates
for(k=0;k<itr;k++)
{
change = 0; cost=initcost;
j=(i+1+k)%cit;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(cost < mincost)
{
mincost = cost;
if(i < j)
id = i * (cit-1)+(j-1)-i*(i+1)/2;
else
id = j * (cit-1)+(i-1)-j*(j+1)/2;
}
}
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
/*A kernel function that finds a minimum-cost neighboring tour using the TPRC mapping strategy*/
__global__ void tsp_tprc(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit)
{
long id;
long change,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
long j=threadIdx.y+blockIdx.y*blockDim.y;
//if city in bounds and the column you choose is more than the row, so there is no repeat issues
if(i < cit && j < cit && i < j)
{
change = 0; cost = initcost;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(change < 0)
{
id = i * (cit - 1) + (j - 1) - i * (i + 1) / 2;
atomicMin(dst_tid, ((unsigned long long)cost << 32) | id);
}
}
}
/*A kernel function that finds a minimum-cost neighboring tour using the TPN mapping strategy*/
__global__ void tsp_tpn(float *pox,float *poy,long cost,unsigned long long *dst_tid,long cit,long sol)
{
long i,j;
register long change=0;
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(id<sol)
{
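/* recover (i,j) from the flat pair index id: sol = cit*(cit-1)/2 is the total number of pairs,
   and the closed form below inverts id = i*(cit-1)+(j-1)-i*(i+1)/2 */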
i=cit-2-floorf(((int)__dsqrt_rn(8*(sol-id-1)+1)-1)/2);
j=id-i*(cit-1)+(i*(i+1)/2)+1;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(change < 0)
atomicMin(dst_tid, ((unsigned long long)cost << 32) | id);
}
}
/* At each IHC step, the XY coordinates are rearranged into the next solution's order by reversing the tour segment between positions x+1 and y (the accepted 2-opt move) */
void twoOpt(long x,long y,float *pox,float *poy)
{
float *tmp_x,*tmp_y;
int i,j;
tmp_x=(float*)malloc(sizeof(float)*(y-x));
tmp_y=(float*)malloc(sizeof(float)*(y-x));
for(j=0,i=y;i>x;i--,j++)
{
tmp_x[j]=pox[i];
tmp_y[j]=poy[i];
}
for(j=0,i=x+1;i<=y;i++,j++)
{
pox[i]=tmp_x[j];
poy[i]=tmp_y[j];
}
free(tmp_x);
free(tmp_y);
}
/*Arranges XY coordinates in initial solution's order*/
void setCoord(int *r,float *posx,float *posy,float *px,float *py,long cities)
{
for(int i=0;i<cities;i++)
{
px[i]=posx[r[i]];
py[i]=posy[r[i]];
}
}
/* Initial solution construction using NN */
long nn_init(int *route,long cities,float *posx,float*posy)
{ //route stores the route taken, cities is the number of cities, posx and posy are the positions of the ith city
route[0]=0;
int k=1,i=0,j;
float min;
int minj,mini,count=1,flag=0;
long dst=0;
int *visited=(int*)calloc(cities,sizeof(int));
visited[0]=1;
while(count!=cities)
{
flag=0;
for(j=1;j<cities;j++)
{ //if j isn't visited yet
if(i!=j && !visited[j])
{
min=distD(i,j,posx,posy);
minj=j;
break;
}
}
//for the minimum cost j
for(j=minj+1;j<cities;j++)
{//for every node from the min cost, if you haven't visited, then check and generate the pair
if( !visited[j])
{
if(min>distD(i,j,posx,posy))
{
min=distD(i,j,posx,posy);
mini=j;
flag=1;
}
}
}
if(flag==0)
i=minj;
else
i=mini;
dst+=min;
route[k++]=i;
visited[i]=1;
count++;
}
free(visited);
dst+=distD(route[0],route[cities-1],posx,posy);
return dst;
}
/* Initial solution construction using sequenced approach */
void seq_init(int*route,long N)
{
int i;
for(i=0;i<N;i++)
route[i]=i;
}
/* Initial solution construction using random approach */
void random_init(int *route,long cities)
{
int i=0,j;
int *visited = (int*)calloc(cities,sizeof(int));
while(i<cities)
{
//srand (clock() );
j=rand() % cities;
if(visited[j])
{
continue;
}
else
{
route[i]=j;
visited[j]=1;
i++;
}
}
free(visited);
}
struct nearest_insert
{
int city;
struct nearest_insert *next;
};
struct odd_degree
{
int city;
struct odd_degree *next;
};
struct rev_visit
{
int i,j;
struct rev_visit *next;
};
/* Initial solution construction using nearest insertion approach */
void nearest_insertion(int *r, float *posx, float *posy, long cities)
{
struct nearest_insert *node,*p1,*tmp,*current,*route,*first = NULL;
int i,j,dist,min=0;
int count,minI,minJ;
int min_diff,diff,min_i,min_j;
int *v;
v = (int *)calloc(cities, sizeof(int));
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = 0;
node->next = NULL;
first = node;
current = node;
count = 1;
v[0]=1;
while(count != cities)
{//as long as there are new cities
min = 0;
for(route = first; route != NULL; route=route->next)
{//traverse the whole route to find the shortest edge
i = route->city;
for(j = 0; j < cities; j++)
{
if(i !=j &&!v[j])
{
dist = distD(i,j,posx,posy);
if(min==0)
{
min=dist;
minI=i;
minJ=j;
}
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
}
//mark the newly chosen city as visited
v[minJ]=1;
//when you're starting out
if(count < 3)
{
if(first->city == minI)
{
if(first->next == NULL)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
first->next = node;
current = current->next;
}
else
{
tmp = first->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
first->next = node;
}
}
else if(current->city == minI)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
else
{
p1 = first->next;
while (p1->city != minI)
p1=p1->next;
tmp = p1->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
p1->next = node;
}
}
else
{//three or more cities are already in the partial tour
p1 = first;
min_i = p1->city;
min_j = p1->next->city;
min_diff = distD(min_i,minJ,posx,posy) + distD(minJ,min_j,posx,posy) - distD(min_i,min_j,posx,posy);
p1 = p1->next;
while(p1->next!=NULL)
{//go through the path
i = p1->city;
j = p1->next->city;
//cost change of inserting minJ between consecutive tour cities i and j
diff = distD(i,minJ,posx,posy) + distD(minJ,j,posx,posy) - distD(i,j,posx,posy);
if(min_diff > diff )
{
min_diff = diff;
min_i = i;
min_j = j;
}
p1 = p1->next;
}
i = p1->city;
j = 0;
diff = distD(i,minJ,posx,posy) + distD(minJ,j,posx,posy) - distD(i,j,posx,posy);
//and cycle around
if(min_diff > diff )
{
min_diff = diff;
min_i = i;
min_j = j;
}
if(current->city == min_i)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
else
{
p1 = first;
while (p1->city != min_i)
{ p1=p1->next;}
tmp = p1->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
p1->next = node;
}
}
count++;
}
i=0;
p1=first;
while(p1!=NULL)
{
r[i] = p1->city;
p1=p1->next;
i++;
}
}
struct greedy
{
int city;
struct greedy *next;
};
/* Initial solution construction using greedy approach */
void greedy(int *r, float *posx, float *posy, long cities)
{
struct greedy *node,*p1,*current,*first = NULL;
int i,j,min=0,dist;
int count,minI,minJ;
int *v;
v = (int *)calloc(cities, sizeof(int));
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = 0;
node->next = NULL;
first = node;
current = node;
count = 1;
v[0]=1;
min = 0;
while(count != cities)
{ //operate from the first city,
i = first->city;
min = 0;
//keep exploring cities
for(j = 0; j < cities; j++)
{ //until you find a new one
if(!v[j] && i != j)
{ //calc distance and store min dist
dist = distD(i,j,posx,posy);
if(min==0)
{
min=dist;
minI=i;
minJ=j;
}
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
//if this is not the first pass
if(first != current)
{
i = current->city;
for(j = 0; j < cities; j++)
{//also scan from the current tail of the tour; same nearest-city search as above
if(!v[j] && i != j)
{
dist = distD(i,j,posx,posy);
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
}
v[minJ]=1;
if(first->city == minI)
{
if(first->next == NULL)
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = NULL;
first->next = node;
current = current->next;
}
else
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = first;
first = node;
}
}
else
{
if (current->city == minI)
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
}
count++;
}
i=0;
p1=first;
while(p1!=NULL)
{
r[i] = p1->city;
p1=p1->next;
i++;
}
}
struct visit_list
{
int city;
struct visit_list *next;
};
struct MST
{
int i,j,weight;
struct MST *next;
struct MST *prev;
};
struct eul_tour
{
int city;
struct eul_tour *next;
struct eul_tour *prev;
};
/* Initial solution construction using MST approach */
//minimum spanning tree
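//sketch of the routine below: grow the tree Prim-style from city 0 by repeatedly adding the
//cheapest edge to an unvisited city, then walk the edge list (backtracking at dead ends) to
//flatten the tree into a tour; duplicate visits are filtered out at the end.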
void mst_init(int *r, float *posx, float *posy, long cities)
{
int *deg,*var_deg,dist;
int i,j, min,min_i,min_j, count,*v;
struct eul_tour *et,*top=NULL,*curr, *node1,*rev;
struct visit_list *first=NULL,*current, *p1,*visited;
struct MST *head =NULL, *cur, *node,*p;
deg = (int*)calloc(cities,sizeof(int) );
v = (int*) calloc(cities, sizeof(int));
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = 0;
visited->next = NULL;
first = visited;
current = first;
count = 1;
p1 =first;
v[0] = 1;
while(count != cities )
{ //while all cities aren't explored
min = 0;
for(p1 = first; p1!=NULL; p1=p1->next)
{
//fix a node,
i = p1->city;
for(j = 0; j < cities; j++)
{
//check and find the smallest edge with that node
if(i != j && !v[j])
{
dist = distD(i,j,posx,posy);
if(min == 0 )
{
min = dist;
min_i =i;
min_j =j;
}
if(min > dist)
{
min = dist;
min_i =i;
min_j =j;
}
}
}
}
v[min_j] =1;
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = min_j;
visited->next = NULL;
current->next =visited;
current = current->next;
//and now add that edge
deg[min_i]+=1;
deg[min_j]+=1;
//make a node of the mst,
//and add that edge
node = (struct MST*)malloc(sizeof(struct MST));
node->i = min_i;
node->j = min_j;
node->weight = min;
node->next = NULL;
//linked list stuff
if(head == NULL)
{
node->prev = NULL;
head = node;
cur = node;
}
else
{
node->prev = cur;
cur->next = node;
cur = cur->next;
}
count++;
}
v = (int*) calloc(cities, sizeof(int));
var_deg = (int*) calloc(cities, sizeof(int));
p = head;
//find a leaf,
while(deg[p->i] != 1 && deg[p->j] != 1)
p = p->next;
//take the leaf city,
if(deg[p->i] == 1 )
{ //take the leaf, make it the first node of the walk, and append the other endpoint of its edge
i = p->i;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[j]++;
}
else
{
i = p->j;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[j]++;
}
//now we have 2 nodes, i.e. one edge,
count = 2;
p = head;
while(count != cities)
{
if(deg[j]!= 1)
{
if(p->i == j && !v[p->j])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
p = p->next;
}
else if(p->j == j && !v[p->i])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
p = p->next;
}
else
{
p = head;
while( (p->i != j || v[p->j]) && (p->j != j || v[p->i]) )
p = p->next;
}
}
else
{
rev = curr->prev;
while(deg[rev->city] == var_deg[rev->city])
{
rev = rev ->prev;
}
j = rev->city;
p = head;
}
}
v = (int*) calloc(cities, sizeof(int));
i=0;
et = top;
while(et != NULL)
{
if(v[et->city] == 0)
{
r[i++] = et->city;
v[et->city] = 1;
}
et = et->next;
}
}
//if the edge exists, then return 1
int searchEdge(int min_i,int min_j, struct MST * p)
{
int flag =0;
while(p != NULL )
{
if( (p->i == min_i && p->j == min_j) || (p->i == min_j && p->j == min_i ) )
{
flag = 1;
break;
}
p = p->next;
}
if(flag == 1 )
return 1;
else
return 0;
}
/* Initial solution construction using Christofides' approach */
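//outline: build an MST, collect the odd-degree vertices, dump their pairwise distances to
//odd_edges.txt, run the external Blossom V matcher for a minimum-weight perfect matching,
//add the matching edges to the tree, and walk the resulting multigraph into a tour.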
void christofide_init(int *r, float *posx, float *posy, long cities)
{
int *deg,*var_deg,dist,flg=0;
int i,j, min,min_i,min_j, count,*v,size;
int *odd_array,flag = 0;
FILE *fp; char line[100];
struct eul_tour *et,*top=NULL,*curr, *node1,*rev;
struct visit_list *first=NULL,*current, *p1,*visited;
struct MST *head =NULL, *cur, *node,*p;
struct odd_degree *init=NULL, *at, *odd;
struct rev_visit* rev_node=NULL,*loop;
deg = (int*)calloc(cities,sizeof(int));
v = (int*) calloc(cities, sizeof(int));
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = 0;
visited->next = NULL;
first = visited;
current = first;
count = 1;
p1 =first;
v[0] = 1;
while(count != cities )
{
min = 0;
for(p1 = first; p1!=NULL; p1=p1->next)
{
i = p1->city;
for(j = 0; j < cities; j++)
{
if(i != j && !v[j])
{
dist = distD(i,j,posx,posy);
if(min == 0 )
{
min = dist;
min_i =i;
min_j =j;
}
if(min > dist)
{
min = dist;
min_i =i;
min_j =j;
}
}
}
}
v[min_j] =1;
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = min_j;
visited->next = NULL;
current->next =visited;
current = current->next;
deg[min_i]+=1;
deg[min_j]+=1;
node = (struct MST*)malloc(sizeof(struct MST));
node->i = min_i;
node->j = min_j;
node->weight = min;
node->next = NULL;
if(head == NULL)
{
node->prev = NULL;
head = node;
cur = node;
}
else
{
node->prev = cur;
cur->next = node;
cur = cur->next;
}
count++;
}
p = head;
size = 0;
//make set of all odd degree nodes,
for(i = 0; i < cities; i++)
{
if(deg[i]%2 != 0)
{
odd = (struct odd_degree*)malloc(sizeof(struct odd_degree));
odd->city = i;
odd->next = NULL;
if(init == NULL)
{
init = odd;
at = odd;
}
else
{
at->next = odd;
at = at->next;
}
size++;
}
}
odd_array = (int*)malloc(sizeof(int)*size);
odd = init;
i = 0;
while(odd != NULL)
{
odd_array[i++] = odd->city;
odd = odd->next;
}
//odd_array has all nodes with odd degrees
v = (int*) calloc(size, sizeof(int));
//the number of odd-degree vertices in any graph is even
assert(size % 2 == 0);
fp = fopen("odd_edges.txt", "w");
//need at least one pair of odd-degree vertices to match
assert(size >= 2);
fprintf(fp, "%d %d\n", size, (size*(size-1))/2);
for (i = 0; i < size; i++)
{
for (j = i+1; j < size; j++)
{
//store the matrix into a file
fprintf(fp, "%d %d %ld\n", i, j, distD(odd_array[i],odd_array[j],posx,posy));
}
}
fclose(fp);
if(system("/home/sparklab/pramod/blossom5-v2.05.src/blossom5 -e odd_edges.txt -w min_edges.txt") != 0)
{
printf("\nError: please install blossom5 matching code\n");
exit(-1);
}
fp = fopen("min_edges.txt", "r");
fgets(line, sizeof(line), fp);
for (i = 0; i < size/2; i++)
{
assert(fgets(line, sizeof(line), fp) != NULL);
assert(sscanf(line, "%d %d", &i, &j) == 2);
if(searchEdge(odd_array[i],odd_array[j],head) == 0)
{
deg[odd_array[i]]+=1;
deg[odd_array[j]]+=1;
node = (struct MST*)malloc(sizeof(struct MST));
node->i = odd_array[i];
node->j = odd_array[j];
node->weight = distD(odd_array[i],odd_array[j], posx, posy);
node->next = NULL;
node->prev = cur;
cur->next = node;
cur = cur->next;
}
}
fclose(fp);
v = (int*) calloc(cities, sizeof(int));
var_deg = (int*) calloc(cities, sizeof(int));
p = head;
while(p != NULL)
{
if(deg[p->i] != 1 && deg[p->j] != 1)
{
p = p->next;
}
else
{
flag = 1;
break;
}
}
//either endpoint i or endpoint j of this edge is a leaf
if(flag == 1)
{ //i is a leaf node
if(deg[p->i] == 1 )
{ //start at the leaf node, make the other one the first non leaf
i = p->i;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[j]++;
}
else
{
i = p->j;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[j]++;
}
count = 2;
p = head;
while(count != cities)
{ //if the current walk endpoint j is not a leaf,
if(deg[j]!= 1)
{
if(p->i == j && !v[p->j])
{//if this edge leaves the current endpoint j (p->i == j) and p->j is unvisited, append p->j
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
j = p->j;
v[j] = 1;
p = p->next;
if(p == NULL)
p = head;
}
else if(p->j == j && !v[p->i])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
j = p->i;
v[j] = 1;
p = p->next;
if(p == NULL)
p = head;
}
else
{ //search the edge list for an edge incident to j whose other endpoint is unvisited
p = head;
while(p != NULL)
{
if( (p->i != j || v[p->j]) && (p->j != j || v[p->i]) )
{//this edge either doesn't touch j or its other endpoint is already visited, so skip it
p = p->next;
}
else
{
flg = 1;
break;
}
}
if(flg == 0)
{//p is null
var_deg[j]++;
et = curr-> prev;
if(rev_node == NULL)
{//dead end: backtrack along the walk to a city that still has unused edges
loop = (struct rev_visit *)malloc(sizeof(struct rev_visit));
loop->i = j;
while(deg[et->city] == var_deg[et->city] || et->city == j)
{
et = et-> prev;
}
j = et->city;
loop->j = j;
rev_node = loop;
p = head;
}
else
{
if(j == rev_node->i || j == rev_node->j)
{
i = j == rev_node->i ? rev_node->j :rev_node->i;
while(deg[et->city]==var_deg[et->city]|| et->city == j || et->city == i)
{
et = et-> prev;
}
}
else
{
while(deg[et->city] == var_deg[et->city] || et->city == j)
{
et = et-> prev;
}
}
rev_node->i = j;
j = et->city;
rev_node->j = j;
p = head;
}
}
flg = 0;
}
}
else
{
rev = curr->prev;
while(deg[rev->city] == var_deg[rev->city] || rev->city == j)
{
rev = rev ->prev;
}
j = rev->city;
p = head;
}
}
v = (int*) calloc(cities, sizeof(int));
i=0;
et = top;
while(et != NULL)
{
if(v[et->city] == 0)
{
r[i++] = et->city;
v[et->city] = 1;
}
et = et->next;
}
}
else
{//if flag==0
v = (int*) calloc(cities, sizeof(int));
p = head;
i = 0;
while(i != cities )
{
if(v[p->i] == 0)
{
r[i++] = p->i;
v[p->i] = 1;
}
if(v[p->j] == 0)
{
r[i++] = p->j;
v[p->j] = 1;
}
p = p->next;
}
}
}
/* Initial solution construction using Clarke-Wright approach */
struct init_route
{
int city;
struct init_route *next;
};
struct clarke_wright
{
int i,j, save;
struct clarke_wright *next;
};
void clarke_wright_init(int *r, float *posx, float *posy, long cities, long no_pairs)
{
int i,j,cnt;
int *v;
struct clarke_wright *cw,*cur,*cw1,*cw2;
struct clarke_wright *top = NULL;
//centre is 0th city
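//for every pair (i,j) compute the Clarke-Wright savings d(0,i)+d(0,j)-d(i,j) and keep the
//pairs in a list sorted by decreasing savings; the tour is then assembled by walking that
//list and appending any endpoint not yet placed.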
for(i=1; i<cities-1; i++)
for(j=i+1; j<cities; j++)
{//make new node and store distance
cw = (struct clarke_wright*)malloc(sizeof(struct clarke_wright) );
cw->save = distD(0,i,posx,posy) + distD(0,j,posx,posy) - distD(i,j,posx,posy);
cw->i = i;
cw->j = j;
if(top==NULL)
{
cw->next= NULL;
top = cw;
cur = cw;
}
else if( cw->save > top->save)
{
cw->next = top;
top = cw;
}
else if (cw->save > cur->save && cw->save < top->save && cur != top)
{
cw1 = top;
cw2 = top->next;
while(cw2->save > cw->save)
{
cw2 = cw2->next;
cw1 = cw1->next;
}
cw->next = cw2;
cw1->next = cw;
}
else
{
cw->next = NULL;
cur->next =cw;
cur = cur->next;
}
}
i = 0;
r[i++] = 0;
v=(int*)calloc(cities,sizeof(int));
v[0] = 1;
cw = top;
r[i++] = cw->i;
v[cw->i] = 1;
r[i++] = cw->j;
v[cw->j] = 1;
cnt = 3;
cw = cw->next;
while(cnt != cities)
{
if( !v[cw->i] && !v[cw->j] )
{
r[i++] = cw->i;
v[cw->i] = 1;
r[i++] = cw->j;
v[cw->j] = 1;
cnt+=2;
}
else if( !v[cw->i] )
{
r[i++] = cw->i;
v[cw->i] = 1;
cnt++;
}
else if( !v[cw->j] )
{
r[i++] = cw->j;
v[cw->j] = 1;
cnt++;
}
cw = cw->next;
}
}
void routeChecker(long N,int *r)
{
int *v,i,flag=0;
v=(int*)calloc(N,sizeof(int));
for(i=0;i<N;i++)
v[r[i]]++;
for(i=0;i<N;i++)
{
if(v[i] != 1 )
{
flag=1;
printf("breaking at %d",i);
break;
}
}
if(flag==1)
printf("\nroute is not valid");
else
printf("\nroute is valid");
}
/*Distance calculation of the initial solution */
long distH(float *px,float *py,long cit)
{
float dx,dy;
long cost=0;
int i;
for(i=0;i<(cit-1);i++)
{
dx=px[i]-px[i+1];
dy=py[i]-py[i+1];
cost+=sqrtf( (dx*dx) + (dy*dy) );
}
dx=px[i]-px[0];
dy=py[i]-py[0];
cost+=sqrtf( (dx*dx) + (dy*dy) );
return cost;
}
int main(int argc, char *argv[])
{
float *posx, *posy;
float *px, *py,tm;
char str[256];
float *d_posx, *d_posy;
long x,y;
int blk,thrd;
clock_t start,end,start1,end1;
long sol,tid,cities,no_pairs,dst,d;
int *route,count=0;
int ch, cnt, in1;
float in2, in3;
unsigned long long *d_dst_tid;
FILE *f;
f = fopen(argv[1], "r");
if (f == NULL) {fprintf(stderr, "could not open file \n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
fscanf(f, "%s\n", str);
cities = atoi(str);
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
posx = (float *)malloc(sizeof(float) * cities); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * cities); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
px = (float *)malloc(sizeof(float) * cities); if (px == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
py = (float *)malloc(sizeof(float) * cities); if (py == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
route = (int *)malloc(sizeof(int) * cities); if (route == NULL) {fprintf(stderr, "cannot allocate route\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
fscanf(f, "%s\n", str);
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cnt = 0;
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3))
{
posx[cnt] = in2;
posy[cnt] = in3;
cnt++;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
fscanf(f, "%s", str);
if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
fflush(f);
fclose(f);
sol=cities*(cities-1)/2;
int intl;
printf("\nChoose an initial solution setup approach\n1.Sequenced\n2.Random\n3.NN\n4.NI\n5.Greedy\n6.MST\n7.Christofide\n8.Clarke-Wright\n");
scanf("%d",&intl);
start = clock();
switch(intl)
{
case 1:
seq_init(route,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 2:
random_init(route,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 3:
dst = nn_init(route,cities,posx,posy);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
break;
case 4:
nearest_insertion(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 5:
greedy(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 6:
mst_init(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 7:
christofide_init(route, posx, posy, cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 8:
no_pairs = (cities-1)*(cities-2)/2;
clarke_wright_init(route, posx, posy, cities, no_pairs);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
}
end = clock();
tm = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\ninitial cost : %ld time : %f\n",dst,tm);
start1 = clock();
count = 1;
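/* seed the packed (cost,id) word with cost = dst and id = 0xffffffff, i.e. (((long)dst+1)<<32)-1,
   so any move the kernel finds that is at least as cheap overwrites it via atomicMin;
   the climbing loops below keep iterating only while the unpacked cost strictly improves */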
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
unsigned long long dtid;
long itr=floor(cities/2);
int nx, ny;
if(cities <= 32)
{
blk = 1 ;
nx = cities;
ny = cities;
}
else
{
blk = (cities - 1) / 32 + 1;
nx = 32;
ny = 32;
}
dim3 thrds (nx,ny);
dim3 blks (blk,blk);
if(hipSuccess!=hipMalloc((void**)&d_posx,sizeof(float)*cities))
printf("\nCan't allocate memory for coordinate x on GPU");
if(hipSuccess!=hipMalloc((void**)&d_posy,sizeof(float)*cities))
printf("\nCan't allocate memory for coordinate y on GPU");
if(hipSuccess!=hipMalloc((void**)&d_dst_tid,sizeof(unsigned long long)))
printf("\nCan't allocate memory for dst_tid on GPU");
if(hipSuccess!=hipMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),hipMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
if(hipSuccess!=hipMemcpy(d_posx,px,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(hipSuccess!=hipMemcpy(d_posy,py,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
int strat;
printf("\n Choose a CUDA thread mapping strategy\n1.TPR\n2.TPRED\n3.TPRC\n4.TPN\n");
scanf("%d",&strat);
switch(strat)
{
case 1:
if(cities<=1024)
{
blk=1;
thrd=cities;
}
else
{
blk=(cities-1)/1024+1;
thrd=1024;
}
hipLaunchKernelGGL(( tsp_tpr), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(hipSuccess!=hipMemcpy(d_posx,px,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(hipSuccess!=hipMemcpy(d_posy,py,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(hipSuccess!=hipMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),hipMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
hipLaunchKernelGGL(( tsp_tpr), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 2:
if(cities<1024)
{
blk=1;
thrd=cities;
}
else
{
blk=(cities-1)/1024+1;
thrd=1024;
}
hipLaunchKernelGGL(( tsp_tpred), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities,itr);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(hipSuccess!=hipMemcpy(d_posx,px,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(hipSuccess!=hipMemcpy(d_posy,py,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(hipSuccess!=hipMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),hipMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
hipLaunchKernelGGL(( tsp_tpred), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities,itr);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 3:
hipLaunchKernelGGL(( tsp_tprc), dim3(blks),dim3(thrds), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(hipSuccess!=hipMemcpy(d_posx,px,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(hipSuccess!=hipMemcpy(d_posy,py,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(hipSuccess!=hipMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),hipMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
hipLaunchKernelGGL(( tsp_tprc), dim3(blks),dim3(thrds), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 4:
if(sol < 1024)
{
blk=1;
thrd=sol;
}
else
{
blk=(sol-1)/1024+1;
thrd=1024;
}
hipLaunchKernelGGL(( tsp_tpn), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities,sol);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(hipSuccess!=hipMemcpy(d_posx,px,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(hipSuccess!=hipMemcpy(d_posy,py,sizeof(float)*cities,hipMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(hipSuccess!=hipMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),hipMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
hipLaunchKernelGGL(( tsp_tpn), dim3(blk),dim3(thrd), 0, 0, d_posx,d_posy,dst,d_dst_tid,cities,sol);
if(hipSuccess!=hipMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),hipMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
}
printf("\nMinimal Distance : %ld\n",d);
printf("\nnumber of time climbed %d\n",count);
end1 = clock();
double t=((double) (end1 - start1)) / CLOCKS_PER_SEC;
printf("\ntime : %f\n",t);
hipFree(d_posy);
hipFree(d_posx);
hipFree(d_dst_tid);
free(posx);
free(posy);
free(px);
free(py);
free(route);
return 0;
}
| ebac250dd19d04af66f5e407ad6ea622092302f0.cu | #include"stdio.h"
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include"math.h"
#include <ctype.h>
#include <assert.h>
/* Euclidean distance calculation */
__host__ __device__ long distD(int i,int j,float *x,float*y)
{
float dx=x[i]-x[j];
float dy=y[i]-y[j];
return(sqrtf( (dx*dx) + (dy*dy) ));
}
//all of these mapping strategies evaluate the same 2-opt move neighborhood
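/* Note on the kernels below: a candidate 2-opt move removes tour edges (i,i+1) and (j,j+1), i<j.
   The pair (i,j) is flattened to id = i*(cit-1)+(j-1)-i*(i+1)/2 (row-major over the upper triangle),
   and a move's resulting tour cost and its id are packed into one 64-bit word as (cost<<32)|id,
   so a single atomicMin on *dst_tid keeps the cheapest move (ties going to the smaller id);
   the host later unpacks the cost with >>32 and the id with &((1ull<<32)-1). */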
/*A kernel function that finds a minimum-cost neighboring tour using the TPR mapping strategy*/
__global__ void tsp_tpr(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit)
{
//threads per row strategy
long id,j;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{ //
for(j=i+1;j<cit;j++)
{//pox and poy are arrays that store the positions (x y) of ith city
change = 0; cost=initcost;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(cost < mincost)
{
mincost = cost;
id = i * (cit-1)+(j-1)-i*(i+1)/2;
}
}
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
/*A kernel function that finds a minimum-cost neighboring tour using the TPRED mapping strategy*/
__global__ void tsp_tpred(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit,long itr)
{
long id,j,k;
register long change,mincost=initcost,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
if(i < cit)
{
//itr bounds how many candidate moves each thread evaluates
for(k=0;k<itr;k++)
{
change = 0; cost=initcost;
j=(i+1+k)%cit;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(cost < mincost)
{
mincost = cost;
if(i < j)
id = i * (cit-1)+(j-1)-i*(i+1)/2;
else
id = j * (cit-1)+(i-1)-j*(j+1)/2;
}
}
if(mincost < initcost)
atomicMin(dst_tid, ((unsigned long long)mincost << 32) | id);
}
}
/*A kernel function that finds a minimum-cost neighboring tour using the TPRC mapping strategy*/
__global__ void tsp_tprc(float *pox,float *poy,long initcost,unsigned long long *dst_tid,long cit)
{
long id;
long change,cost;
long i=threadIdx.x+blockIdx.x*blockDim.x;
long j=threadIdx.y+blockIdx.y*blockDim.y;
//if city in bounds and the column you choose is more than the row, so there is no repeat issues
if(i < cit && j < cit && i < j)
{
change = 0; cost = initcost;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(change < 0)
{
id = i * (cit - 1) + (j - 1) - i * (i + 1) / 2;
atomicMin(dst_tid, ((unsigned long long)cost << 32) | id);
}
}
}
/*A kernel function that finds a minimum-cost neighboring tour using the TPN mapping strategy*/
__global__ void tsp_tpn(float *pox,float *poy,long cost,unsigned long long *dst_tid,long cit,long sol)
{
long i,j;
register long change=0;
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(id<sol)
{
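/* recover (i,j) from the flat pair index id: sol = cit*(cit-1)/2 is the total number of pairs,
   and the closed form below inverts id = i*(cit-1)+(j-1)-i*(i+1)/2 */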
i=cit-2-floorf(((int)__dsqrt_rn(8*(sol-id-1)+1)-1)/2);
j=id-i*(cit-1)+(i*(i+1)/2)+1;
change=distD(i,j,pox,poy)+distD((i+1)%cit,(j+1)%cit,pox,poy)-distD(i,(i+1)%cit,pox,poy)-distD(j,(j+1)%cit,pox,poy);
cost+=change;
if(change < 0)
atomicMin(dst_tid, ((unsigned long long)cost << 32) | id);
}
}
/* At each IHC step, the XY coordinates are rearranged into the next solution's order by reversing the tour segment between positions x+1 and y (the accepted 2-opt move) */
void twoOpt(long x,long y,float *pox,float *poy)
{
float *tmp_x,*tmp_y;
int i,j;
tmp_x=(float*)malloc(sizeof(float)*(y-x));
tmp_y=(float*)malloc(sizeof(float)*(y-x));
for(j=0,i=y;i>x;i--,j++)
{
tmp_x[j]=pox[i];
tmp_y[j]=poy[i];
}
for(j=0,i=x+1;i<=y;i++,j++)
{
pox[i]=tmp_x[j];
poy[i]=tmp_y[j];
}
free(tmp_x);
free(tmp_y);
}
/*Arranges XY coordinates in initial solution's order*/
void setCoord(int *r,float *posx,float *posy,float *px,float *py,long cities)
{
for(int i=0;i<cities;i++)
{
px[i]=posx[r[i]];
py[i]=posy[r[i]];
}
}
/* Initial solution construction using NN */
long nn_init(int *route,long cities,float *posx,float*posy)
{ //route stores the route taken, cities is the number of cities, posx and posy are the positions of the ith city
route[0]=0;
int k=1,i=0,j;
float min;
int minj,mini,count=1,flag=0;
long dst=0;
int *visited=(int*)calloc(cities,sizeof(int));
visited[0]=1;
while(count!=cities)
{
flag=0;
for(j=1;j<cities;j++)
{ //if j isn't visited yet
if(i!=j && !visited[j])
{
min=distD(i,j,posx,posy);
minj=j;
break;
}
}
//for the minimum cost j
for(j=minj+1;j<cities;j++)
{//for every node from the min cost, if you haven't visited, then check and generate the pair
if( !visited[j])
{
if(min>distD(i,j,posx,posy))
{
min=distD(i,j,posx,posy);
mini=j;
flag=1;
}
}
}
if(flag==0)
i=minj;
else
i=mini;
dst+=min;
route[k++]=i;
visited[i]=1;
count++;
}
free(visited);
dst+=distD(route[0],route[cities-1],posx,posy);
return dst;
}
/* Initial solution construction using sequenced approach */
void seq_init(int*route,long N)
{
int i;
for(i=0;i<N;i++)
route[i]=i;
}
/* Initial solution construction using random approach */
void random_init(int *route,long cities)
{
int i=0,j;
int *visited = (int*)calloc(cities,sizeof(int));
while(i<cities)
{
//srand (clock() );
j=rand() % cities;
if(visited[j])
{
continue;
}
else
{
route[i]=j;
visited[j]=1;
i++;
}
}
free(visited);
}
struct nearest_insert
{
int city;
struct nearest_insert *next;
};
struct odd_degree
{
int city;
struct odd_degree *next;
};
struct rev_visit
{
int i,j;
struct rev_visit *next;
};
/* Initial solution construction using nearest insertion approach */
void nearest_insertion(int *r, float *posx, float *posy, long cities)
{
struct nearest_insert *node,*p1,*tmp,*current,*route,*first = NULL;
int i,j,dist,min=0;
int count,minI,minJ;
int min_diff,diff,min_i,min_j;
int *v;
v = (int *)calloc(cities, sizeof(int));
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = 0;
node->next = NULL;
first = node;
current = node;
count = 1;
v[0]=1;
while(count != cities)
{//as long as there are new cities
min = 0;
for(route = first; route != NULL; route=route->next)
{//traverse the whole route to find the shortest edge
i = route->city;
for(j = 0; j < cities; j++)
{
if(i !=j &&!v[j])
{
dist = distD(i,j,posx,posy);
if(min==0)
{
min=dist;
minI=i;
minJ=j;
}
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
}
//mark the newly chosen city as visited
v[minJ]=1;
//when you're starting out
if(count < 3)
{
if(first->city == minI)
{
if(first->next == NULL)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
first->next = node;
current = current->next;
}
else
{
tmp = first->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
first->next = node;
}
}
else if(current->city == minI)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
else
{
p1 = first->next;
while (p1->city != minI)
p1=p1->next;
tmp = p1->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
p1->next = node;
}
}
else
{//three or more cities are already in the partial tour
p1 = first;
min_i = p1->city;
min_j = p1->next->city;
min_diff = distD(min_i,minJ,posx,posy) + distD(minJ,min_j,posx,posy) - distD(min_i,min_j,posx,posy);
p1 = p1->next;
while(p1->next!=NULL)
{//go through the path
i = p1->city;
j = p1->next->city;
//cost change of inserting minJ between consecutive tour cities i and j
diff = distD(i,minJ,posx,posy) + distD(minJ,j,posx,posy) - distD(i,j,posx,posy);
if(min_diff > diff )
{
min_diff = diff;
min_i = i;
min_j = j;
}
p1 = p1->next;
}
i = p1->city;
j = 0;
diff = distD(i,minJ,posx,posy) + distD(minJ,j,posx,posy) - distD(i,j,posx,posy);
//and cycle around
if(min_diff > diff )
{
min_diff = diff;
min_i = i;
min_j = j;
}
if(current->city == min_i)
{
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
else
{
p1 = first;
while (p1->city != min_i)
{ p1=p1->next;}
tmp = p1->next;
node = (struct nearest_insert *)malloc(sizeof(struct nearest_insert ));
node->city = minJ;
node->next = tmp;
p1->next = node;
}
}
count++;
}
i=0;
p1=first;
while(p1!=NULL)
{
r[i] = p1->city;
p1=p1->next;
i++;
}
}
struct greedy
{
int city;
struct greedy *next;
};
/* Initial solution construction using greedy approach */
void greedy(int *r, float *posx, float *posy, long cities)
{
struct greedy *node,*p1,*current,*first = NULL;
int i,j,min=0,dist;
int count,minI,minJ;
int *v;
v = (int *)calloc(cities, sizeof(int));
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = 0;
node->next = NULL;
first = node;
current = node;
count = 1;
v[0]=1;
min = 0;
while(count != cities)
{ //operate from the first city,
i = first->city;
min = 0;
//keep exploring cities
for(j = 0; j < cities; j++)
{ //until you find a new one
if(!v[j] && i != j)
{ //calc distance and store min dist
dist = distD(i,j,posx,posy);
if(min==0)
{
min=dist;
minI=i;
minJ=j;
}
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
//if this is not the first pass
if(first != current)
{
i = current->city;
for(j = 0; j < cities; j++)
{//also scan from the current tail of the tour; same nearest-city search as above
if(!v[j] && i != j)
{
dist = distD(i,j,posx,posy);
if(min>dist)
{
min=dist;
minI=i;
minJ=j;
}
}
}
}
v[minJ]=1;
if(first->city == minI)
{
if(first->next == NULL)
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = NULL;
first->next = node;
current = current->next;
}
else
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = first;
first = node;
}
}
else
{
if (current->city == minI)
{
node = (struct greedy *)malloc(sizeof(struct greedy ));
node->city = minJ;
node->next = NULL;
current->next = node;
current = current->next;
}
}
count++;
}
i=0;
p1=first;
while(p1!=NULL)
{
r[i] = p1->city;
p1=p1->next;
i++;
}
}
struct visit_list
{
int city;
struct visit_list *next;
};
struct MST
{
int i,j,weight;
struct MST *next;
struct MST *prev;
};
struct eul_tour
{
int city;
struct eul_tour *next;
struct eul_tour *prev;
};
/* Initial solution construction using MST approach */
//minimum spanning tree
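//sketch of the routine below: grow the tree Prim-style from city 0 by repeatedly adding the
//cheapest edge to an unvisited city, then walk the edge list (backtracking at dead ends) to
//flatten the tree into a tour; duplicate visits are filtered out at the end.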
void mst_init(int *r, float *posx, float *posy, long cities)
{
int *deg,*var_deg,dist;
int i,j, min,min_i,min_j, count,*v;
struct eul_tour *et,*top=NULL,*curr, *node1,*rev;
struct visit_list *first=NULL,*current, *p1,*visited;
struct MST *head =NULL, *cur, *node,*p;
deg = (int*)calloc(cities,sizeof(int) );
v = (int*) calloc(cities, sizeof(int));
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = 0;
visited->next = NULL;
first = visited;
current = first;
count = 1;
p1 =first;
v[0] = 1;
while(count != cities )
{ //while all cities aren't explored
min = 0;
for(p1 = first; p1!=NULL; p1=p1->next)
{
//fix a node,
i = p1->city;
for(j = 0; j < cities; j++)
{
//check and find the smallest edge with that node
if(i != j && !v[j])
{
dist = distD(i,j,posx,posy);
if(min == 0 )
{
min = dist;
min_i =i;
min_j =j;
}
if(min > dist)
{
min = dist;
min_i =i;
min_j =j;
}
}
}
}
v[min_j] =1;
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = min_j;
visited->next = NULL;
current->next =visited;
current = current->next;
//and now add that edge
deg[min_i]+=1;
deg[min_j]+=1;
//make a node of the mst,
//and add that edge
node = (struct MST*)malloc(sizeof(struct MST));
node->i = min_i;
node->j = min_j;
node->weight = min;
node->next = NULL;
//linked list stuff
if(head == NULL)
{
node->prev = NULL;
head = node;
cur = node;
}
else
{
node->prev = cur;
cur->next = node;
cur = cur->next;
}
count++;
}
v = (int*) calloc(cities, sizeof(int));
var_deg = (int*) calloc(cities, sizeof(int));
p = head;
//find a leaf,
while(deg[p->i] != 1 && deg[p->j] != 1)
p = p->next;
//take the leaf city,
if(deg[p->i] == 1 )
{ //take the leaf, make it the first node of the walk, and append the other endpoint of its edge
i = p->i;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[j]++;
}
else
{
i = p->j;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[j]++;
}
//now we have 2 nodes, i.e. one edge,
count = 2;
p = head;
while(count != cities)
{
if(deg[j]!= 1)
{
if(p->i == j && !v[p->j])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
p = p->next;
}
else if(p->j == j && !v[p->i])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
p = p->next;
}
else
{
p = head;
while( (p->i != j || v[p->j]) && (p->j != j || v[p->i]) )
p = p->next;
}
}
else
{
rev = curr->prev;
while(deg[rev->city] == var_deg[rev->city])
{
rev = rev ->prev;
}
j = rev->city;
p = head;
}
}
v = (int*) calloc(cities, sizeof(int));
i=0;
et = top;
while(et != NULL)
{
if(v[et->city] == 0)
{
r[i++] = et->city;
v[et->city] = 1;
}
et = et->next;
}
}
//if the edge exists, then return 1
int searchEdge(int min_i,int min_j, struct MST * p)
{
int flag =0;
while(p != NULL )
{
if( (p->i == min_i && p->j == min_j) || (p->i == min_j && p->j == min_i ) )
{
flag = 1;
break;
}
p = p->next;
}
if(flag == 1 )
return 1;
else
return 0;
}
/* Initial solution construction using Christofides' approach */
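//outline: build an MST, collect the odd-degree vertices, dump their pairwise distances to
//odd_edges.txt, run the external Blossom V matcher for a minimum-weight perfect matching,
//add the matching edges to the tree, and walk the resulting multigraph into a tour.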
void christofide_init(int *r, float *posx, float *posy, long cities)
{
int *deg,*var_deg,dist,flg=0;
int i,j, min,min_i,min_j, count,*v,size;
int *odd_array,flag = 0;
FILE *fp; char line[100];
struct eul_tour *et,*top=NULL,*curr, *node1,*rev;
struct visit_list *first=NULL,*current, *p1,*visited;
struct MST *head =NULL, *cur, *node,*p;
struct odd_degree *init=NULL, *at, *odd;
struct rev_visit* rev_node=NULL,*loop;
deg = (int*)calloc(cities,sizeof(int));
v = (int*) calloc(cities, sizeof(int));
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = 0;
visited->next = NULL;
first = visited;
current = first;
count = 1;
p1 =first;
v[0] = 1;
while(count != cities )
{
min = 0;
for(p1 = first; p1!=NULL; p1=p1->next)
{
i = p1->city;
for(j = 0; j < cities; j++)
{
if(i != j && !v[j])
{
dist = distD(i,j,posx,posy);
if(min == 0 )
{
min = dist;
min_i =i;
min_j =j;
}
if(min > dist)
{
min = dist;
min_i =i;
min_j =j;
}
}
}
}
v[min_j] =1;
visited = (struct visit_list*)malloc(sizeof(struct visit_list));
visited->city = min_j;
visited->next = NULL;
current->next =visited;
current = current->next;
deg[min_i]+=1;
deg[min_j]+=1;
node = (struct MST*)malloc(sizeof(struct MST));
node->i = min_i;
node->j = min_j;
node->weight = min;
node->next = NULL;
if(head == NULL)
{
node->prev = NULL;
head = node;
cur = node;
}
else
{
node->prev = cur;
cur->next = node;
cur = cur->next;
}
count++;
}
p = head;
size = 0;
//make set of all odd degree nodes,
for(i = 0; i < cities; i++)
{
if(deg[i]%2 != 0)
{
odd = (struct odd_degree*)malloc(sizeof(struct odd_degree));
odd->city = i;
odd->next = NULL;
if(init == NULL)
{
init = odd;
at = odd;
}
else
{
at->next = odd;
at = at->next;
}
size++;
}
}
odd_array = (int*)malloc(sizeof(int)*size);
odd = init;
i = 0;
while(odd != NULL)
{
odd_array[i++] = odd->city;
odd = odd->next;
}
//odd_array has all nodes with odd degrees
v = (int*) calloc(size, sizeof(int));
//the number of odd-degree vertices in any graph is even
assert(size % 2 == 0);
fp = fopen("odd_edges.txt", "w");
//need at least one pair of odd-degree vertices to match
assert(size >= 2);
fprintf(fp, "%d %d\n", size, (size*(size-1))/2);
for (i = 0; i < size; i++)
{
for (j = i+1; j < size; j++)
{
//store the matrix into a file
fprintf(fp, "%d %d %ld\n", i, j, distD(odd_array[i],odd_array[j],posx,posy));
}
}
fclose(fp);
if(system("/home/sparklab/pramod/blossom5-v2.05.src/blossom5 -e odd_edges.txt -w min_edges.txt") != 0)
{
printf("\nError: please install blossom5 matching code\n");
exit(-1);
}
fp = fopen("min_edges.txt", "r");
fgets(line, sizeof(line), fp);
for (i = 0; i < size/2; i++)
{
assert(fgets(line, sizeof(line), fp) != NULL);
assert(sscanf(line, "%d %d", &i, &j) == 2);
if(searchEdge(odd_array[i],odd_array[j],head) == 0)
{
deg[odd_array[i]]+=1;
deg[odd_array[j]]+=1;
node = (struct MST*)malloc(sizeof(struct MST));
node->i = odd_array[i];
node->j = odd_array[j];
node->weight = distD(odd_array[i],odd_array[j], posx, posy);
node->next = NULL;
node->prev = cur;
cur->next = node;
cur = cur->next;
}
}
fclose(fp);
v = (int*) calloc(cities, sizeof(int));
var_deg = (int*) calloc(cities, sizeof(int));
p = head;
while(p != NULL)
{
if(deg[p->i] != 1 && deg[p->j] != 1)
{
p = p->next;
}
else
{
flag = 1;
break;
}
}
//either endpoint i or endpoint j of this edge is a leaf
if(flag == 1)
{ //i is a leaf node
if(deg[p->i] == 1 )
{ //start at the leaf node, make the other one the first non leaf
i = p->i;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->j;
v[j] = 1;
var_deg[j]++;
}
else
{
i = p->j;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = i;
node1->next = NULL;
node1->prev = NULL;
top = node1;
curr = node1;
v[i] = 1;
var_deg[i]++;
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
j = p->i;
v[j] = 1;
var_deg[j]++;
}
count = 2;
p = head;
while(count != cities)
{ //if the current walk endpoint j is not a leaf,
if(deg[j]!= 1)
{
if(p->i == j && !v[p->j])
{//if this edge leaves the current endpoint j (p->i == j) and p->j is unvisited, append p->j
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->j;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
j = p->j;
v[j] = 1;
p = p->next;
if(p == NULL)
p = head;
}
else if(p->j == j && !v[p->i])
{
node1 = (struct eul_tour*)malloc(sizeof(struct eul_tour));
node1->city = p->i;
node1->next = NULL;
node1->prev = curr;
curr->next = node1;
curr = curr->next;
var_deg[p->i]++;
var_deg[p->j]++;
count++;
j = p->i;
v[j] = 1;
p = p->next;
if(p == NULL)
p = head;
}
else
{ //search the edge list for an edge incident to j whose other endpoint is unvisited
p = head;
while(p != NULL)
{
if( (p->i != j || v[p->j]) && (p->j != j || v[p->i]) )
{//this edge either doesn't touch j or its other endpoint is already visited, so skip it
p = p->next;
}
else
{
flg = 1;
break;
}
}
if(flg == 0)
{//p is null
var_deg[j]++;
et = curr-> prev;
if(rev_node == NULL)
{//dead end: backtrack along the walk to a city that still has unused edges
loop = (struct rev_visit *)malloc(sizeof(struct rev_visit));
loop->i = j;
while(deg[et->city] == var_deg[et->city] || et->city == j)
{
et = et-> prev;
}
j = et->city;
loop->j = j;
rev_node = loop;
p = head;
}
else
{
if(j == rev_node->i || j == rev_node->j)
{
i = j == rev_node->i ? rev_node->j :rev_node->i;
while(deg[et->city]==var_deg[et->city]|| et->city == j || et->city == i)
{
et = et-> prev;
}
}
else
{
while(deg[et->city] == var_deg[et->city] || et->city == j)
{
et = et-> prev;
}
}
rev_node->i = j;
j = et->city;
rev_node->j = j;
p = head;
}
}
flg = 0;
}
}
else
{
rev = curr->prev;
while(deg[rev->city] == var_deg[rev->city] || rev->city == j)
{
rev = rev ->prev;
}
j = rev->city;
p = head;
}
}
v = (int*) calloc(cities, sizeof(int));
i=0;
et = top;
while(et != NULL)
{
if(v[et->city] == 0)
{
r[i++] = et->city;
v[et->city] = 1;
}
et = et->next;
}
}
else
{//if flag==0
v = (int*) calloc(cities, sizeof(int));
p = head;
i = 0;
while(i != cities )
{
if(v[p->i] == 0)
{
r[i++] = p->i;
v[p->i] = 1;
}
if(v[p->j] == 0)
{
r[i++] = p->j;
v[p->j] = 1;
}
p = p->next;
}
}
}
/* Initial solution construction using Clarke-Wright approach */
struct init_route
{
int city;
struct init_route *next;
};
struct clarke_wright
{
int i,j, save;
struct clarke_wright *next;
};
void clarke_wright_init(int *r, float *posx, float *posy, long cities, long no_pairs)
{
int i,j,cnt;
int *v;
struct clarke_wright *cw,*cur,*cw1,*cw2;
struct clarke_wright *top = NULL;
//centre is 0th city
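//for every pair (i,j) compute the Clarke-Wright savings d(0,i)+d(0,j)-d(i,j) and keep the
//pairs in a list sorted by decreasing savings; the tour is then assembled by walking that
//list and appending any endpoint not yet placed.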
for(i=1; i<cities-1; i++)
for(j=i+1; j<cities; j++)
{//make new node and store distance
cw = (struct clarke_wright*)malloc(sizeof(struct clarke_wright) );
cw->save = distD(0,i,posx,posy) + distD(0,j,posx,posy) - distD(i,j,posx,posy);
cw->i = i;
cw->j = j;
if(top==NULL)
{
cw->next= NULL;
top = cw;
cur = cw;
}
else if( cw->save > top->save)
{
cw->next = top;
top = cw;
}
else if (cw->save > cur->save && cw->save < top->save && cur != top)
{
cw1 = top;
cw2 = top->next;
while(cw2->save > cw->save)
{
cw2 = cw2->next;
cw1 = cw1->next;
}
cw->next = cw2;
cw1->next = cw;
}
else
{
cw->next = NULL;
cur->next =cw;
cur = cur->next;
}
}
i = 0;
r[i++] = 0;
v=(int*)calloc(cities,sizeof(int));
v[0] = 1;
cw = top;
r[i++] = cw->i;
v[cw->i] = 1;
r[i++] = cw->j;
v[cw->j] = 1;
cnt = 3;
cw = cw->next;
while(cnt != cities)
{
if( !v[cw->i] && !v[cw->j] )
{
r[i++] = cw->i;
v[cw->i] = 1;
r[i++] = cw->j;
v[cw->j] = 1;
cnt+=2;
}
else if( !v[cw->i] )
{
r[i++] = cw->i;
v[cw->i] = 1;
cnt++;
}
else if( !v[cw->j] )
{
r[i++] = cw->j;
v[cw->j] = 1;
cnt++;
}
cw = cw->next;
}
}
void routeChecker(long N,int *r)
{
int *v,i,flag=0;
v=(int*)calloc(N,sizeof(int));
for(i=0;i<N;i++)
v[r[i]]++;
for(i=0;i<N;i++)
{
if(v[i] != 1 )
{
flag=1;
printf("breaking at %d",i);
break;
}
}
if(flag==1)
printf("\nroute is not valid");
else
printf("\nroute is valid");
}
/*Distance calculation of the initial solution */
long distH(float *px,float *py,long cit)
{
float dx,dy;
long cost=0;
int i;
for(i=0;i<(cit-1);i++)
{
dx=px[i]-px[i+1];
dy=py[i]-py[i+1];
cost+=sqrtf( (dx*dx) + (dy*dy) );
}
dx=px[i]-px[0];
dy=py[i]-py[0];
cost+=sqrtf( (dx*dx) + (dy*dy) );
return cost;
}
int main(int argc, char *argv[])
{
float *posx, *posy;
float *px, *py,tm;
char str[256];
float *d_posx, *d_posy;
long x,y;
int blk,thrd;
clock_t start,end,start1,end1;
long sol,tid,cities,no_pairs,dst,d;
int *route,count=0;
int ch, cnt, in1;
float in2, in3;
unsigned long long *d_dst_tid;
FILE *f;
f = fopen(argv[1], "r");
if (f == NULL) {fprintf(stderr, "could not open file \n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
fscanf(f, "%s\n", str);
cities = atoi(str);
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
posx = (float *)malloc(sizeof(float) * cities); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * cities); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
px = (float *)malloc(sizeof(float) * cities); if (px == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
py = (float *)malloc(sizeof(float) * cities); if (py == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
route = (int *)malloc(sizeof(int) * cities); if (route == NULL) {fprintf(stderr, "cannot allocate route\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
fscanf(f, "%s\n", str);
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cnt = 0;
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3))
{
posx[cnt] = in2;
posy[cnt] = in3;
cnt++;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
fscanf(f, "%s", str);
if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
fflush(f);
fclose(f);
sol=cities*(cities-1)/2;
int intl;
printf("\nChoose an initial solution setup approach\n1.Sequenced\n2.Random\n3.NN\n4.NI\n5.Greedy\n6.MST\n7.Christofide\n8.Clarke-Wright\n");
scanf("%d",&intl);
start = clock();
switch(intl)
{
case 1:
seq_init(route,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 2:
random_init(route,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 3:
dst = nn_init(route,cities,posx,posy);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
break;
case 4:
nearest_insertion(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 5:
greedy(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 6:
mst_init(route,posx,posy,cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 7:
christofide_init(route, posx, posy, cities);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
case 8:
no_pairs = (cities-1)*(cities-2)/2;
clarke_wright_init(route, posx, posy, cities, no_pairs);
routeChecker(cities, route);
setCoord(route,posx,posy,px,py,cities);
dst=distH(px,py,cities);
break;
}
end = clock();
tm = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\ninitial cost : %ld time : %f\n",dst,tm);
start1 = clock();
count = 1;
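/* seed the packed (cost,id) word with cost = dst and id = 0xffffffff, i.e. (((long)dst+1)<<32)-1,
   so any move the kernel finds that is at least as cheap overwrites it via atomicMin;
   the climbing loops below keep iterating only while the unpacked cost strictly improves */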
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
unsigned long long dtid;
long itr=floor(cities/2);
int nx, ny;
if(cities <= 32)
{
blk = 1 ;
nx = cities;
ny = cities;
}
else
{
blk = (cities - 1) / 32 + 1;
nx = 32;
ny = 32;
}
dim3 thrds (nx,ny);
dim3 blks (blk,blk);
if(cudaSuccess!=cudaMalloc((void**)&d_posx,sizeof(float)*cities))
printf("\nCan't allocate memory for coordinate x on GPU");
if(cudaSuccess!=cudaMalloc((void**)&d_posy,sizeof(float)*cities))
printf("\nCan't allocate memory for coordinate y on GPU");
if(cudaSuccess!=cudaMalloc((void**)&d_dst_tid,sizeof(unsigned long long)))
printf("\nCan't allocate memory for dst_tid on GPU");
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
if(cudaSuccess!=cudaMemcpy(d_posx,px,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(cudaSuccess!=cudaMemcpy(d_posy,py,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
int strat;
printf("\n Choose a CUDA thread mapping strategy\n1.TPR\n2.TPRED\n3.TPRC\n4.TPN\n");
scanf("%d",&strat);
switch(strat)
{
case 1:
if(cities<=1024)
{
blk=1;
thrd=cities;
}
else
{
blk=(cities-1)/1024+1;
thrd=1024;
}
tsp_tpr<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
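// Decode the winning pair index into the 2-opt move (x,y): tid enumerates the
// cities*(cities-1)/2 unordered city pairs in row-major triangular order, and
// the sqrt expression below is the closed-form inverse of that numbering.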
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(cudaSuccess!=cudaMemcpy(d_posx,px,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(cudaSuccess!=cudaMemcpy(d_posy,py,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
tsp_tpr<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 2:
if(cities<1024)
{
blk=1;
thrd=cities;
}
else
{
blk=(cities-1)/1024+1;
thrd=1024;
}
tsp_tpred<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities,itr);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(cudaSuccess!=cudaMemcpy(d_posx,px,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(cudaSuccess!=cudaMemcpy(d_posy,py,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
tsp_tpred<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities,itr);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 3:
tsp_tprc<<<blks,thrds>>>(d_posx,d_posy,dst,d_dst_tid,cities);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(cudaSuccess!=cudaMemcpy(d_posx,px,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(cudaSuccess!=cudaMemcpy(d_posy,py,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
tsp_tprc<<<blks,thrds>>>(d_posx,d_posy,dst,d_dst_tid,cities);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
case 4:
if(sol < 1024)
{
blk=1;
thrd=sol;
}
else
{
blk=(sol-1)/1024+1;
thrd=1024;
}
tsp_tpn<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities,sol);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,px,py);
if(cudaSuccess!=cudaMemcpy(d_posx,px,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer px on GPU");
if(cudaSuccess!=cudaMemcpy(d_posy,py,sizeof(float)*cities,cudaMemcpyHostToDevice))
printf("\nCan't transfer py on GPU");
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\nCan't transfer dst_tid on GPU");
tsp_tpn<<<blk,thrd>>>(d_posx,d_posy,dst,d_dst_tid,cities,sol);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
break;
}
printf("\nMinimal Distance : %ld\n",d);
printf("\nnumber of time climbed %d\n",count);
end1 = clock();
double t=((double) (end1 - start1)) / CLOCKS_PER_SEC;
printf("\ntime : %f\n",t);
cudaFree(d_posy);
cudaFree(d_posx);
cudaFree(d_dst_tid);
free(posx);
free(posy);
free(px);
free(py);
free(route);
return 0;
}
|
93078f41756c9b7e8f6ba0e52759c8813b0ac4cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
__global__ void addVectors(int N, float *a, float *b, float *c) {
int n = threadIdx.x + blockIdx.x*blockDim.x;
if(n<N) {
c[n] = a[n] + b[n];
}
}
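// Each thread handles one element: n is the global thread index, and the n<N
// guard keeps threads of a partially filled last block from writing past the
// arrays (redundant here since 100 splits evenly into blocks of 10, but it is
// the usual safe pattern).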
int main(int argc, char **argv) {
int N = 100;
//Host memory allocation
float *h_a = (float*) malloc(N*sizeof(float));
float *h_b = (float*) malloc(N*sizeof(float));
float *h_c = (float*) malloc(N*sizeof(float));
int n;
for(n=0;n<N;n++) {
h_a[n] = 1+n;
h_b[n] = 1-n;
}
// Device memory allocation
float *d_a, *d_b, *d_c;
hipMalloc(&d_a, N*sizeof(float));
hipMalloc(&d_b, N*sizeof(float));
hipMalloc(&d_c, N*sizeof(float));
// Copy data from host to device
hipMemcpy(d_a, h_a, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N*sizeof(float), hipMemcpyHostToDevice);
// launch configuration: ceiling division so the grid covers all N elements
int NthreadsPerBlock = 10;
int NthreadBlocks = (N+NthreadsPerBlock-1)/NthreadsPerBlock ;
hipLaunchKernelGGL(( addVectors), dim3(NthreadBlocks), dim3(NthreadsPerBlock), 0, 0, N,d_a,d_b,d_c);
//copy result from device to host
hipMemcpy(h_c, d_c, N*sizeof(float), hipMemcpyDeviceToHost);
for(n=0;n<5;++n) {
printf("h_c[%d] = %g\n",n,h_c[n]);
}
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 93078f41756c9b7e8f6ba0e52759c8813b0ac4cc.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
__global__ void addVectors(int N, float *a, float *b, float *c) {
int n = threadIdx.x + blockIdx.x*blockDim.x;
if(n<N) {
c[n] = a[n] + b[n];
}
}
int main(int argc, char **argv) {
int N = 100;
//Host memory allocation
float *h_a = (float*) malloc(N*sizeof(float));
float *h_b = (float*) malloc(N*sizeof(float));
float *h_c = (float*) malloc(N*sizeof(float));
int n;
for(n=0;n<N;n++) {
h_a[n] = 1+n;
h_b[n] = 1-n;
}
// Device memory allocation
float *d_a, *d_b, *d_c;
cudaMalloc(&d_a, N*sizeof(float));
cudaMalloc(&d_b, N*sizeof(float));
cudaMalloc(&d_c, N*sizeof(float));
// Copy data from host to device
cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice);
// launch configuration: ceiling division so the grid covers all N elements
int NthreadsPerBlock = 10;
int NthreadBlocks = (N+NthreadsPerBlock-1)/NthreadsPerBlock ;
addVectors<<<NthreadBlocks, NthreadsPerBlock>>>(N,d_a,d_b,d_c);
//copy result from device to host
cudaMemcpy(h_c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost);
for(n=0;n<5;++n) {
printf("h_c[%d] = %g\n",n,h_c[n]);
}
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
8a056a860406d5a0e20a5d93c13593df1f2bec35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys.
// The pbrt source code is licensed under the Apache License, Version 2.0.
// SPDX: Apache-2.0
#include <pbrt/pbrt.h>
#include <pbrt/gpu/accel.h>
#include <pbrt/gpu/optix.h>
#include <pbrt/interaction.h>
#include <pbrt/materials.h>
#include <pbrt/media.h>
#include <pbrt/shapes.h>
#include <pbrt/textures.h>
#include <pbrt/util/float.h>
#include <pbrt/util/rng.h>
#include <pbrt/util/transform.h>
#include <pbrt/util/vecmath.h>
// Make various functions visible to OptiX, which doesn't get to link
// shader code with the CUDA code in the main executable...
#include <pbrt/util/color.cpp>
#include <pbrt/util/colorspace.cpp>
#include <pbrt/util/noise.cpp>
#include <pbrt/util/spectrum.cpp>
#include <pbrt/util/transform.cpp>
#include <optix_device.h>
#include <utility>
using namespace pbrt;
extern "C" {
extern __constant__ pbrt::RayIntersectParameters params;
}
///////////////////////////////////////////////////////////////////////////
// Utility functions
// Payload management
__device__ inline uint32_t packPointer0(void *ptr) {
uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
return uptr >> 32;
}
__device__ inline uint32_t packPointer1(void *ptr) {
uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
return uint32_t(uptr);
}
template <typename T>
static __forceinline__ __device__ T *getPayload() {
uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1();
const uint64_t uptr = (uint64_t(p0) << 32) | p1;
return reinterpret_cast<T *>(uptr);
}
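// Round trip: the raygen programs split the address of a stack-allocated
// payload struct (e.g. ClosestHitContext) into two 32-bit payload registers
// with packPointer0/packPointer1, pass them through optixTrace, and the hit
// programs rebuild the pointer with getPayload<T>() so they can write results
// straight into the caller's struct.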
template <typename... Args>
__device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin,
Float tMax, OptixRayFlags flags, Args &&... payload) {
optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z),
make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time,
OptixVisibilityMask(255), flags, 0, /* ray type */
1, /* number of ray types */
0, /* missSBTIndex */
std::forward<Args>(payload)...);
}
///////////////////////////////////////////////////////////////////////////
// Closest hit
struct ClosestHitContext {
PBRT_GPU
ClosestHitContext(MediumHandle rayMedium, bool shadowRay)
: rayMedium(rayMedium), shadowRay(shadowRay) {}
MediumHandle rayMedium;
bool shadowRay;
// out
Point3fi piHit;
Normal3f nHit;
MaterialHandle material;
MediumInterface mediumInterface;
PBRT_GPU
Ray SpawnRayTo(const Point3f &p) const {
Interaction intr(piHit, nHit);
intr.mediumInterface = &mediumInterface;
return intr.SpawnRayTo(p);
}
};
extern "C" __global__ void __raygen__findClosest() {
int rayIndex(optixGetLaunchIndex().x);
if (rayIndex >= params.rayQueue->Size())
return;
RayWorkItem r = (*params.rayQueue)[rayIndex];
Ray ray = r.ray;
Float tMax = 1e30f;
ClosestHitContext ctx(ray.medium, false);
uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx);
PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x,
ray.d.y, ray.d.z, tMax);
uint32_t missed = 0;
Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1,
missed);
if (missed) {
if (ray.medium) {
PBRT_DBG("Adding miss ray to mediumSampleQueue. "
"ray %f %f %f d %f %f %f beta %f %f %f %f\n",
r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z,
r.beta[0], r.beta[1], r.beta[2], r.beta[3]);
params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.beta, r.pdfUni,
r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev,
r.nPrev, r.nsPrev, r.isSpecularBounce,
r.anyNonSpecularBounces, r.etaScale);
} else if (params.escapedRayQueue) {
PBRT_DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex,
r.pixelIndex);
params.escapedRayQueue->Push(EscapedRayWorkItem{
r.beta, r.pdfUni, r.pdfNEE, r.lambda, ray.o, ray.d, r.piPrev, r.nPrev,
r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex});
}
}
}
extern "C" __global__ void __miss__noop() {
optixSetPayload_2(1);
}
static __forceinline__ __device__ void ProcessClosestIntersection(
SurfaceInteraction intr) {
int rayIndex = optixGetLaunchIndex().x;
MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium;
if (intr.mediumInterface)
getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface;
else
getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium);
getPayload<ClosestHitContext>()->piHit = intr.pi;
getPayload<ClosestHitContext>()->nHit = intr.n;
getPayload<ClosestHitContext>()->material = intr.material;
if (getPayload<ClosestHitContext>()->shadowRay)
return;
// We only have the ray queue (and it only makes sense to access) for
// regular closest hit rays.
RayWorkItem r = (*params.rayQueue)[rayIndex];
if (rayMedium) {
assert(params.mediumSampleQueue);
PBRT_DBG("Enqueuing into medium sample queue\n");
params.mediumSampleQueue->Push(
MediumSampleWorkItem{r.ray,
optixGetRayTmax(),
r.lambda,
r.beta,
r.pdfUni,
r.pdfNEE,
rayIndex,
r.pixelIndex,
r.piPrev,
r.nPrev,
r.nsPrev,
r.isSpecularBounce,
r.anyNonSpecularBounces,
r.etaScale,
intr.areaLight,
intr.pi,
intr.n,
-r.ray.d,
intr.uv,
intr.material,
intr.shading.n,
intr.shading.dpdu,
intr.shading.dpdv,
intr.shading.dndu,
intr.shading.dndv,
getPayload<ClosestHitContext>()->mediumInterface});
return;
}
// FIXME: this is all basically duplicate code w/medium.cpp
MaterialHandle material = intr.material;
const MixMaterial *mix = material.CastOrNullptr<MixMaterial>();
if (mix) {
MaterialEvalContext ctx(intr);
material = mix->ChooseMaterial(BasicTextureEvaluator(), ctx);
}
if (!material) {
PBRT_DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n",
rayIndex, r.pixelIndex);
Ray newRay = intr.SpawnRay(r.ray.d);
params.mediumTransitionQueue->Push(MediumTransitionWorkItem{
newRay, r.lambda, r.beta, r.pdfUni, r.pdfNEE, r.piPrev, r.nPrev, r.nsPrev,
r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, r.pixelIndex});
return;
}
if (intr.areaLight) {
PBRT_DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index "
"%d\n",
rayIndex, r.pixelIndex);
Ray ray = r.ray;
// TODO: intr.wo == -ray.d?
params.hitAreaLightQueue->Push(HitAreaLightWorkItem{
intr.areaLight, r.lambda, r.beta, r.pdfUni, r.pdfNEE, intr.p(), intr.n,
intr.uv, intr.wo, r.piPrev, ray.d, ray.time, r.nPrev, r.nsPrev,
(int)r.isSpecularBounce, r.pixelIndex});
}
FloatTextureHandle displacement = material.GetDisplacement();
MaterialEvalQueue *q =
(material.CanEvaluateTextures(BasicTextureEvaluator()) &&
(!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {})))
? params.basicEvalMaterialQueue
: params.universalEvalMaterialQueue;
PBRT_DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag());
auto enqueue = [=](auto ptr) {
using Material = typename std::remove_reference_t<decltype(*ptr)>;
q->Push<Material>(MaterialEvalWorkItem<Material>{
ptr, r.lambda, r.beta, r.pdfUni, intr.pi, intr.n, intr.shading.n,
intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv,
intr.wo, intr.uv, intr.time, r.anyNonSpecularBounces, r.etaScale,
getPayload<ClosestHitContext>()->mediumInterface, rayIndex, r.pixelIndex});
};
material.Dispatch(enqueue);
PBRT_DBG("Closest hit found intersection at t %f\n", optixGetRayTmax());
}
///////////////////////////////////////////////////////////////////////////
// Triangles
static __forceinline__ __device__ pstd::optional<SurfaceInteraction>
getTriangleIntersection() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
float b1 = optixGetTriangleBarycentrics().x;
float b2 = optixGetTriangleBarycentrics().y;
float b0 = 1 - b1 - b2;
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
assert(optixGetTransformListSize() == 1);
float worldFromObj[12], objFromWorld[12];
optixGetObjectToWorldTransformMatrix(worldFromObj);
optixGetWorldToObjectTransformMatrix(objFromWorld);
SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2],
worldFromObj[3], worldFromObj[4], worldFromObj[5],
worldFromObj[6], worldFromObj[7], worldFromObj[8],
worldFromObj[9], worldFromObj[10], worldFromObj[11],
0.f, 0.f, 0.f, 1.f);
SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2],
objFromWorld[3], objFromWorld[4], objFromWorld[5],
objFromWorld[6], objFromWorld[7], objFromWorld[8],
objFromWorld[9], objFromWorld[10], objFromWorld[11],
0.f, 0.f, 0.f, 1.f);
Transform worldFromInstance(worldFromObjM, objFromWorldM);
return Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(),
{b0, b1, b2}, optixGetRayTime(), wo,
worldFromInstance);
}
static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) {
if (!rec.alphaTexture)
return false;
pstd::optional<SurfaceInteraction> intr = getTriangleIntersection();
if (!intr)
return true;
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, *intr);
if (alpha >= 1)
return false;
if (alpha <= 0)
return true;
else {
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
return u > alpha;
}
}
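// Stochastic alpha test: for fractional alpha the ray origin and direction are
// hashed into a fixed per-ray sample u in [0,1) and the hit is ignored when
// u > alpha, so the cutout decision is deterministic for a given ray rather
// than depending on any-hit traversal order.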
extern "C" __global__ void __closesthit__triangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
// It's slightly dicey to assume intr is valid. But invalid would
// presumably mean that OptiX returned a hit with a degenerate
// triangle...
SurfaceInteraction intr = *getTriangleIntersection();
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (!rec.areaLights.empty())
intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()];
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__triangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
if (alphaKilled(rec))
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__shadowTriangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
if (alphaKilled(rec))
optixIgnoreIntersection();
}
///////////////////////////////////////////////////////////////////////////
// Shadow rays
extern "C" __global__ void __raygen__shadow() {
int index = optixGetLaunchIndex().x;
if (index >= params.shadowRayQueue->Size())
return;
ShadowRayWorkItem sr = (*params.shadowRayQueue)[index];
uint32_t missed = 0;
Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE,
missed);
SampledSpectrum Ld;
if (missed)
Ld = sr.Ld / (sr.pdfUni + sr.pdfNEE).Average();
else
Ld = SampledSpectrum(0.);
params.shadowRayQueue->Ld[index] = Ld;
}
extern "C" __global__ void __miss__shadow() {
optixSetPayload_0(1);
}
__device__
inline void rescale(SampledSpectrum &beta, SampledSpectrum &pdfLight,
SampledSpectrum &pdfUni) {
if (beta.MaxComponentValue() > 0x1p24f ||
pdfLight.MaxComponentValue() > 0x1p24f ||
pdfUni.MaxComponentValue() > 0x1p24f) {
beta *= 1.f / 0x1p24f;
pdfLight *= 1.f / 0x1p24f;
pdfUni *= 1.f / 0x1p24f;
} else if (beta.MaxComponentValue() < 0x1p-24f ||
pdfLight.MaxComponentValue() < 0x1p-24f ||
pdfUni.MaxComponentValue() < 0x1p-24f) {
beta *= 0x1p24f;
pdfLight *= 0x1p24f;
pdfUni *= 0x1p24f;
}
}
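// rescale() multiplies the throughput and both pdf accumulators by the same
// power of two whenever any of them leaves roughly [2^-24, 2^24]; this only
// guards against float overflow/underflow in the running products, since the
// common factor cancels when the estimate below divides throughput by the
// averaged pdfs.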
extern "C" __global__ void __raygen__shadow_Tr() {
PBRT_DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x);
int index = optixGetLaunchIndex().x;
if (index >= params.shadowRayQueue->Size())
return;
ShadowRayWorkItem sr = (*params.shadowRayQueue)[index];
SampledWavelengths lambda = sr.lambda;
SampledSpectrum Ld = sr.Ld;
PBRT_DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1],
Ld[2], Ld[3], index, sr.pixelIndex);
Ray ray = sr.ray;
Float tMax = sr.tMax;
Point3f pLight = ray(tMax);
RNG rng(Hash(ray.o), Hash(ray.d));
SampledSpectrum throughput(1.f);
SampledSpectrum pdfUni(1.f), pdfNEE(1.f);
while (true) {
ClosestHitContext ctx(ray.medium, true);
uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx);
PBRT_DBG("Tracing shadow tr shadow ray index %d pixel index %d "
"ray %f %f %f d %f %f %f tMax %f\n",
index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z,
tMax);
uint32_t missed = 0;
Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0,
p1, missed);
if (!missed && ctx.material) {
PBRT_DBG("Hit opaque. Bye\n");
// Hit opaque surface
throughput = SampledSpectrum(0.f);
break;
}
if (ray.medium) {
PBRT_DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr());
Float tEnd =
missed ? tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d));
ray.medium.SampleTmaj(ray, tEnd, rng, lambda,
[&](const MediumSample &mediumSample) {
if (!mediumSample.intr)
// FIXME: include last Tmaj?
return false;
const SampledSpectrum &Tmaj = mediumSample.Tmaj;
const MediumInteraction &intr = *mediumSample.intr;
SampledSpectrum sigma_n = intr.sigma_n();
// ratio-tracking: only evaluate null scattering
throughput *= Tmaj * sigma_n;
pdfNEE *= Tmaj * intr.sigma_maj;
pdfUni *= Tmaj * sigma_n;
Float pSurvive = throughput.MaxComponentValue() /
(pdfNEE + pdfUni).Average();
PBRT_DBG("throughput %f %f %f %f pdfNEE %f %f %f %f pdfUni %f %f %f %f "
"pSurvive %f\n",
throughput[0], throughput[1], throughput[2], throughput[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3], pSurvive);
if (pSurvive < .25f) {
if (rng.Uniform<Float>() > pSurvive)
throughput = SampledSpectrum(0.);
else
throughput /= pSurvive;
}
PBRT_DBG("Tmaj %f %f %f %f sigma_n %f %f %f %f sigma_maj %f %f %f %f\n",
Tmaj[0], Tmaj[1], Tmaj[2], Tmaj[3],
sigma_n[0], sigma_n[1], sigma_n[2], sigma_n[3],
intr.sigma_maj[0], intr.sigma_maj[1], intr.sigma_maj[2],
intr.sigma_maj[3]);
PBRT_DBG("throughput %f %f %f %f pdfNEE %f %f %f %f pdfUni %f %f %f %f\n",
throughput[0], throughput[1], throughput[2], throughput[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3]);
if (!throughput)
return false;
rescale(throughput, pdfNEE, pdfUni);
return true;
});
}
if (missed || !throughput)
// done
break;
ray = ctx.SpawnRayTo(pLight);
if (ray.d == Vector3f(0, 0, 0))
break;
}
PBRT_DBG("Final throughput %.9g %.9g %.9g %.9g sr.pdfUni %.9g %.9g %.9g %.9g pdfUni %.9g %.9g %.9g %.9g\n",
throughput[0], throughput[1], throughput[2], throughput[3],
sr.pdfUni[0], sr.pdfUni[1], sr.pdfUni[2], sr.pdfUni[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3]);
PBRT_DBG("sr.pdfNEE %.9g %.9g %.9g %.9g pdfNEE %.9g %.9g %.9g %.9g\n",
sr.pdfNEE[0], sr.pdfNEE[1], sr.pdfNEE[2], sr.pdfNEE[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3]);
PBRT_DBG("scaled throughput %.9g %.9g %.9g %.9g\n",
throughput[0] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[1] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[2] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[3] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average());
if (!throughput)
Ld = SampledSpectrum(0.f);
else
Ld *= throughput / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average();
PBRT_DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n",
index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]);
params.shadowRayQueue->Ld[index] = Ld;
}
extern "C" __global__ void __miss__shadow_Tr() {
optixSetPayload_2(1);
}
/////////////////////////////////////////////////////////////////////////////////////
// Quadrics
static __device__ inline SurfaceInteraction getQuadricIntersection(
const QuadricIntersection &si) {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
Float time = optixGetRayTime();
SurfaceInteraction intr;
if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
intr = sphere->InteractionFromIntersection(si, wo, time);
else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
intr = cylinder->InteractionFromIntersection(si, wo, time);
else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
intr = disk->InteractionFromIntersection(si, wo, time);
else
assert(!"unexpected quadric");
return intr;
}
extern "C" __global__ void __closesthit__quadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
QuadricIntersection qi;
qi.pObj =
Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()),
BitsToFloat(optixGetAttribute_2()));
qi.phi = BitsToFloat(optixGetAttribute_3());
SurfaceInteraction intr = getQuadricIntersection(qi);
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (rec.areaLight)
intr.areaLight = rec.areaLight;
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__shadowQuadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
}
extern "C" __global__ void __intersection__quadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
float3 org = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
Float tMax = optixGetRayTmax();
Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));
pstd::optional<QuadricIntersection> isect;
if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
isect = sphere->BasicIntersect(ray, tMax);
else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
isect = cylinder->BasicIntersect(ray, tMax);
else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
isect = disk->BasicIntersect(ray, tMax);
if (!isect)
return;
if (rec.alphaTexture) {
SurfaceInteraction intr = getQuadricIntersection(*isect);
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, intr);
if (alpha < 1) {
if (alpha == 0)
// No hit
return;
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
if (u > alpha)
// no hit
return;
}
}
optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x),
FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z),
FloatToBits(isect->phi));
}
///////////////////////////////////////////////////////////////////////////
// Bilinear patches
static __forceinline__ __device__ SurfaceInteraction
getBilinearPatchIntersection(Point2f uv) {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(),
uv, optixGetRayTime(), wo);
}
extern "C" __global__ void __closesthit__bilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()));
SurfaceInteraction intr = getBilinearPatchIntersection(uv);
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (!rec.areaLights.empty())
intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()];
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__shadowBilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
}
extern "C" __global__ void __intersection__bilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
float3 org = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
Float tMax = optixGetRayTmax();
Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));
int vertexIndex = 4 * optixGetPrimitiveIndex();
Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]];
Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]];
Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]];
Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]];
pstd::optional<BilinearIntersection> isect =
BilinearPatch::Intersect(ray, tMax, p00, p10, p01, p11);
if (!isect)
return;
if (rec.alphaTexture) {
SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv);
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, intr);
if (alpha < 1) {
if (alpha == 0)
// No hit
return;
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
if (u > alpha)
// no hit
return;
}
}
optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]),
FloatToBits(isect->uv[1]));
}
///////////////////////////////////////////////////////////////////////////
// Random hit (for subsurface scattering)
struct RandomHitPayload {
WeightedReservoirSampler<SubsurfaceInteraction> wrs;
MaterialHandle material;
};
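// The any-hit programs below add every intersection whose material matches the
// probe's target material to this reservoir with weight 1, so one candidate is
// kept uniformly at random and WeightSum() equals the number of candidates;
// the raygen program stores both alongside the chosen subsurface interaction.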
extern "C" __global__ void __raygen__randomHit() {
// Keep as uint32_t so can pass directly to optixTrace.
uint32_t index = optixGetLaunchIndex().x;
if (index >= params.subsurfaceScatterQueue->Size())
return;
SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index];
Ray ray(s.p0, s.p1 - s.p0);
Float tMax = 1.f;
RandomHitPayload payload;
payload.wrs.Seed(Hash(s.p0, s.p1));
payload.material = s.material;
uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload);
PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y,
ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax);
Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1);
if (payload.wrs.HasSample() &&
payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed...
const SubsurfaceInteraction &si = payload.wrs.GetSample();
PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x,
si.n.y, si.n.z);
params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum();
params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample();
} else
params.subsurfaceScatterQueue->weight[index] = 0;
}
extern "C" __global__ void __anyhit__randomHitTriangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material)
p->wrs.Add([&] PBRT_CPU_GPU() { return *getTriangleIntersection(); }, 1.f);
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__randomHitBilinearPatch() {
BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer();
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material)
p->wrs.Add(
[&] PBRT_CPU_GPU() {
Point2f uv(BitsToFloat(optixGetAttribute_0()),
BitsToFloat(optixGetAttribute_1()));
return getBilinearPatchIntersection(uv);
},
1.f);
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__randomHitQuadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material) {
p->wrs.Add(
[&] PBRT_CPU_GPU() {
QuadricIntersection qi;
qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()),
BitsToFloat(optixGetAttribute_1()),
BitsToFloat(optixGetAttribute_2()));
qi.phi = BitsToFloat(optixGetAttribute_3());
return getQuadricIntersection(qi);
},
1.f);
}
optixIgnoreIntersection();
}
| 8a056a860406d5a0e20a5d93c13593df1f2bec35.cu | // pbrt is Copyright(c) 1998-2020 Matt Pharr, Wenzel Jakob, and Greg Humphreys.
// The pbrt source code is licensed under the Apache License, Version 2.0.
// SPDX: Apache-2.0
#include <pbrt/pbrt.h>
#include <pbrt/gpu/accel.h>
#include <pbrt/gpu/optix.h>
#include <pbrt/interaction.h>
#include <pbrt/materials.h>
#include <pbrt/media.h>
#include <pbrt/shapes.h>
#include <pbrt/textures.h>
#include <pbrt/util/float.h>
#include <pbrt/util/rng.h>
#include <pbrt/util/transform.h>
#include <pbrt/util/vecmath.h>
// Make various functions visible to OptiX, which doesn't get to link
// shader code with the CUDA code in the main executable...
#include <pbrt/util/color.cpp>
#include <pbrt/util/colorspace.cpp>
#include <pbrt/util/noise.cpp>
#include <pbrt/util/spectrum.cpp>
#include <pbrt/util/transform.cpp>
#include <optix_device.h>
#include <utility>
using namespace pbrt;
extern "C" {
extern __constant__ pbrt::RayIntersectParameters params;
}
///////////////////////////////////////////////////////////////////////////
// Utility functions
// Payload management
__device__ inline uint32_t packPointer0(void *ptr) {
uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
return uptr >> 32;
}
__device__ inline uint32_t packPointer1(void *ptr) {
uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
return uint32_t(uptr);
}
template <typename T>
static __forceinline__ __device__ T *getPayload() {
uint32_t p0 = optixGetPayload_0(), p1 = optixGetPayload_1();
const uint64_t uptr = (uint64_t(p0) << 32) | p1;
return reinterpret_cast<T *>(uptr);
}
template <typename... Args>
__device__ inline void Trace(OptixTraversableHandle traversable, Ray ray, Float tMin,
Float tMax, OptixRayFlags flags, Args &&... payload) {
optixTrace(traversable, make_float3(ray.o.x, ray.o.y, ray.o.z),
make_float3(ray.d.x, ray.d.y, ray.d.z), tMin, tMax, ray.time,
OptixVisibilityMask(255), flags, 0, /* ray type */
1, /* number of ray types */
0, /* missSBTIndex */
std::forward<Args>(payload)...);
}
///////////////////////////////////////////////////////////////////////////
// Closest hit
struct ClosestHitContext {
PBRT_GPU
ClosestHitContext(MediumHandle rayMedium, bool shadowRay)
: rayMedium(rayMedium), shadowRay(shadowRay) {}
MediumHandle rayMedium;
bool shadowRay;
// out
Point3fi piHit;
Normal3f nHit;
MaterialHandle material;
MediumInterface mediumInterface;
PBRT_GPU
Ray SpawnRayTo(const Point3f &p) const {
Interaction intr(piHit, nHit);
intr.mediumInterface = &mediumInterface;
return intr.SpawnRayTo(p);
}
};
extern "C" __global__ void __raygen__findClosest() {
int rayIndex(optixGetLaunchIndex().x);
if (rayIndex >= params.rayQueue->Size())
return;
RayWorkItem r = (*params.rayQueue)[rayIndex];
Ray ray = r.ray;
Float tMax = 1e30f;
ClosestHitContext ctx(ray.medium, false);
uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx);
PBRT_DBG("ray o %f %f %f dir %f %f %f tmax %f\n", ray.o.x, ray.o.y, ray.o.z, ray.d.x,
ray.d.y, ray.d.z, tMax);
uint32_t missed = 0;
Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0, p1,
missed);
if (missed) {
if (ray.medium) {
PBRT_DBG("Adding miss ray to mediumSampleQueue. "
"ray %f %f %f d %f %f %f beta %f %f %f %f\n",
r.ray.o.x, r.ray.o.y, r.ray.o.z, r.ray.d.x, r.ray.d.y, r.ray.d.z,
r.beta[0], r.beta[1], r.beta[2], r.beta[3]);
params.mediumSampleQueue->Push(r.ray, Infinity, r.lambda, r.beta, r.pdfUni,
r.pdfNEE, rayIndex, r.pixelIndex, r.piPrev,
r.nPrev, r.nsPrev, r.isSpecularBounce,
r.anyNonSpecularBounces, r.etaScale);
} else if (params.escapedRayQueue) {
PBRT_DBG("Adding ray to escapedRayQueue ray index %d pixel index %d\n", rayIndex,
r.pixelIndex);
params.escapedRayQueue->Push(EscapedRayWorkItem{
r.beta, r.pdfUni, r.pdfNEE, r.lambda, ray.o, ray.d, r.piPrev, r.nPrev,
r.nsPrev, (int)r.isSpecularBounce, r.pixelIndex});
}
}
}
extern "C" __global__ void __miss__noop() {
optixSetPayload_2(1);
}
static __forceinline__ __device__ void ProcessClosestIntersection(
SurfaceInteraction intr) {
int rayIndex = optixGetLaunchIndex().x;
MediumHandle rayMedium = getPayload<ClosestHitContext>()->rayMedium;
if (intr.mediumInterface)
getPayload<ClosestHitContext>()->mediumInterface = *intr.mediumInterface;
else
getPayload<ClosestHitContext>()->mediumInterface = MediumInterface(rayMedium);
getPayload<ClosestHitContext>()->piHit = intr.pi;
getPayload<ClosestHitContext>()->nHit = intr.n;
getPayload<ClosestHitContext>()->material = intr.material;
if (getPayload<ClosestHitContext>()->shadowRay)
return;
// We only have the ray queue (and it only makes sense to access) for
// regular closest hit rays.
RayWorkItem r = (*params.rayQueue)[rayIndex];
if (rayMedium) {
assert(params.mediumSampleQueue);
PBRT_DBG("Enqueuing into medium sample queue\n");
params.mediumSampleQueue->Push(
MediumSampleWorkItem{r.ray,
optixGetRayTmax(),
r.lambda,
r.beta,
r.pdfUni,
r.pdfNEE,
rayIndex,
r.pixelIndex,
r.piPrev,
r.nPrev,
r.nsPrev,
r.isSpecularBounce,
r.anyNonSpecularBounces,
r.etaScale,
intr.areaLight,
intr.pi,
intr.n,
-r.ray.d,
intr.uv,
intr.material,
intr.shading.n,
intr.shading.dpdu,
intr.shading.dpdv,
intr.shading.dndu,
intr.shading.dndv,
getPayload<ClosestHitContext>()->mediumInterface});
return;
}
// FIXME: this is all basically duplicate code w/medium.cpp
MaterialHandle material = intr.material;
const MixMaterial *mix = material.CastOrNullptr<MixMaterial>();
if (mix) {
MaterialEvalContext ctx(intr);
material = mix->ChooseMaterial(BasicTextureEvaluator(), ctx);
}
if (!material) {
PBRT_DBG("Enqueuing into medium transition queue: ray index %d pixel index %d \n",
rayIndex, r.pixelIndex);
Ray newRay = intr.SpawnRay(r.ray.d);
params.mediumTransitionQueue->Push(MediumTransitionWorkItem{
newRay, r.lambda, r.beta, r.pdfUni, r.pdfNEE, r.piPrev, r.nPrev, r.nsPrev,
r.isSpecularBounce, r.anyNonSpecularBounces, r.etaScale, r.pixelIndex});
return;
}
if (intr.areaLight) {
PBRT_DBG("Ray hit an area light: adding to hitAreaLightQueue ray index %d pixel index "
"%d\n",
rayIndex, r.pixelIndex);
Ray ray = r.ray;
// TODO: intr.wo == -ray.d?
params.hitAreaLightQueue->Push(HitAreaLightWorkItem{
intr.areaLight, r.lambda, r.beta, r.pdfUni, r.pdfNEE, intr.p(), intr.n,
intr.uv, intr.wo, r.piPrev, ray.d, ray.time, r.nPrev, r.nsPrev,
(int)r.isSpecularBounce, r.pixelIndex});
}
FloatTextureHandle displacement = material.GetDisplacement();
MaterialEvalQueue *q =
(material.CanEvaluateTextures(BasicTextureEvaluator()) &&
(!displacement || BasicTextureEvaluator().CanEvaluate({displacement}, {})))
? params.basicEvalMaterialQueue
: params.universalEvalMaterialQueue;
PBRT_DBG("Enqueuing for material eval, mtl tag %d\n", material.Tag());
auto enqueue = [=](auto ptr) {
using Material = typename std::remove_reference_t<decltype(*ptr)>;
q->Push<Material>(MaterialEvalWorkItem<Material>{
ptr, r.lambda, r.beta, r.pdfUni, intr.pi, intr.n, intr.shading.n,
intr.shading.dpdu, intr.shading.dpdv, intr.shading.dndu, intr.shading.dndv,
intr.wo, intr.uv, intr.time, r.anyNonSpecularBounces, r.etaScale,
getPayload<ClosestHitContext>()->mediumInterface, rayIndex, r.pixelIndex});
};
material.Dispatch(enqueue);
PBRT_DBG("Closest hit found intersection at t %f\n", optixGetRayTmax());
}
///////////////////////////////////////////////////////////////////////////
// Triangles
static __forceinline__ __device__ pstd::optional<SurfaceInteraction>
getTriangleIntersection() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
float b1 = optixGetTriangleBarycentrics().x;
float b2 = optixGetTriangleBarycentrics().y;
float b0 = 1 - b1 - b2;
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
assert(optixGetTransformListSize() == 1);
float worldFromObj[12], objFromWorld[12];
optixGetObjectToWorldTransformMatrix(worldFromObj);
optixGetWorldToObjectTransformMatrix(objFromWorld);
SquareMatrix<4> worldFromObjM(worldFromObj[0], worldFromObj[1], worldFromObj[2],
worldFromObj[3], worldFromObj[4], worldFromObj[5],
worldFromObj[6], worldFromObj[7], worldFromObj[8],
worldFromObj[9], worldFromObj[10], worldFromObj[11],
0.f, 0.f, 0.f, 1.f);
SquareMatrix<4> objFromWorldM(objFromWorld[0], objFromWorld[1], objFromWorld[2],
objFromWorld[3], objFromWorld[4], objFromWorld[5],
objFromWorld[6], objFromWorld[7], objFromWorld[8],
objFromWorld[9], objFromWorld[10], objFromWorld[11],
0.f, 0.f, 0.f, 1.f);
Transform worldFromInstance(worldFromObjM, objFromWorldM);
return Triangle::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(),
{b0, b1, b2}, optixGetRayTime(), wo,
worldFromInstance);
}
static __forceinline__ __device__ bool alphaKilled(const TriangleMeshRecord &rec) {
if (!rec.alphaTexture)
return false;
pstd::optional<SurfaceInteraction> intr = getTriangleIntersection();
if (!intr)
return true;
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, *intr);
if (alpha >= 1)
return false;
if (alpha <= 0)
return true;
else {
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
return u > alpha;
}
}
extern "C" __global__ void __closesthit__triangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
// It's slightly dicey to assume intr is valid. But invalid would
// presumably mean that OptiX returned a hit with a degenerate
// triangle...
SurfaceInteraction intr = *getTriangleIntersection();
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (!rec.areaLights.empty())
intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()];
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__triangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
if (alphaKilled(rec))
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__shadowTriangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
if (alphaKilled(rec))
optixIgnoreIntersection();
}
///////////////////////////////////////////////////////////////////////////
// Shadow rays
extern "C" __global__ void __raygen__shadow() {
int index = optixGetLaunchIndex().x;
if (index >= params.shadowRayQueue->Size())
return;
ShadowRayWorkItem sr = (*params.shadowRayQueue)[index];
uint32_t missed = 0;
Trace(params.traversable, sr.ray, 1e-5f /* tMin */, sr.tMax, OPTIX_RAY_FLAG_NONE,
missed);
SampledSpectrum Ld;
if (missed)
Ld = sr.Ld / (sr.pdfUni + sr.pdfNEE).Average();
else
Ld = SampledSpectrum(0.);
params.shadowRayQueue->Ld[index] = Ld;
}
extern "C" __global__ void __miss__shadow() {
optixSetPayload_0(1);
}
__device__
inline void rescale(SampledSpectrum &beta, SampledSpectrum &pdfLight,
SampledSpectrum &pdfUni) {
if (beta.MaxComponentValue() > 0x1p24f ||
pdfLight.MaxComponentValue() > 0x1p24f ||
pdfUni.MaxComponentValue() > 0x1p24f) {
beta *= 1.f / 0x1p24f;
pdfLight *= 1.f / 0x1p24f;
pdfUni *= 1.f / 0x1p24f;
} else if (beta.MaxComponentValue() < 0x1p-24f ||
pdfLight.MaxComponentValue() < 0x1p-24f ||
pdfUni.MaxComponentValue() < 0x1p-24f) {
beta *= 0x1p24f;
pdfLight *= 0x1p24f;
pdfUni *= 0x1p24f;
}
}
extern "C" __global__ void __raygen__shadow_Tr() {
PBRT_DBG("raygen sahadow tr %d\n", optixGetLaunchIndex().x);
int index = optixGetLaunchIndex().x;
if (index >= params.shadowRayQueue->Size())
return;
ShadowRayWorkItem sr = (*params.shadowRayQueue)[index];
SampledWavelengths lambda = sr.lambda;
SampledSpectrum Ld = sr.Ld;
PBRT_DBG("Initial Ld %f %f %f %f shadow ray index %d pixel index %d\n", Ld[0], Ld[1],
Ld[2], Ld[3], index, sr.pixelIndex);
Ray ray = sr.ray;
Float tMax = sr.tMax;
Point3f pLight = ray(tMax);
RNG rng(Hash(ray.o), Hash(ray.d));
SampledSpectrum throughput(1.f);
SampledSpectrum pdfUni(1.f), pdfNEE(1.f);
while (true) {
ClosestHitContext ctx(ray.medium, true);
uint32_t p0 = packPointer0(&ctx), p1 = packPointer1(&ctx);
PBRT_DBG("Tracing shadow tr shadow ray index %d pixel index %d "
"ray %f %f %f d %f %f %f tMax %f\n",
index, sr.pixelIndex, ray.o.x, ray.o.y, ray.o.z, ray.d.x, ray.d.y, ray.d.z,
tMax);
uint32_t missed = 0;
Trace(params.traversable, ray, 1e-5f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, p0,
p1, missed);
if (!missed && ctx.material) {
PBRT_DBG("Hit opaque. Bye\n");
// Hit opaque surface
throughput = SampledSpectrum(0.f);
break;
}
if (ray.medium) {
PBRT_DBG("Ray medium %p. Will sample tmaj...\n", ray.medium.ptr());
Float tEnd =
missed ? tMax : (Distance(ray.o, Point3f(ctx.piHit)) / Length(ray.d));
ray.medium.SampleTmaj(ray, tEnd, rng, lambda,
[&](const MediumSample &mediumSample) {
if (!mediumSample.intr)
// FIXME: include last Tmaj?
return false;
const SampledSpectrum &Tmaj = mediumSample.Tmaj;
const MediumInteraction &intr = *mediumSample.intr;
SampledSpectrum sigma_n = intr.sigma_n();
// ratio-tracking: only evaluate null scattering
throughput *= Tmaj * sigma_n;
pdfNEE *= Tmaj * intr.sigma_maj;
pdfUni *= Tmaj * sigma_n;
Float pSurvive = throughput.MaxComponentValue() /
(pdfNEE + pdfUni).Average();
PBRT_DBG("throughput %f %f %f %f pdfNEE %f %f %f %f pdfUni %f %f %f %f "
"pSurvive %f\n",
throughput[0], throughput[1], throughput[2], throughput[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3], pSurvive);
if (pSurvive < .25f) {
if (rng.Uniform<Float>() > pSurvive)
throughput = SampledSpectrum(0.);
else
throughput /= pSurvive;
}
PBRT_DBG("Tmaj %f %f %f %f sigma_n %f %f %f %f sigma_maj %f %f %f %f\n",
Tmaj[0], Tmaj[1], Tmaj[2], Tmaj[3],
sigma_n[0], sigma_n[1], sigma_n[2], sigma_n[3],
intr.sigma_maj[0], intr.sigma_maj[1], intr.sigma_maj[2],
intr.sigma_maj[3]);
PBRT_DBG("throughput %f %f %f %f pdfNEE %f %f %f %f pdfUni %f %f %f %f\n",
throughput[0], throughput[1], throughput[2], throughput[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3]);
if (!throughput)
return false;
rescale(throughput, pdfNEE, pdfUni);
return true;
});
}
if (missed || !throughput)
// done
break;
ray = ctx.SpawnRayTo(pLight);
if (ray.d == Vector3f(0, 0, 0))
break;
}
PBRT_DBG("Final throughput %.9g %.9g %.9g %.9g sr.pdfUni %.9g %.9g %.9g %.9g pdfUni %.9g %.9g %.9g %.9g\n",
throughput[0], throughput[1], throughput[2], throughput[3],
sr.pdfUni[0], sr.pdfUni[1], sr.pdfUni[2], sr.pdfUni[3],
pdfUni[0], pdfUni[1], pdfUni[2], pdfUni[3]);
PBRT_DBG("sr.pdfNEE %.9g %.9g %.9g %.9g pdfNEE %.9g %.9g %.9g %.9g\n",
sr.pdfNEE[0], sr.pdfNEE[1], sr.pdfNEE[2], sr.pdfNEE[3],
pdfNEE[0], pdfNEE[1], pdfNEE[2], pdfNEE[3]);
PBRT_DBG("scaled throughput %.9g %.9g %.9g %.9g\n",
throughput[0] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[1] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[2] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average(),
throughput[3] / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average());
if (!throughput)
Ld = SampledSpectrum(0.f);
else
Ld *= throughput / (sr.pdfUni * pdfUni + sr.pdfNEE * pdfNEE).Average();
PBRT_DBG("Setting final Ld for shadow ray index %d pixel index %d = as %f %f %f %f\n",
index, sr.pixelIndex, Ld[0], Ld[1], Ld[2], Ld[3]);
params.shadowRayQueue->Ld[index] = Ld;
}
extern "C" __global__ void __miss__shadow_Tr() {
optixSetPayload_2(1);
}
/////////////////////////////////////////////////////////////////////////////////////
// Quadrics
static __device__ inline SurfaceInteraction getQuadricIntersection(
const QuadricIntersection &si) {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
Float time = optixGetRayTime();
SurfaceInteraction intr;
if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
intr = sphere->InteractionFromIntersection(si, wo, time);
else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
intr = cylinder->InteractionFromIntersection(si, wo, time);
else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
intr = disk->InteractionFromIntersection(si, wo, time);
else
assert(!"unexpected quadric");
return intr;
}
extern "C" __global__ void __closesthit__quadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
QuadricIntersection qi;
qi.pObj =
Point3f(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()),
BitsToFloat(optixGetAttribute_2()));
qi.phi = BitsToFloat(optixGetAttribute_3());
SurfaceInteraction intr = getQuadricIntersection(qi);
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (rec.areaLight)
intr.areaLight = rec.areaLight;
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__shadowQuadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
}
extern "C" __global__ void __intersection__quadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
float3 org = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
Float tMax = optixGetRayTmax();
Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));
pstd::optional<QuadricIntersection> isect;
if (const Sphere *sphere = rec.shape.CastOrNullptr<Sphere>())
isect = sphere->BasicIntersect(ray, tMax);
else if (const Cylinder *cylinder = rec.shape.CastOrNullptr<Cylinder>())
isect = cylinder->BasicIntersect(ray, tMax);
else if (const Disk *disk = rec.shape.CastOrNullptr<Disk>())
isect = disk->BasicIntersect(ray, tMax);
if (!isect)
return;
if (rec.alphaTexture) {
SurfaceInteraction intr = getQuadricIntersection(*isect);
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, intr);
if (alpha < 1) {
if (alpha == 0)
// No hit
return;
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
if (u > alpha)
// no hit
return;
}
}
optixReportIntersection(isect->tHit, 0 /* hit kind */, FloatToBits(isect->pObj.x),
FloatToBits(isect->pObj.y), FloatToBits(isect->pObj.z),
FloatToBits(isect->phi));
}
///////////////////////////////////////////////////////////////////////////
// Bilinear patches
static __forceinline__ __device__ SurfaceInteraction
getBilinearPatchIntersection(Point2f uv) {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
float3 rd = optixGetWorldRayDirection();
Vector3f wo = -Vector3f(rd.x, rd.y, rd.z);
return BilinearPatch::InteractionFromIntersection(rec.mesh, optixGetPrimitiveIndex(),
uv, optixGetRayTime(), wo);
}
extern "C" __global__ void __closesthit__bilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
Point2f uv(BitsToFloat(optixGetAttribute_0()), BitsToFloat(optixGetAttribute_1()));
SurfaceInteraction intr = getBilinearPatchIntersection(uv);
if (rec.mediumInterface && rec.mediumInterface->IsMediumTransition())
intr.mediumInterface = rec.mediumInterface;
intr.material = rec.material;
if (!rec.areaLights.empty())
intr.areaLight = rec.areaLights[optixGetPrimitiveIndex()];
ProcessClosestIntersection(intr);
}
extern "C" __global__ void __anyhit__shadowBilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
if (rec.material && rec.material.IsTransparent())
optixIgnoreIntersection();
}
extern "C" __global__ void __intersection__bilinearPatch() {
BilinearMeshRecord &rec = *((BilinearMeshRecord *)optixGetSbtDataPointer());
float3 org = optixGetObjectRayOrigin();
float3 dir = optixGetObjectRayDirection();
Float tMax = optixGetRayTmax();
Ray ray(Point3f(org.x, org.y, org.z), Vector3f(dir.x, dir.y, dir.z));
int vertexIndex = 4 * optixGetPrimitiveIndex();
Point3f p00 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex]];
Point3f p10 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 1]];
Point3f p01 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 2]];
Point3f p11 = rec.mesh->p[rec.mesh->vertexIndices[vertexIndex + 3]];
pstd::optional<BilinearIntersection> isect =
BilinearPatch::Intersect(ray, tMax, p00, p10, p01, p11);
if (!isect)
return;
if (rec.alphaTexture) {
SurfaceInteraction intr = getBilinearPatchIntersection(isect->uv);
BasicTextureEvaluator eval;
Float alpha = eval(rec.alphaTexture, intr);
if (alpha < 1) {
if (alpha == 0)
// No hit
return;
float3 o = optixGetWorldRayOrigin();
float3 d = optixGetWorldRayDirection();
Float u = uint32_t(Hash(o.x, o.y, o.z, d.x, d.y, d.z)) * 0x1p-32f;
if (u > alpha)
// no hit
return;
}
}
optixReportIntersection(isect->t, 0 /* hit kind */, FloatToBits(isect->uv[0]),
FloatToBits(isect->uv[1]));
}
///////////////////////////////////////////////////////////////////////////
// Random hit (for subsurface scattering)
struct RandomHitPayload {
WeightedReservoirSampler<SubsurfaceInteraction> wrs;
MaterialHandle material;
};
extern "C" __global__ void __raygen__randomHit() {
// Keep as uint32_t so can pass directly to optixTrace.
uint32_t index = optixGetLaunchIndex().x;
if (index >= params.subsurfaceScatterQueue->Size())
return;
SubsurfaceScatterWorkItem s = (*params.subsurfaceScatterQueue)[index];
Ray ray(s.p0, s.p1 - s.p0);
Float tMax = 1.f;
RandomHitPayload payload;
payload.wrs.Seed(Hash(s.p0, s.p1));
payload.material = s.material;
uint32_t ptr0 = packPointer0(&payload), ptr1 = packPointer1(&payload);
PBRT_DBG("Randomhit raygen ray.o %f %f %f ray.d %f %f %f tMax %f\n", ray.o.x, ray.o.y,
ray.o.z, ray.d.x, ray.d.y, ray.d.z, tMax);
Trace(params.traversable, ray, 0.f /* tMin */, tMax, OPTIX_RAY_FLAG_NONE, ptr0, ptr1);
if (payload.wrs.HasSample() &&
payload.wrs.WeightSum() > 0) { // TODO: latter check shouldn't be needed...
const SubsurfaceInteraction &si = payload.wrs.GetSample();
PBRT_DBG("optix si p %f %f %f n %f %f %f\n", si.p().x, si.p().y, si.p().z, si.n.x,
si.n.y, si.n.z);
params.subsurfaceScatterQueue->weight[index] = payload.wrs.WeightSum();
params.subsurfaceScatterQueue->ssi[index] = payload.wrs.GetSample();
} else
params.subsurfaceScatterQueue->weight[index] = 0;
}
extern "C" __global__ void __anyhit__randomHitTriangle() {
const TriangleMeshRecord &rec = *(const TriangleMeshRecord *)optixGetSbtDataPointer();
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit triangle for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material)
p->wrs.Add([&] PBRT_CPU_GPU() { return *getTriangleIntersection(); }, 1.f);
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__randomHitBilinearPatch() {
BilinearMeshRecord &rec = *(BilinearMeshRecord *)optixGetSbtDataPointer();
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit blp for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material)
p->wrs.Add(
[&] PBRT_CPU_GPU() {
Point2f uv(BitsToFloat(optixGetAttribute_0()),
BitsToFloat(optixGetAttribute_1()));
return getBilinearPatchIntersection(uv);
},
1.f);
optixIgnoreIntersection();
}
extern "C" __global__ void __anyhit__randomHitQuadric() {
QuadricRecord &rec = *((QuadricRecord *)optixGetSbtDataPointer());
RandomHitPayload *p = getPayload<RandomHitPayload>();
PBRT_DBG("Anyhit quadric for random hit: rec.material %p params.materials %p\n",
rec.material.ptr(), p->material.ptr());
if (rec.material == p->material) {
p->wrs.Add(
[&] PBRT_CPU_GPU() {
QuadricIntersection qi;
qi.pObj = Point3f(BitsToFloat(optixGetAttribute_0()),
BitsToFloat(optixGetAttribute_1()),
BitsToFloat(optixGetAttribute_2()));
qi.phi = BitsToFloat(optixGetAttribute_3());
return getQuadricIntersection(qi);
},
1.f);
}
optixIgnoreIntersection();
}
|
f9cb982146b968b7fa5797d1bb199f7c7c67cd05.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "cudakernel/common/common.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/nn/lstm.h"
#include <stdio.h>
__device__ float sigmoidf(float a)
{
return 1.0f / (1.0f + expf(-a)); // same value as expf(a)/(1+expf(a)) but does not overflow for large a
}
//X_in: (seq, batch, dir, 4*hidden)
//hidden: (dir, batch, 4*hidden)
// P: (dir, 3*hidden)
// output: (dir, batch, hidden)
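// Per (batch, hidden) element the kernel fuses the LSTM gate math. It reads the
// four hidden_size-wide slices of the X and hidden GEMM outputs as [i, o, f, c]
// (the order implied by how x1..x4 / h1..h4 are used below) and computes
//   i = sigmoid(xi + hi + b + Pi*c_prev)
//   f = sigmoid(xf + hf + b + Pf*c_prev)
//   g = tanh(xc + hc + b)
//   c = f*c_prev + i*g
//   o = sigmoid(xo + ho + b + Po*(c_prev + c))   (as written below)
//   h = o * tanh(c)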
__global__ void fuse_gate(
const void *hidden,
const void *X_in,
const void *bias,
const void *ceil,
const void *P,
const int num_direction,
const int batch,
const int hidden_size,
void *out_c,
void *out_h)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int h_id = tid % (hidden_size);
int hb_id = tid / (hidden_size);
bool in_range = tid < batch * hidden_size;
int x_off = hb_id * num_direction * 4 * hidden_size + h_id;
int h_off = hb_id * 4 * hidden_size + h_id;
if (!in_range)
return;
float x1 = ((__half *)X_in)[x_off];
float x2 = ((__half *)X_in)[x_off + 1 * hidden_size];
float x3 = ((__half *)X_in)[x_off + 2 * hidden_size];
float x4 = ((__half *)X_in)[x_off + 3 * hidden_size];
float h1 = ((__half *)hidden)[h_off];
float h2 = ((__half *)hidden)[h_off + 1 * hidden_size];
float h3 = ((__half *)hidden)[h_off + 2 * hidden_size];
float h4 = ((__half *)hidden)[h_off + 3 * hidden_size];
float xb1 = bias ? (float)((__half *)bias)[h_id] : 0.f;
float xb2 = bias ? (float)((__half *)bias)[h_id + 1 * hidden_size] : 0.f;
float xb3 = bias ? (float)((__half *)bias)[h_id + 2 * hidden_size] : 0.f;
float xb4 = bias ? (float)((__half *)bias)[h_id + 3 * hidden_size] : 0.f;
float hb1 = bias ? (float)((__half *)bias)[h_id + 4 * hidden_size] : 0.f;
float hb2 = bias ? (float)((__half *)bias)[h_id + 5 * hidden_size] : 0.f;
float hb3 = bias ? (float)((__half *)bias)[h_id + 6 * hidden_size] : 0.f;
float hb4 = bias ? (float)((__half *)bias)[h_id + 7 * hidden_size] : 0.f;
float c_pre = ceil ? (float)((__half *)ceil)[tid] : 0.f;
float pi = P ? (float)((__half *)P)[h_id] : 0.f;
float po = P ? (float)((__half *)P)[h_id + 1 * hidden_size] : 0.f;
float pf = P ? (float)((__half *)P)[h_id + 2 * hidden_size] : 0.f;
float gi = (x1 + xb1) + (h1 + hb1) + pi * c_pre;
    // The output-gate peephole uses the updated cell state c (added once c is
    // computed below), so c_pre must not also be folded in here.
    float go = (x2 + xb2) + (h2 + hb2);
float gf = (x3 + xb3) + (h3 + hb3) + pf * c_pre;
float gc = (x4 + xb4) + (h4 + hb4);
gf = sigmoidf(gf);
gi = sigmoidf(gi);
gc = tanhf(gc);
float c = gf * c_pre + gi * gc;
go = go + po * c;
float output = sigmoidf(go);
float ht = output * tanhf(c);
((__half *)out_h)[tid] = (__half)ht;
((__half *)out_c)[tid] = (__half)c;
}
int64_t PPLCUDALstmGetRuntimeBufSize(
const ppl::nn::TensorShape *X_shape,
const unsigned int direction,
const int64_t hidden_size)
{
int seq_len = X_shape->GetDim(0); // max seq_len
int batch = X_shape->GetDim(1);
int num_direction = direction == RnnDirection::bidirectional ? 2 : 1;
int64_t size = 0;
size += seq_len * batch * num_direction * 4 * hidden_size; // X_in
size += batch * 4 * hidden_size; // hidden_buf
size += batch * hidden_size; // ceil_buf
return size * sizeof(__half);
}
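// temp_buffer layout (fp16 elements, in this order): X_in with
// seq*batch*dir*4*hidden entries, then hidden_buf with batch*4*hidden entries,
// then ceil_buf (the cell-state scratch) with batch*hidden entries;
// PPLCUDALstmForwardImp slices the buffer with the same offsets.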
/*
X: (sequence_len, batch, input_size)
W: (direction, 4*hidden_size, input_size)
=(seq, batch, dir, 4*hidden)
c: (direction, batch, hidden_size)
h: (direction, batch, hidden_size)
R: (direction, 4*hidden_size, hidden_size))
=(dir, batch, 4*hidden)
Bias: (dir, 2*4*hidden)
y: (sequence_len, dir, batch, hidden_size)
y_h: (dir, batch, hidden_size)
y_c: (dir, batch, hidden_size)
*/
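// Typical call sequence (sketch only; the tensor and stream names here are
// placeholders, not definitions from this file):
//
//   int64_t buf_bytes = PPLCUDALstmGetRuntimeBufSize(&x_shape, direction, hidden_size);
//   void *temp = nullptr;
//   hipMalloc(&temp, buf_bytes);
//   PPLCUDALstmForwardImp(stream, module, &x_shape, X, W, R, P, B,
//                         nullptr /* sequence_lens */, h0, c0,
//                         direction, hidden_size, temp, Y, Y_h, Y_c);
//   hipFree(temp);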
ppl::common::RetCode PPLCUDALstmForwardImp(
hipStream_t stream,
ppl::nn::cuda::CUDAModule *module,
const ppl::nn::TensorShape *X_shape,
const void *X,
const void *X_weight,
const void *R_weight,
const void *P_weight,
const void *bias,
const void *sequence_lens, // FIXME: batch-wise output is different
const void *initial_h,
const void *initial_c,
const unsigned int direction,
const int64_t hidden_size,
void *temp_buffer,
void *Y,
void *Y_h,
void *Y_c)
{
int seq_len = X_shape->GetDim(0); // max seq_len
int batch = X_shape->GetDim(1);
int input_size = X_shape->GetDim(2);
int num_direction = direction == RnnDirection::bidirectional ? 2 : 1;
//(seq, batch, dir, 4*hidden)
ppl::nn::TensorShape input_shape, weight_shape, output_shape;
int M = seq_len * batch;
int K = input_size;
int N = num_direction * 4 * hidden_size;
if (sequence_lens) {
printf("error: lstm sequence_lens are different.\n");
return ppl::common::RC_UNSUPPORTED;
}
if (K % 8 != 0 || hidden_size % 8 != 0) {
printf("error: lstm input size or hidden_size is not aligned.\n");
return ppl::common::RC_UNSUPPORTED;
}
#define GET_GEMM_PARAM \
input_shape.Reshape({M, K}); \
weight_shape.Reshape({N, K}); \
output_shape.Reshape({M, N}); \
input_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
weight_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
output_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
fuse_param_t fuse_param; \
ppl::nn::common::GemmParam gemm_param; \
gemm_param.bias_term = 0; \
gemm_param.transA = 0; \
gemm_param.transB = 1; \
gemm_param.alpha = 1.f; \
gemm_param.beta = 1.f; \
gemm_param.N = N; /*padN*/ \
void *tmp_buf = NULL;
GET_GEMM_PARAM
__half *X_in = (__half *)temp_buffer;
algo_param_t algo_param;
algo_param.UseDefaultF1Kernel();
PPLCUDAGemmForwardImp(
stream, module, &input_shape, X, &weight_shape, X_weight, NULL, &output_shape, X_in, gemm_param, tmp_buf, fuse_param, algo_param);
__half *hidden_buf = (__half *)X_in + M * N;
__half *ceil_buf = hidden_buf + batch * 4 * hidden_size;
int reverse = direction == RnnDirection::reverse ? 1 : 0;
for (int d = 0; d < num_direction; d++) {
bool rev = (reverse || d == 1);
int dir = rev ? -1 : 1;
__half *tR = (__half *)R_weight + d * 4 * hidden_size * hidden_size;
__half *P = P_weight ? (__half *)P_weight + d * 3 * hidden_size : NULL;
__half *t_bias = (__half *)bias + d * 8 * hidden_size;
for (int i = 0; i < seq_len; i++) {
int pre_idx = rev ? seq_len - i : i - 1;
int cur_idx = pre_idx + dir;
__half *pre_hidden = i == 0 ? (__half *)initial_h : (__half *)Y + pre_idx * num_direction * batch * hidden_size + d * batch * hidden_size;
__half *post_hidden = hidden_buf;
            // Only the first step can lack a previous hidden state; later steps must
            // always run the recurrent GEMM on the hidden state already stored in Y.
            if (i > 0 || initial_h != nullptr) {
int M = batch;
int K = hidden_size;
int N = 4 * hidden_size;
GET_GEMM_PARAM
PPLCUDAGemmForwardImp(
stream, module, &input_shape, pre_hidden, &weight_shape, tR, NULL, &output_shape, post_hidden, gemm_param, tmp_buf, fuse_param, algo_param);
} else {
                // Zero the whole (batch, 4*hidden_size) buffer, not just its first row.
                hipMemset(post_hidden, 0, batch * 4 * hidden_size * sizeof(__half));
}
__half *out_h = (__half *)Y + cur_idx * num_direction * batch * hidden_size + d * batch * hidden_size;
__half *pre_c = i == 0 ? (__half *)initial_c : ceil_buf;
__half *out_c = ceil_buf;
__half *Xt = X_in + cur_idx * num_direction * batch * 4 * hidden_size + d * batch * 4 * hidden_size;
dim3 grid;
const int threads = 512;
grid.x = DivUp(batch * hidden_size, threads);
grid.y = 1;
grid.z = 1;
hipLaunchKernelGGL(( fuse_gate), dim3(grid), dim3(threads), 0, stream, post_hidden, Xt, t_bias, pre_c, P, num_direction, batch, hidden_size, out_c, out_h);
if (Y_h && i == seq_len - 1) {
hipMemcpyAsync((__half *)Y_h + d * batch * hidden_size, out_h, batch * hidden_size * sizeof(__half), hipMemcpyDeviceToDevice, stream);
}
if (Y_c && i == seq_len - 1) {
hipMemcpyAsync((__half *)Y_c + d * batch * hidden_size, out_c, batch * hidden_size * sizeof(__half), hipMemcpyDeviceToDevice, stream);
}
}
}
return ppl::common::RC_SUCCESS;
}
| f9cb982146b968b7fa5797d1bb199f7c7c67cd05.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include "cudakernel/common/common.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/nn/lstm.h"
#include <stdio.h>
// Numerically stable logistic sigmoid: expf(a) / (1 + expf(a)) overflows to
// NaN for large positive a, so use the equivalent 1 / (1 + expf(-a)).
__device__ float sigmoidf(float a)
{
    return 1.f / (1.f + expf(-a));
}
//(seq, batch, dir, 4*hidden)
//(dir, batch, 4*hidden)
// P: (dir, 3*hidden)
// output: (dir, batch, hidden)
__global__ void fuse_gate(
const void *hidden,
const void *X_in,
const void *bias,
const void *ceil,
const void *P,
const int num_direction,
const int batch,
const int hidden_size,
void *out_c,
void *out_h)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int h_id = tid % (hidden_size);
int hb_id = tid / (hidden_size);
bool in_range = tid < batch * hidden_size;
int x_off = hb_id * num_direction * 4 * hidden_size + h_id;
int h_off = hb_id * 4 * hidden_size + h_id;
if (!in_range)
return;
float x1 = ((__half *)X_in)[x_off];
float x2 = ((__half *)X_in)[x_off + 1 * hidden_size];
float x3 = ((__half *)X_in)[x_off + 2 * hidden_size];
float x4 = ((__half *)X_in)[x_off + 3 * hidden_size];
float h1 = ((__half *)hidden)[h_off];
float h2 = ((__half *)hidden)[h_off + 1 * hidden_size];
float h3 = ((__half *)hidden)[h_off + 2 * hidden_size];
float h4 = ((__half *)hidden)[h_off + 3 * hidden_size];
float xb1 = bias ? (float)((__half *)bias)[h_id] : 0.f;
float xb2 = bias ? (float)((__half *)bias)[h_id + 1 * hidden_size] : 0.f;
float xb3 = bias ? (float)((__half *)bias)[h_id + 2 * hidden_size] : 0.f;
float xb4 = bias ? (float)((__half *)bias)[h_id + 3 * hidden_size] : 0.f;
float hb1 = bias ? (float)((__half *)bias)[h_id + 4 * hidden_size] : 0.f;
float hb2 = bias ? (float)((__half *)bias)[h_id + 5 * hidden_size] : 0.f;
float hb3 = bias ? (float)((__half *)bias)[h_id + 6 * hidden_size] : 0.f;
float hb4 = bias ? (float)((__half *)bias)[h_id + 7 * hidden_size] : 0.f;
float c_pre = ceil ? (float)((__half *)ceil)[tid] : 0.f;
float pi = P ? (float)((__half *)P)[h_id] : 0.f;
float po = P ? (float)((__half *)P)[h_id + 1 * hidden_size] : 0.f;
float pf = P ? (float)((__half *)P)[h_id + 2 * hidden_size] : 0.f;
float gi = (x1 + xb1) + (h1 + hb1) + pi * c_pre;
    // The output-gate peephole uses the updated cell state c (added once c is
    // computed below), so c_pre must not also be folded in here.
    float go = (x2 + xb2) + (h2 + hb2);
float gf = (x3 + xb3) + (h3 + hb3) + pf * c_pre;
float gc = (x4 + xb4) + (h4 + hb4);
gf = sigmoidf(gf);
gi = sigmoidf(gi);
gc = tanhf(gc);
float c = gf * c_pre + gi * gc;
go = go + po * c;
float output = sigmoidf(go);
float ht = output * tanhf(c);
((__half *)out_h)[tid] = (__half)ht;
((__half *)out_c)[tid] = (__half)c;
}
int64_t PPLCUDALstmGetRuntimeBufSize(
const ppl::nn::TensorShape *X_shape,
const unsigned int direction,
const int64_t hidden_size)
{
int seq_len = X_shape->GetDim(0); // max seq_len
int batch = X_shape->GetDim(1);
int num_direction = direction == RnnDirection::bidirectional ? 2 : 1;
int64_t size = 0;
size += seq_len * batch * num_direction * 4 * hidden_size; // X_in
size += batch * 4 * hidden_size; // hidden_buf
size += batch * hidden_size; // ceil_buf
return size * sizeof(__half);
}
/*
X: (sequence_len, batch, input_size)
W: (direction, 4*hidden_size, input_size)
=(seq, batch, dir, 4*hidden)
c: (direction, batch, hidden_size)
h: (direction, batch, hidden_size)
R: (direction, 4*hidden_size, hidden_size))
=(dir, batch, 4*hidden)
Bias: (dir, 2*4*hidden)
y: (sequence_len, dir, batch, hidden_size)
y_h: (dir, batch, hidden_size)
y_c: (dir, batch, hidden_size)
*/
ppl::common::RetCode PPLCUDALstmForwardImp(
cudaStream_t stream,
ppl::nn::cuda::CUDAModule *module,
const ppl::nn::TensorShape *X_shape,
const void *X,
const void *X_weight,
const void *R_weight,
const void *P_weight,
const void *bias,
const void *sequence_lens, // FIXME: batch-wise output is different
const void *initial_h,
const void *initial_c,
const unsigned int direction,
const int64_t hidden_size,
void *temp_buffer,
void *Y,
void *Y_h,
void *Y_c)
{
int seq_len = X_shape->GetDim(0); // max seq_len
int batch = X_shape->GetDim(1);
int input_size = X_shape->GetDim(2);
int num_direction = direction == RnnDirection::bidirectional ? 2 : 1;
//(seq, batch, dir, 4*hidden)
ppl::nn::TensorShape input_shape, weight_shape, output_shape;
int M = seq_len * batch;
int K = input_size;
int N = num_direction * 4 * hidden_size;
if (sequence_lens) {
printf("error: lstm sequence_lens are different.\n");
return ppl::common::RC_UNSUPPORTED;
}
if (K % 8 != 0 || hidden_size % 8 != 0) {
printf("error: lstm input size or hidden_size is not aligned.\n");
return ppl::common::RC_UNSUPPORTED;
}
#define GET_GEMM_PARAM \
input_shape.Reshape({M, K}); \
weight_shape.Reshape({N, K}); \
output_shape.Reshape({M, N}); \
input_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
weight_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
output_shape.SetDataType(ppl::common::DATATYPE_FLOAT16); \
fuse_param_t fuse_param; \
ppl::nn::common::GemmParam gemm_param; \
gemm_param.bias_term = 0; \
gemm_param.transA = 0; \
gemm_param.transB = 1; \
gemm_param.alpha = 1.f; \
gemm_param.beta = 1.f; \
gemm_param.N = N; /*padN*/ \
void *tmp_buf = NULL;
GET_GEMM_PARAM
__half *X_in = (__half *)temp_buffer;
algo_param_t algo_param;
algo_param.UseDefaultF1Kernel();
PPLCUDAGemmForwardImp(
stream, module, &input_shape, X, &weight_shape, X_weight, NULL, &output_shape, X_in, gemm_param, tmp_buf, fuse_param, algo_param);
__half *hidden_buf = (__half *)X_in + M * N;
__half *ceil_buf = hidden_buf + batch * 4 * hidden_size;
int reverse = direction == RnnDirection::reverse ? 1 : 0;
for (int d = 0; d < num_direction; d++) {
bool rev = (reverse || d == 1);
int dir = rev ? -1 : 1;
__half *tR = (__half *)R_weight + d * 4 * hidden_size * hidden_size;
__half *P = P_weight ? (__half *)P_weight + d * 3 * hidden_size : NULL;
__half *t_bias = (__half *)bias + d * 8 * hidden_size;
for (int i = 0; i < seq_len; i++) {
int pre_idx = rev ? seq_len - i : i - 1;
int cur_idx = pre_idx + dir;
__half *pre_hidden = i == 0 ? (__half *)initial_h : (__half *)Y + pre_idx * num_direction * batch * hidden_size + d * batch * hidden_size;
__half *post_hidden = hidden_buf;
            // Only the first step can lack a previous hidden state; later steps must
            // always run the recurrent GEMM on the hidden state already stored in Y.
            if (i > 0 || initial_h != nullptr) {
int M = batch;
int K = hidden_size;
int N = 4 * hidden_size;
GET_GEMM_PARAM
PPLCUDAGemmForwardImp(
stream, module, &input_shape, pre_hidden, &weight_shape, tR, NULL, &output_shape, post_hidden, gemm_param, tmp_buf, fuse_param, algo_param);
} else {
                // Zero the whole (batch, 4*hidden_size) buffer, not just its first row.
                cudaMemset(post_hidden, 0, batch * 4 * hidden_size * sizeof(__half));
}
__half *out_h = (__half *)Y + cur_idx * num_direction * batch * hidden_size + d * batch * hidden_size;
__half *pre_c = i == 0 ? (__half *)initial_c : ceil_buf;
__half *out_c = ceil_buf;
__half *Xt = X_in + cur_idx * num_direction * batch * 4 * hidden_size + d * batch * 4 * hidden_size;
dim3 grid;
const int threads = 512;
grid.x = DivUp(batch * hidden_size, threads);
grid.y = 1;
grid.z = 1;
fuse_gate<<<grid, threads, 0, stream>>>(post_hidden, Xt, t_bias, pre_c, P, num_direction, batch, hidden_size, out_c, out_h);
if (Y_h && i == seq_len - 1) {
cudaMemcpyAsync((__half *)Y_h + d * batch * hidden_size, out_h, batch * hidden_size * sizeof(__half), cudaMemcpyDeviceToDevice, stream);
}
if (Y_c && i == seq_len - 1) {
cudaMemcpyAsync((__half *)Y_c + d * batch * hidden_size, out_c, batch * hidden_size * sizeof(__half), cudaMemcpyDeviceToDevice, stream);
}
}
}
return ppl::common::RC_SUCCESS;
}
|
1b610f33fabde27188eeaddc0157346e9cb191e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by Huy Vo on 11/21/18.
//
#include "fspmat_hyb_kernels.h"
namespace cuFSP{
__global__
void fspmat_hyb_fill_data(int n_species, int n_reactions, int n_states, int *fsp_bounds, int *states, cuFSP::CSRMatInt stoich,
cuFSP::PropFun propensity,
double *diag_vals, double *offdiag_vals, int *offdiag_colindxs){
extern __shared__ int wsp[];
int tix = threadIdx.x;
int tid = blockDim.x * blockIdx.x + tix;
int stoich_nnz = stoich.nnz;
int *fsp_bounds_copy = wsp;
int *stoich_vals = &wsp[n_species];
int *stoich_colidxs = &stoich_vals[stoich_nnz];
int *stoich_rowptrs = &stoich_colidxs[stoich_nnz];
if (tix < n_species) {
fsp_bounds_copy[tix] = fsp_bounds[tix];
}
__syncthreads();
if (tix < n_reactions+1){
stoich_rowptrs[tix] = stoich.row_ptrs[tix];
}
__syncthreads();
if (tix < stoich_nnz){
stoich_vals[tix] = stoich.vals[tix];
stoich_colidxs[tix] = stoich.col_idxs[tix];
}
__syncthreads();
int *state;
if (tid < n_states) {
state = &states[tid * n_species];
for (int reaction{0}; reaction < n_reactions; ++reaction){
diag_vals[n_states*reaction + tid] = -1.0*propensity(state, reaction);
// Fill the off-diagonal entries
reachable_state(state, state, reaction, -1,
n_species, stoich_vals, stoich_colidxs, stoich_rowptrs);
bool reachable = true;
for (int k{0}; k < n_species; ++k) {
reachable = reachable && ((state[k] >= 0) && (state[k] <= fsp_bounds_copy[k]));
}
if (reachable) {
offdiag_colindxs[n_states*reaction + tid] = state2indx(state, n_species, fsp_bounds_copy);
offdiag_vals[n_states*reaction + tid] = propensity(state, reaction);
} else {
offdiag_colindxs[n_states*reaction + tid] = 0;
offdiag_vals[n_states*reaction + tid] = 0.0;
}
reachable_state(state, state, reaction, 1,
n_species, stoich_vals, stoich_colidxs, stoich_rowptrs);
}
}
}
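    // Matrix-vector product for the hybrid layout filled above: for each reaction r
    // and each state i there is one diagonal entry diag_vals[r*n_states + i] and at
    // most one off-diagonal entry (offdiag_vals / offdiag_colidxs), so y accumulates
    // sum_r coef[r] * A_r * x with two fused terms per reaction per row.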
__global__
void fspmat_hyb_mv(int n_states, int n_reactions, double *diag_vals, double *offdiag_vals, int *offdiag_colidxs,
double *coef, double *x, double *y){
double y_val = 0.0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < n_states){
for (int reaction = 0; reaction < n_reactions; ++reaction){
y_val += coef[reaction]*diag_vals[n_states*reaction + tid]*x[tid];
__syncthreads();
y_val += coef[reaction]*offdiag_vals[n_states*reaction + tid]*x[offdiag_colidxs[n_states*reaction + tid]];
__syncthreads();
}
y[tid] += y_val;
}
}
void HYBMatSet::destroy() {
if (diag_vals) hipFree(diag_vals); CUDACHKERR();
if (offdiag_vals) hipFree(offdiag_vals); CUDACHKERR();
if (offdiag_colidxs) hipFree(offdiag_colidxs); CUDACHKERR();
if (d_coefs) hipFree(d_coefs); CUDACHKERR();
}
void HYBMatSet::action(double *x, double *y, double *coefs) {
hipMemcpy(d_coefs, coefs, num_matrices*sizeof(double), hipMemcpyHostToDevice);
CUDACHKERR();
// Get the max number of threads that can fit to a block
int max_block_size, num_blocks;
int device_id;
hipGetDevice(&device_id);
CUDACHKERR();
hipDeviceGetAttribute(&max_block_size, hipDeviceAttributeMaxThreadsPerBlock, device_id);
CUDACHKERR();
num_blocks = (int) ::ceil(n_rows/(max_block_size*1.0));
hipLaunchKernelGGL(( fspmat_hyb_mv), dim3(num_blocks), dim3(max_block_size), 0, 0, n_rows, num_matrices, diag_vals,
offdiag_vals, offdiag_colidxs, d_coefs, x, y);
hipDeviceSynchronize();
CUDACHKERR();
}
void generate_fsp_mats_hyb(int n_states, int n_reactions, int n_species, int *fsp_bounds, CSRMatInt stoich,
PropFun prop_func, HYBMatSet *hyb) {
int *states;
hipMallocManaged(&states, n_species*n_states*sizeof(int)); CUDACHKERR();
int *d_stoich_vals, *d_stoich_colidxs, *d_stoich_rowptrs;
hipMalloc(&d_stoich_vals, stoich.nnz * sizeof(int));
CUDACHKERR();
hipMalloc(&d_stoich_colidxs, stoich.nnz * sizeof(int));
CUDACHKERR();
hipMalloc(&d_stoich_rowptrs, (stoich.n_rows + 1) * sizeof(int));
CUDACHKERR();
hipMemcpy(d_stoich_vals, stoich.vals, stoich.nnz * sizeof(int), hipMemcpyHostToDevice);
CUDACHKERR();
hipMemcpy(d_stoich_colidxs, stoich.col_idxs, stoich.nnz * sizeof(int),
hipMemcpyHostToDevice);
CUDACHKERR();
hipMemcpy(d_stoich_rowptrs, stoich.row_ptrs, (stoich.n_rows + 1) * sizeof(int), hipMemcpyHostToDevice);
CUDACHKERR();
CSRMatInt d_stoich;
d_stoich.n_rows = stoich.n_rows;
d_stoich.nnz = stoich.nnz;
d_stoich.n_cols = stoich.n_cols;
d_stoich.vals = d_stoich_vals;
d_stoich.col_idxs = d_stoich_colidxs;
d_stoich.row_ptrs = d_stoich_rowptrs;
int *d_fsp_bounds;
hipMalloc(&d_fsp_bounds, n_species*sizeof(int)); CUDACHKERR();
hipMemcpy(d_fsp_bounds, fsp_bounds, n_species*sizeof(int), hipMemcpyHostToDevice); CUDACHKERR();
hipMalloc(&(hyb->diag_vals), n_states * n_reactions * sizeof(double));
CUDACHKERR();
hipMalloc(&(hyb->offdiag_vals), n_states * n_reactions * sizeof(double));
CUDACHKERR();
hipMalloc(&(hyb->offdiag_colidxs), n_states * n_reactions * sizeof(int));
CUDACHKERR();
// Get the max number of threads that can fit to a block
int max_block_size, num_blocks;
int device_id;
hipGetDevice(&device_id);
CUDACHKERR();
hipDeviceGetAttribute(&max_block_size, hipDeviceAttributeMaxThreadsPerBlock, device_id);
CUDACHKERR();
// Generate data for hyb
hyb->n_rows = n_states;
hyb->num_matrices = n_reactions;
hipMalloc(&(hyb->d_coefs), hyb->num_matrices*sizeof(double)); CUDACHKERR();
num_blocks = (int) ::ceil(n_states/(max_block_size*1.0));
hipLaunchKernelGGL(( fsp_get_states), dim3(num_blocks), dim3(max_block_size), n_species*sizeof(int), 0, states, n_species, n_states, d_fsp_bounds);
hipDeviceSynchronize();
CUDACHKERR();
int shared_mem_size = n_species*sizeof(int) + (stoich.nnz*2 + stoich.n_rows+1)*sizeof(int);
hipLaunchKernelGGL(( fspmat_hyb_fill_data), dim3(num_blocks), dim3(max_block_size), shared_mem_size, 0, n_species, n_reactions, n_states, d_fsp_bounds, states, d_stoich,
prop_func, hyb->diag_vals, hyb->offdiag_vals, hyb->offdiag_colidxs);
hipDeviceSynchronize();
CUDACHKERR();
hipFree(d_stoich_colidxs);
CUDACHKERR();
hipFree(d_stoich_rowptrs);
CUDACHKERR();
hipFree(d_stoich_vals);
CUDACHKERR();
hipFree(d_fsp_bounds);
CUDACHKERR();
hipFree(states);
CUDACHKERR();
}
} | 1b610f33fabde27188eeaddc0157346e9cb191e2.cu | //
// Created by Huy Vo on 11/21/18.
//
#include "fspmat_hyb_kernels.h"
namespace cuFSP{
__global__
void fspmat_hyb_fill_data(int n_species, int n_reactions, int n_states, int *fsp_bounds, int *states, cuFSP::CSRMatInt stoich,
cuFSP::PropFun propensity,
double *diag_vals, double *offdiag_vals, int *offdiag_colindxs){
extern __shared__ int wsp[];
int tix = threadIdx.x;
int tid = blockDim.x * blockIdx.x + tix;
int stoich_nnz = stoich.nnz;
int *fsp_bounds_copy = wsp;
int *stoich_vals = &wsp[n_species];
int *stoich_colidxs = &stoich_vals[stoich_nnz];
int *stoich_rowptrs = &stoich_colidxs[stoich_nnz];
if (tix < n_species) {
fsp_bounds_copy[tix] = fsp_bounds[tix];
}
__syncthreads();
if (tix < n_reactions+1){
stoich_rowptrs[tix] = stoich.row_ptrs[tix];
}
__syncthreads();
if (tix < stoich_nnz){
stoich_vals[tix] = stoich.vals[tix];
stoich_colidxs[tix] = stoich.col_idxs[tix];
}
__syncthreads();
int *state;
if (tid < n_states) {
state = &states[tid * n_species];
for (int reaction{0}; reaction < n_reactions; ++reaction){
diag_vals[n_states*reaction + tid] = -1.0*propensity(state, reaction);
// Fill the off-diagonal entries
reachable_state(state, state, reaction, -1,
n_species, stoich_vals, stoich_colidxs, stoich_rowptrs);
bool reachable = true;
for (int k{0}; k < n_species; ++k) {
reachable = reachable && ((state[k] >= 0) && (state[k] <= fsp_bounds_copy[k]));
}
if (reachable) {
offdiag_colindxs[n_states*reaction + tid] = state2indx(state, n_species, fsp_bounds_copy);
offdiag_vals[n_states*reaction + tid] = propensity(state, reaction);
} else {
offdiag_colindxs[n_states*reaction + tid] = 0;
offdiag_vals[n_states*reaction + tid] = 0.0;
}
reachable_state(state, state, reaction, 1,
n_species, stoich_vals, stoich_colidxs, stoich_rowptrs);
}
}
}
__global__
void fspmat_hyb_mv(int n_states, int n_reactions, double *diag_vals, double *offdiag_vals, int *offdiag_colidxs,
double *coef, double *x, double *y){
double y_val = 0.0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < n_states){
for (int reaction = 0; reaction < n_reactions; ++reaction){
y_val += coef[reaction]*diag_vals[n_states*reaction + tid]*x[tid];
__syncthreads();
y_val += coef[reaction]*offdiag_vals[n_states*reaction + tid]*x[offdiag_colidxs[n_states*reaction + tid]];
__syncthreads();
}
y[tid] += y_val;
}
}
void HYBMatSet::destroy() {
if (diag_vals) cudaFree(diag_vals); CUDACHKERR();
if (offdiag_vals) cudaFree(offdiag_vals); CUDACHKERR();
if (offdiag_colidxs) cudaFree(offdiag_colidxs); CUDACHKERR();
if (d_coefs) cudaFree(d_coefs); CUDACHKERR();
}
void HYBMatSet::action(double *x, double *y, double *coefs) {
cudaMemcpy(d_coefs, coefs, num_matrices*sizeof(double), cudaMemcpyHostToDevice);
CUDACHKERR();
// Get the max number of threads that can fit to a block
int max_block_size, num_blocks;
int device_id;
cudaGetDevice(&device_id);
CUDACHKERR();
cudaDeviceGetAttribute(&max_block_size, cudaDevAttrMaxThreadsPerBlock, device_id);
CUDACHKERR();
num_blocks = (int) std::ceil(n_rows/(max_block_size*1.0));
fspmat_hyb_mv<<<num_blocks, max_block_size>>>(n_rows, num_matrices, diag_vals,
offdiag_vals, offdiag_colidxs, d_coefs, x, y);
cudaDeviceSynchronize();
CUDACHKERR();
}
void generate_fsp_mats_hyb(int n_states, int n_reactions, int n_species, int *fsp_bounds, CSRMatInt stoich,
PropFun prop_func, HYBMatSet *hyb) {
int *states;
cudaMallocManaged(&states, n_species*n_states*sizeof(int)); CUDACHKERR();
int *d_stoich_vals, *d_stoich_colidxs, *d_stoich_rowptrs;
cudaMalloc(&d_stoich_vals, stoich.nnz * sizeof(int));
CUDACHKERR();
cudaMalloc(&d_stoich_colidxs, stoich.nnz * sizeof(int));
CUDACHKERR();
cudaMalloc(&d_stoich_rowptrs, (stoich.n_rows + 1) * sizeof(int));
CUDACHKERR();
cudaMemcpy(d_stoich_vals, stoich.vals, stoich.nnz * sizeof(int), cudaMemcpyHostToDevice);
CUDACHKERR();
cudaMemcpy(d_stoich_colidxs, stoich.col_idxs, stoich.nnz * sizeof(int),
cudaMemcpyHostToDevice);
CUDACHKERR();
cudaMemcpy(d_stoich_rowptrs, stoich.row_ptrs, (stoich.n_rows + 1) * sizeof(int), cudaMemcpyHostToDevice);
CUDACHKERR();
CSRMatInt d_stoich;
d_stoich.n_rows = stoich.n_rows;
d_stoich.nnz = stoich.nnz;
d_stoich.n_cols = stoich.n_cols;
d_stoich.vals = d_stoich_vals;
d_stoich.col_idxs = d_stoich_colidxs;
d_stoich.row_ptrs = d_stoich_rowptrs;
int *d_fsp_bounds;
cudaMalloc(&d_fsp_bounds, n_species*sizeof(int)); CUDACHKERR();
cudaMemcpy(d_fsp_bounds, fsp_bounds, n_species*sizeof(int), cudaMemcpyHostToDevice); CUDACHKERR();
cudaMalloc(&(hyb->diag_vals), n_states * n_reactions * sizeof(double));
CUDACHKERR();
cudaMalloc(&(hyb->offdiag_vals), n_states * n_reactions * sizeof(double));
CUDACHKERR();
cudaMalloc(&(hyb->offdiag_colidxs), n_states * n_reactions * sizeof(int));
CUDACHKERR();
// Get the max number of threads that can fit to a block
int max_block_size, num_blocks;
int device_id;
cudaGetDevice(&device_id);
CUDACHKERR();
cudaDeviceGetAttribute(&max_block_size, cudaDevAttrMaxThreadsPerBlock, device_id);
CUDACHKERR();
// Generate data for hyb
hyb->n_rows = n_states;
hyb->num_matrices = n_reactions;
cudaMalloc(&(hyb->d_coefs), hyb->num_matrices*sizeof(double)); CUDACHKERR();
num_blocks = (int) std::ceil(n_states/(max_block_size*1.0));
fsp_get_states<<<num_blocks, max_block_size, n_species*sizeof(int)>>>(states, n_species, n_states, d_fsp_bounds);
cudaDeviceSynchronize();
CUDACHKERR();
int shared_mem_size = n_species*sizeof(int) + (stoich.nnz*2 + stoich.n_rows+1)*sizeof(int);
fspmat_hyb_fill_data<<<num_blocks, max_block_size, shared_mem_size>>>(n_species, n_reactions, n_states, d_fsp_bounds, states, d_stoich,
prop_func, hyb->diag_vals, hyb->offdiag_vals, hyb->offdiag_colidxs);
cudaDeviceSynchronize();
CUDACHKERR();
cudaFree(d_stoich_colidxs);
CUDACHKERR();
cudaFree(d_stoich_rowptrs);
CUDACHKERR();
cudaFree(d_stoich_vals);
CUDACHKERR();
cudaFree(d_fsp_bounds);
CUDACHKERR();
cudaFree(states);
CUDACHKERR();
}
} |
c5edb2f0dce373d39ca3c85374eaa4e2eecfd8af.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
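// init_memory (above) builds a per-warp pointer-chasing ring: each warp owns
// GLOBAL_MEM_ELEMENTS / num_warps_per_block consecutive elements, and element i
// stores the address of element (i + 16) within that ring, so every load in the
// chase below depends on the previous one and cannot be overlapped.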
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
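  // Fold the chased pointer and the accumulated sum into an observable store so the
  // compiler cannot eliminate the dependent load chain above as dead code.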
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
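  // Note: "GPUJOULE_DIR" inside the string below is plain text, not the macro
  // defined at the top of this file; the harness is presumably expected to
  // substitute the real path before this benchmark is run.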
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_10_90_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| c5edb2f0dce373d39ca3c85374eaa4e2eecfd8af.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_10_90_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
f91b7b835104e5b4d43f0b6e9935227550e0c754.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/color.hpp"
#include "cvt_color_internal.h"
namespace cv { namespace gpu { namespace device
{
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \
typedef typename traits::functor_type::result_type dst_t; \
cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
}
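/*
 * Illustrative expansion (reference only, not compiled): instantiating the macro above as
 * OPENCV_GPU_IMPLEMENT_CVTCOLOR(bgr_to_rgb_8u, bgr_to_rgb_traits<uchar>) produces roughly:
 *
 *   void bgr_to_rgb_8u(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
 *   {
 *       bgr_to_rgb_traits<uchar>::functor_type functor = bgr_to_rgb_traits<uchar>::create_functor();
 *       typedef typename bgr_to_rgb_traits<uchar>::functor_type::argument_type src_t;
 *       typedef typename bgr_to_rgb_traits<uchar>::functor_type::result_type dst_t;
 *       cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream);
 *   }
 *
 * i.e. every conversion listed below becomes one host-callable function that runs the
 * per-pixel colour functor through cv::gpu::device::transform on the given stream.
 */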
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits)
#ifdef OPENCV_TINY_GPU_MODULE
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#else
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#endif
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| f91b7b835104e5b4d43f0b6e9935227550e0c754.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/color.hpp"
#include "cvt_color_internal.h"
namespace cv { namespace gpu { namespace device
{
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_x = 8 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr555_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_bgr565_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr555_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_bgra_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgr565_to_rgba_traits::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr555_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(gray_to_bgr565_traits::functor_type)
{
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_yuv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(yuv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_YCrCb4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(YCrCb4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_xyz4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(xyz4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hsv4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hsv4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(rgba_to_hls4_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_bgra_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(hls4_to_rgba_traits<uchar>::functor_type)
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, traits) \
void name(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) \
{ \
traits::functor_type functor = traits::create_functor(); \
typedef typename traits::functor_type::argument_type src_t; \
typedef typename traits::functor_type::result_type dst_t; \
cv::gpu::device::transform((PtrStepSz<src_t>)src, (PtrStepSz<dst_t>)dst, functor, WithOutMask(), stream); \
}
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name, name ## _traits)
#ifdef OPENCV_TINY_GPU_MODULE
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#else
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _16u, name ## _traits<ushort>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#endif
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>)
#define OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(name) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _8u, name ## _traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _32f, name ## _traits<float>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_8u, name ## _full_traits<uchar>) \
OPENCV_GPU_IMPLEMENT_CVTCOLOR(name ## _full_32f, name ## _full_traits<float>)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgb_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgra_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(rgba_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(gray_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr555)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(gray_to_bgr565)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr555_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE(bgr565_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_gray)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_yuv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(yuv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_YCrCb4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(YCrCb4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgb_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(rgba_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgr_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(bgra_to_xyz4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL(xyz4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hsv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hsv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgb_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(rgba_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgr_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(bgra_to_hls4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL(hls4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_lab4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lab4_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(rgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(bgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgb_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lrgba_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgr_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(lbgra_to_luv4)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_rgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_bgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgb)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lrgba)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgr)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv_to_lbgra)
OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F(luv4_to_lbgra)
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ONE
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_ALL
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F
#undef OPENCV_GPU_IMPLEMENT_CVTCOLOR_8U32F_FULL
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
3694adb5407955f3f272d45385da366d56f575d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "KNNBinDataV1.cuh"
#include "CUDAGlobals.cuh"
//#include "DebugUtils.h"
using namespace OpenSteer;
extern "C"
{
// Bind the textures to the input hipArray.
__host__ void KNNBinningV1BindTexture( hipArray * pCudaArray );
// Unbind the textures.
__host__ void KNNBinningV1UnbindTexture( void );
}
KNNBinDataV1::KNNBinDataV1( uint3 const& worldCells, float3 const& worldSize, uint const searchRadius )
: m_worldCells( worldCells ),
m_worldSize( worldSize ),
m_nSearchRadius( searchRadius )
{
m_nCells = m_worldCells.x * m_worldCells.y * m_worldCells.z;
// Create the cells.
CreateCells();
}
void KNNBinDataV1::CreateCells( void )
{
float3 const step = make_float3( m_worldSize.x / m_worldCells.x, // width
m_worldSize.y / m_worldCells.y, // height
m_worldSize.z / m_worldCells.z ); // depth
float3 const stepNormalized = make_float3( step.x / m_worldSize.x,
step.y / m_worldSize.y,
step.z / m_worldSize.z
);
/*
Texture addressing in CUDA operates as follows.
z|
| y/
| /
| /
| /
| /
|/_________x
*/
size_t const numCells = m_worldCells.x * m_worldCells.y * m_worldCells.z;
// Allocate host memory to temporarily store the 3D texture data.
uint * phCellIndices = (uint*)malloc( numCells * sizeof(uint) );
uint index = 0;
for( size_t iHeight = 0; iHeight < m_worldCells.y; iHeight++ ) // height - texture z axis, world y axis
{
for( size_t iDepth = 0; iDepth < m_worldCells.z; iDepth++ ) // depth - texture y axis, world z axis
{
for( size_t iWidth = 0; iWidth < m_worldCells.x; iWidth++ ) // width - texture x axis, world x axis
{
// Make a bin_cell structure.
bin_cell bc;
//bc.iBinIndex = iBinIndex;
bc.index = iWidth + (iDepth * m_worldCells.x) + (iHeight * m_worldCells.z * m_worldCells.x);
// Set the offset value for the cell lookup texture.
phCellIndices[index] = bc.index;
index++;
}
}
}
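// Worked example of the index computed above (hypothetical grid for illustration):
// with m_worldCells = (10, 4, 10), the cell at iWidth = 2, iDepth = 3, iHeight = 1
// gets index 2 + 3*10 + 1*10*10 = 132, i.e. width varies fastest, then depth, then
// height - matching the texture x/y/z ordering sketched in the comment above.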
// Prepare the bin_cell index lookup texture.
hipExtent const extent = make_hipExtent( m_worldCells.x, m_worldCells.y, m_worldCells.z );
hipChannelFormatDesc const desc = hipCreateChannelDesc< uint >();
hipPitchedPtr srcPtr = make_hipPitchedPtr( (void*)phCellIndices, extent.width * sizeof(uint), extent.width, extent.height );
// Allocate m_pdCellIndexArray.
CUDA_SAFE_CALL( hipMalloc3DArray( &m_pdCellIndexArray, &desc, extent, hipArrayDefault ) );
// Copy data to 3D array.
hipMemcpy3DParms copyParms = {0};
copyParms.srcPtr = srcPtr;
copyParms.dstArray = m_pdCellIndexArray;
copyParms.extent = extent;
copyParms.kind = hipMemcpyHostToDevice;
CUDA_SAFE_CALL( hipMemcpy3D( &copyParms ) );
// Copy the m_worldSize and m_worldCells values to constant memory.
CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldSizeV1", &m_worldSize, sizeof(float3) ) );
CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldStepV1", &step, sizeof(float3) ) );
CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldStepNormalizedV1", &stepNormalized, sizeof(float3) ) );
CUDA_SAFE_CALL( hipMemcpyToSymbol( "constWorldCellsV1", &m_worldCells, sizeof(uint3) ) );
// Free host memory.
free( phCellIndices );
}
| 3694adb5407955f3f272d45385da366d56f575d7.cu | #include "KNNBinDataV1.cuh"
#include "CUDAGlobals.cuh"
//#include "DebugUtils.h"
using namespace OpenSteer;
extern "C"
{
// Bind the textures to the input cudaArray.
__host__ void KNNBinningV1BindTexture( cudaArray * pCudaArray );
// Unbind the textures.
__host__ void KNNBinningV1UnbindTexture( void );
}
KNNBinDataV1::KNNBinDataV1( uint3 const& worldCells, float3 const& worldSize, uint const searchRadius )
: m_worldCells( worldCells ),
m_worldSize( worldSize ),
m_nSearchRadius( searchRadius )
{
m_nCells = m_worldCells.x * m_worldCells.y * m_worldCells.z;
// Create the cells.
CreateCells();
}
void KNNBinDataV1::CreateCells( void )
{
float3 const step = make_float3( m_worldSize.x / m_worldCells.x, // width
m_worldSize.y / m_worldCells.y, // height
m_worldSize.z / m_worldCells.z ); // depth
float3 const stepNormalized = make_float3( step.x / m_worldSize.x,
step.y / m_worldSize.y,
step.z / m_worldSize.z
);
/*
Texture addressing in CUDA operates as follows.
z|
| y/
| /
| /
| /
| /
|/_________x
*/
size_t const numCells = m_worldCells.x * m_worldCells.y * m_worldCells.z;
// Allocate host memory to temporarily store the 3D texture data.
uint * phCellIndices = (uint*)malloc( numCells * sizeof(uint) );
uint index = 0;
for( size_t iHeight = 0; iHeight < m_worldCells.y; iHeight++ ) // height - texture z axis, world y axis
{
for( size_t iDepth = 0; iDepth < m_worldCells.z; iDepth++ ) // depth - texture y axis, world z axis
{
for( size_t iWidth = 0; iWidth < m_worldCells.x; iWidth++ ) // width - texture x axis, world x axis
{
// Make a bin_cell structure.
bin_cell bc;
//bc.iBinIndex = iBinIndex;
bc.index = iWidth + (iDepth * m_worldCells.x) + (iHeight * m_worldCells.z * m_worldCells.x);
// Set the offset value for the cell lookup texture.
phCellIndices[index] = bc.index;
index++;
}
}
}
// Prepare the bin_cell index lookup texture.
cudaExtent const extent = make_cudaExtent( m_worldCells.x, m_worldCells.y, m_worldCells.z );
cudaChannelFormatDesc const desc = cudaCreateChannelDesc< uint >();
cudaPitchedPtr srcPtr = make_cudaPitchedPtr( (void*)phCellIndices, extent.width * sizeof(uint), extent.width, extent.height );
// Allocate m_pdCellIndexArray.
CUDA_SAFE_CALL( cudaMalloc3DArray( &m_pdCellIndexArray, &desc, extent, cudaArrayDefault ) );
// Copy data to 3D array.
cudaMemcpy3DParms copyParms = {0};
copyParms.srcPtr = srcPtr;
copyParms.dstArray = m_pdCellIndexArray;
copyParms.extent = extent;
copyParms.kind = cudaMemcpyHostToDevice;
CUDA_SAFE_CALL( cudaMemcpy3D( &copyParms ) );
// Copy the m_worldSize and m_worldCells values to constant memory.
CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldSizeV1", &m_worldSize, sizeof(float3) ) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldStepV1", &step, sizeof(float3) ) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldStepNormalizedV1", &stepNormalized, sizeof(float3) ) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol( "constWorldCellsV1", &m_worldCells, sizeof(uint3) ) );
// Free host memory.
free( phCellIndices );
}
|
8aa6050ea40eb9afc70645278f2dc147b5c5279e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/activation/repmat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
static __global__ void repmat_kernel(int count, int channels,int spatial_dim, const float *in, float *out)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / spatial_dim / channels;
int c = i / spatial_dim % channels;
out[i] = in[n*channels+c];
}
}
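// Illustrative trace (hypothetical sizes): with channels = 2 and spatial_dim = 3,
// output element i maps to n = i/3/2 and c = (i/3) % 2, so the first sample becomes
// [in[0], in[0], in[0], in[1], in[1], in[1]] - each (n, c) scalar is repeated across
// the spatial positions. The backward pass below undoes this by summing the top diff
// over spatial_dim via a GEMM against an all-ones vector.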
void RepmatLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim;
if (bottom.size() == 2)
spatial_dim = bottom[1]->height() * bottom[1]->width();
else
spatial_dim = this->layer_param_.shape_param().height() * this->layer_param_.shape_param().width();
hipLaunchKernelGGL(( repmat_kernel), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(),channels,spatial_dim,bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
}
void RepmatLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim;
if (bottom.size() == 2)
spatial_dim = bottom[1]->height() * bottom[1]->width();
else
spatial_dim = this->layer_param_.shape_param().height() * this->layer_param_.shape_param().width();
caffe_gpu_set(one_multiplier_->count(),float(1),one_multiplier_->mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num*channels, 1, spatial_dim,
(float)1., top[0]->gpu_diff() , one_multiplier_->gpu_data(),
(float)0., bottom[0]->mutable_gpu_diff());
}
void RepmatLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
| 8aa6050ea40eb9afc70645278f2dc147b5c5279e.cu |
#include <vector>
#include "caffe/layers/activation/repmat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
static __global__ void repmat_kernel(int count, int channels,int spatial_dim, const float *in, float *out)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / spatial_dim / channels;
int c = i / spatial_dim % channels;
out[i] = in[n*channels+c];
}
}
void RepmatLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim;
if (bottom.size() == 2)
spatial_dim = bottom[1]->height() * bottom[1]->width();
else
spatial_dim = this->layer_param_.shape_param().height() * this->layer_param_.shape_param().width();
repmat_kernel<<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
(top[0]->count(),channels,spatial_dim,bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
}
void RepmatLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim;
if (bottom.size() == 2)
spatial_dim = bottom[1]->height() * bottom[1]->width();
else
spatial_dim = this->layer_param_.shape_param().height() * this->layer_param_.shape_param().width();
caffe_gpu_set(one_multiplier_->count(),float(1),one_multiplier_->mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num*channels, 1, spatial_dim,
(float)1., top[0]->gpu_diff() , one_multiplier_->gpu_data(),
(float)0., bottom[0]->mutable_gpu_diff());
}
void RepmatLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
|
e3a8a9f32ead4d6c0a54817ceac4c6795a68e64e.hip | // !!! This is a file automatically generated by hipify!!!
#include "CrystalBallThrustFunctor.hh"
__device__ fptype device_CrystalBall (fptype* evt, fptype* p, unsigned int* indices) {
// Left-hand tail if alpha is less than 0,
// right-hand tail if greater, pure Gaussian if 0.
//return 1;
fptype x = evt[indices[2 + indices[0]]];
fptype mean = p[indices[1]];
fptype sigma = p[indices[2]];
fptype alpha = p[indices[3]];
fptype power = p[indices[4]];
fptype rx = (sigma != 0) ? (x-mean) / sigma : 0;
fptype ret = 0;
if ((alpha > 0 && rx <= alpha) || // Right-hand tail, in Gaussian region
(alpha < 0 && rx >= alpha) || // Left-hand tail, in Gaussian region
(alpha == 0)) { // Pure Gaussian
ret = EXP(-0.5*rx*rx);
}
else { // Tail part
fptype n_over_alpha = power / alpha;
fptype a = EXP(-0.5 * alpha*alpha);
fptype b = n_over_alpha - alpha;
fptype d = b + rx;
d = (d != 0) ? n_over_alpha / d : 0;
ret = a*POW(d, power);
}
//if ((0 == threadIdx.x) && (0 == blockIdx.x)) printf("device_CB: %f %f %f %f %f %f\n", x, mean, sigma, alpha, power, ret);
return ret;
}
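// Reference form of the expression above: in the Gaussian region ret = exp(-rx*rx/2);
// in the tail ret = exp(-alpha*alpha/2) * ((n/alpha) / (n/alpha - alpha + rx))^n with
// n = power, which by construction equals the Gaussian value (and matches its slope)
// at rx = alpha - the usual Crystal Ball shape of a Gaussian core with a power-law tail.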
__device__ device_function_ptr ptr_to_CrystalBall = device_CrystalBall;
__host__ CrystalBallThrustFunctor::CrystalBallThrustFunctor (std::string n, Variable* _x, Variable* mean, Variable* sigma, Variable* alpha, Variable* power)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(mean));
pindices.push_back(registerParameter(sigma));
pindices.push_back(registerParameter(alpha));
if (!power) power = new Variable(n + "_n", 2);
pindices.push_back(registerParameter(power));
hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_CrystalBall, sizeof(void*));
initialise(pindices);
}
__host__ fptype CrystalBallThrustFunctor::integrate (fptype lo, fptype hi) const {
static const fptype sqrtPiOver2 = 1.2533141373;
static const fptype sqrt2 = 1.4142135624;
fptype result = 0.0;
bool useLog = false;
unsigned int* indices = host_indices + parameters;
fptype mean = host_params[indices[1]];
fptype sigma = host_params[indices[2]];
fptype alpha = host_params[indices[3]];
fptype power = host_params[indices[4]];
if (fabs(power-1.0) < 1.0e-05) useLog = true;
fptype tmin = (lo - mean) / sigma;
fptype tmax = (hi - mean) / sigma;
if (alpha < 0) {
fptype tmp = tmin;
tmin = -tmax;
tmax = -tmp;
}
fptype absAlpha = fabs(alpha);
if (tmin >= -absAlpha) {
result += sigma*sqrtPiOver2*(ERF(tmax/sqrt2) -
ERF(tmin/sqrt2));
}
else if (tmax <= -absAlpha) {
fptype a = POW(power/absAlpha,power)*exp(-0.5*absAlpha*absAlpha);
fptype b = power/absAlpha - absAlpha;
if (useLog) {
result += a*sigma*(log(b-tmin) - log(b-tmax));
}
else {
result += a*sigma/(1.0-power)*(1.0/(POW(b-tmin,power-1.0)) - 1.0/(POW(b-tmax,power-1.0)));
}
}
else {
fptype a = POW(power/absAlpha,power)*exp(-0.5*absAlpha*absAlpha);
fptype b = power/absAlpha - absAlpha;
fptype term1 = 0.0;
if (useLog) {
term1 = a*sigma*( log(b-tmin) - log(power/absAlpha));
}
else {
term1 = a*sigma/(1.0-power)*(1.0/(POW(b-tmin,power-1.0)) - 1.0/(POW(power/absAlpha,power-1.0)));
}
fptype term2 = sigma*sqrtPiOver2*(ERF(tmax/sqrt2) - ERF(-absAlpha/sqrt2));
result += term1 + term2;
}
return result;
}
| e3a8a9f32ead4d6c0a54817ceac4c6795a68e64e.cu | #include "CrystalBallThrustFunctor.hh"
__device__ fptype device_CrystalBall (fptype* evt, fptype* p, unsigned int* indices) {
// Left-hand tail if alpha is less than 0,
// right-hand tail if greater, pure Gaussian if 0.
//return 1;
fptype x = evt[indices[2 + indices[0]]];
fptype mean = p[indices[1]];
fptype sigma = p[indices[2]];
fptype alpha = p[indices[3]];
fptype power = p[indices[4]];
fptype rx = (sigma != 0) ? (x-mean) / sigma : 0;
fptype ret = 0;
if ((alpha > 0 && rx <= alpha) || // Right-hand tail, in Gaussian region
(alpha < 0 && rx >= alpha) || // Left-hand tail, in Gaussian region
(alpha == 0)) { // Pure Gaussian
ret = EXP(-0.5*rx*rx);
}
else { // Tail part
fptype n_over_alpha = power / alpha;
fptype a = EXP(-0.5 * alpha*alpha);
fptype b = n_over_alpha - alpha;
fptype d = b + rx;
d = (d != 0) ? n_over_alpha / d : 0;
ret = a*POW(d, power);
}
//if ((0 == threadIdx.x) && (0 == blockIdx.x)) printf("device_CB: %f %f %f %f %f %f\n", x, mean, sigma, alpha, power, ret);
return ret;
}
__device__ device_function_ptr ptr_to_CrystalBall = device_CrystalBall;
__host__ CrystalBallThrustFunctor::CrystalBallThrustFunctor (std::string n, Variable* _x, Variable* mean, Variable* sigma, Variable* alpha, Variable* power)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
pindices.push_back(registerParameter(mean));
pindices.push_back(registerParameter(sigma));
pindices.push_back(registerParameter(alpha));
if (!power) power = new Variable(n + "_n", 2);
pindices.push_back(registerParameter(power));
cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_CrystalBall, sizeof(void*));
initialise(pindices);
}
__host__ fptype CrystalBallThrustFunctor::integrate (fptype lo, fptype hi) const {
static const fptype sqrtPiOver2 = 1.2533141373;
static const fptype sqrt2 = 1.4142135624;
fptype result = 0.0;
bool useLog = false;
unsigned int* indices = host_indices + parameters;
fptype mean = host_params[indices[1]];
fptype sigma = host_params[indices[2]];
fptype alpha = host_params[indices[3]];
fptype power = host_params[indices[4]];
if (fabs(power-1.0) < 1.0e-05) useLog = true;
fptype tmin = (lo - mean) / sigma;
fptype tmax = (hi - mean) / sigma;
if (alpha < 0) {
fptype tmp = tmin;
tmin = -tmax;
tmax = -tmp;
}
fptype absAlpha = fabs(alpha);
if (tmin >= -absAlpha) {
result += sigma*sqrtPiOver2*(ERF(tmax/sqrt2) -
ERF(tmin/sqrt2));
}
else if (tmax <= -absAlpha) {
fptype a = POW(power/absAlpha,power)*exp(-0.5*absAlpha*absAlpha);
fptype b = power/absAlpha - absAlpha;
if (useLog) {
result += a*sigma*(log(b-tmin) - log(b-tmax));
}
else {
result += a*sigma/(1.0-power)*(1.0/(POW(b-tmin,power-1.0)) - 1.0/(POW(b-tmax,power-1.0)));
}
}
else {
fptype a = POW(power/absAlpha,power)*exp(-0.5*absAlpha*absAlpha);
fptype b = power/absAlpha - absAlpha;
fptype term1 = 0.0;
if (useLog) {
term1 = a*sigma*( log(b-tmin) - log(power/absAlpha));
}
else {
term1 = a*sigma/(1.0-power)*(1.0/(POW(b-tmin,power-1.0)) - 1.0/(POW(power/absAlpha,power-1.0)));
}
fptype term2 = sigma*sqrtPiOver2*(ERF(tmax/sqrt2) - ERF(-absAlpha/sqrt2));
result += term1 + term2;
}
return result;
}
|
92a879266909fd45d968b5bf7666f73398f79e2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "sample-common.h"
#include <stdio.h>
#include <sys/time.h>
double cpuSecond(){
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
double sec = ts.tv_sec;
double usec = (double)ts.tv_nsec / (double)(1000 * 1000 * 1000);
// printf("sec = %f\n", sec);
// printf("usec = %f\n", usec);
return sec + usec;
}
void initialData(float* data, int size){
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; i++){
data[i] = (float)(rand() & 0xff) / 10.0f;
}
}
void initialDataInt(int* data, int size){
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; i++){
data[i] = (int)(rand() & 0xff);
}
}
void clearData(float* data, int size){
for(int i = 0; i < size; i++){
data[i] = 0;
}
}
void initAtNum(int* A, size_t size, int val)
{
for(int i = 0;i < size; i++){
A[i] = val;
}
}
void initDataSeq(int* A, size_t size)
{
for(int i = 0;i < size; i++){
A[i] = i;
}
}
void printMatrixInt(int* A, const int x, const int y){
int *ic = A;
printf("Matrix: (%d, %d)\n", x, y);
for(int iy = 0; iy < y; iy++){
for(int ix = 0; ix < x; ix++){
printf("%3d, ", ic[ix]);
}
ic += x;
printf("\n");
}
printf("\n");
}
void cudaDeviceInit(int dev){
hipDeviceProp_t prop;
CHECK(hipGetDeviceProperties(&prop, dev));
printf("Using device %d, %s\n", dev, prop.name);
CHECK(hipSetDevice(dev));
}
int checkResult(float* A, float* B, size_t size){
for(int i = 0;i < size; i++){
if(A[i] != B[i]){
return i;
}
}
return 0;
}
| 92a879266909fd45d968b5bf7666f73398f79e2c.cu | #include "sample-common.h"
#include <stdio.h>
#include <sys/time.h>
double cpuSecond(){
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
double sec = ts.tv_sec;
double usec = (double)ts.tv_nsec / (double)(1000 * 1000 * 1000);
// printf("sec = %f\n", sec);
// printf("usec = %f\n", usec);
return sec + usec;
}
void initialData(float* data, int size){
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; i++){
data[i] = (float)(rand() & 0xff) / 10.0f;
}
}
void initialDataInt(int* data, int size){
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; i++){
data[i] = (int)(rand() & 0xff);
}
}
void clearData(float* data, int size){
for(int i = 0; i < size; i++){
data[i] = 0;
}
}
void initAtNum(int* A, size_t size, int val)
{
for(int i = 0;i < size; i++){
A[i] = val;
}
}
void initDataSeq(int* A, size_t size)
{
for(int i = 0;i < size; i++){
A[i] = i;
}
}
void printMatrixInt(int* A, const int x, const int y){
int *ic = A;
printf("Matrix: (%d, %d)\n", x, y);
for(int iy = 0; iy < y; iy++){
for(int ix = 0; ix < x; ix++){
printf("%3d, ", ic[ix]);
}
ic += x;
printf("\n");
}
printf("\n");
}
void cudaDeviceInit(int dev){
cudaDeviceProp prop;
CHECK(cudaGetDeviceProperties(&prop, dev));
printf("Using device %d, %s\n", dev, prop.name);
CHECK(cudaSetDevice(dev));
}
int checkResult(float* A, float* B, size_t size){
for(int i = 0;i < size; i++){
if(A[i] != B[i]){
return i;
}
}
return 0;
}
|
502aec0f94a126d4d0e341ad39f834649fc88ae0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MaskByNaN( float* inputImage, float* mask, float* outputImage, int count ) {
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id < count)
{
if (mask[id] == 0.0f)
{
outputImage[id] = NAN;
}
else {
outputImage[id] = inputImage[id];
}
}
} | 502aec0f94a126d4d0e341ad39f834649fc88ae0.cu | #include "includes.h"
__global__ void MaskByNaN( float* inputImage, float* mask, float* outputImage, int count ) {
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id < count)
{
if (mask[id] == 0.0f)
{
outputImage[id] = NAN;
}
else {
outputImage[id] = inputImage[id];
}
}
} |
b85d2ab4c9b14dd9d180f990869abbc9f839108e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include"pathalg.h"
static const int WORK_SIZE =258;
void parallelor::copydata(int s,vector<edge>&edges,int nodenum){
};
void parallelor::dellocate(){
};
void parallelor::allocate(int maxn,int maxedge){
}
void parallelor::topsort()
{
cout<<" in top sort "<<endl;
queue<int>zero;
vector<int>order(nodenum*LY,-1);
for(int i=0;i<nodenum*LY;i++)
zero.push(i);
int biao=0;
while(!zero.empty())
{
int node=zero.front();
zero.pop();
order[node]=biao++;
for(int i=0;i<neibn[node].size();i++)
{
if((--ancestor[neibn[node][i]])==0)
zero.push(neibn[node][i]);
}
}
vector<pair<int,int>>tmp;
for(int i=0;i<order.size();i++)
tmp.push_back(make_pair(i,order[i]));
sort(tmp.begin(),tmp.end(),pairless());
for(int i=0;i<order.size();i++)
ordernode.push_back(tmp[i].first);
};
void parallelor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,vector<vector<int>>&relate,ginfo ginf)
{
cout<<"in cuda init"<<endl;
nodenum=ginf.pnodesize;
edges=ext.first;
vector<vector<int>>esigns;
esigns=ext.second;
mark=new int;
*mark=0;
W=WD+1;
int *d,*dev_d,*pred,*dev_pred;
st=new int[2*edges.size()*LY];
te=new int[2*edges.size()*LY];
d=new int[nodenum*LY*YE];
esignes=new int[edges.size()*LY];
vector<vector<int>>nein(nodenum*LY,vector<int>());
neibn=nein;
vector<vector<int>>neie(nodenum,vector<int>());
for(int i=0;i<edges.size();i++)
{
int s=edges[i].s;
int t=edges[i].t;
neibn[s].push_back(t);
neibn[t].push_back(s);
neie[s].push_back(i);
neie[t].push_back(i);
}
int count=0;
for(int k=0;k<LY;k++)
for(int i=0;i<nodenum;i++)
for(int j=0;j<neibn[i].size();j++)
{
st[count]=i;
if(esigns[k][neie[i][j]]==-1)
te[count]=i;
te[count]=neibn[i][j];
count++;
}
count=0;
for(int i=0;i<nodenum*LY*YE;i++)
d[i]=INT_MAX/2;
for(int k=0;k<LY;k++)
{
int boff=k*YE*nodenum;
for(int i=0;i<YE;i++)
{
int soff=i*nodenum;
for(int j=0;j<stpair.size();j++)
d[boff+soff+stpair[i].first]=0;
}
}
hipMalloc((void**)&dev_st,edges.size()*sizeof(int));
hipMalloc((void**)&dev_te,edges.size()*sizeof(int));
hipMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int));
hipMemcpy(dev_te,te,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_st,st,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_d,d,YE*LY*nodenum*sizeof(int),hipMemcpyHostToDevice);
cout<<"get out"<<endl;
};
parallelor::parallelor()
{
};
__global__ void BFSfast(int *st,int *te,int *d,int*esign,int round,int E,int N)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ye=i/(E*LY);
int eid=(i%(E*LY));
int ly=eid/E;
int off=ye*N+ly*N*YE;
int s=st[eid],t=te[eid];
if(d[s+off]==round-1&&d[t+off]>round)
d[t+off]=round;
}
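// Sketch of how this kernel is used (see routalg below): each launch performs one
// relaxation round over the edge list for every (source, layer) copy of the graph -
// a thread picks one edge of one copy via the ye/ly/eid decomposition, and if the
// edge tail s was reached in round-1 it marks the head t as reached in `round`.
// Sources are pre-initialised to distance 0 in init(), so successive rounds up to WD
// each extend the BFS frontier by one hop.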
vector<int> parallelor:: routalg(int s,int t,int bw)
{
cout<<"blasting "<<endl;
int kk=1;
int size=edges.size()*LY*YE;
time_t start,end;
start=clock();
for(int i=0;i<=WD;i++)
hipLaunchKernelGGL(( BFSfast), dim3(size/512+1),dim3(512), 0, 0, st,te,d,esignes,i,edges.size(),nodenum);
end=clock();
cout<<"GPU time is : "<<end-start<<endl;
cout<<"over!"<<endl;
return vector<int>();
};
int fls(int x)
{
int position;
int i;
if(x!=0)
for(i=(x>>1),position=0;i!=0;++position)
i>>=1;
else
position=-1;
return pow(2,position+1);
} | b85d2ab4c9b14dd9d180f990869abbc9f839108e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include"pathalg.h"
static const int WORK_SIZE =258;
void parallelor::copydata(int s,vector<edge>&edges,int nodenum){
};
void parallelor::dellocate(){
};
void parallelor::allocate(int maxn,int maxedge){
}
void parallelor::topsort()
{
cout<<" in top sort "<<endl;
queue<int>zero;
vector<int>order(nodenum*LY,-1);
for(int i=0;i<nodenum*LY;i++)
zero.push(i);
int biao=0;
while(!zero.empty())
{
int node=zero.front();
zero.pop();
order[node]=biao++;
for(int i=0;i<neibn[node].size();i++)
{
if((--ancestor[neibn[node][i]])==0)
zero.push(neibn[node][i]);
}
}
vector<pair<int,int>>tmp;
for(int i=0;i<order.size();i++)
tmp.push_back(make_pair(i,order[i]));
sort(tmp.begin(),tmp.end(),pairless());
for(int i=0;i<order.size();i++)
ordernode.push_back(tmp[i].first);
};
void parallelor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,vector<vector<int>>&relate,ginfo ginf)
{
cout<<"in cuda init"<<endl;
nodenum=ginf.pnodesize;
edges=ext.first;
vector<vector<int>>esigns;
esigns=ext.second;
mark=new int;
*mark=0;
W=WD+1;
int *d,*dev_d,*pred,*dev_pred;
st=new int[2*edges.size()*LY];
te=new int[2*edges.size()*LY];
d=new int[nodenum*LY*YE];
esignes=new int[edges.size()*LY];
vector<vector<int>>nein(nodenum*LY,vector<int>());
neibn=nein;
vector<vector<int>>neie(nodenum,vector<int>());
for(int i=0;i<edges.size();i++)
{
int s=edges[i].s;
int t=edges[i].t;
neibn[s].push_back(t);
neibn[t].push_back(s);
neie[s].push_back(i);
neie[t].push_back(i);
}
int count=0;
for(int k=0;k<LY;k++)
for(int i=0;i<nodenum;i++)
for(int j=0;j<neibn[i].size();j++)
{
st[count]=i;
if(esigns[k][neie[i][j]]==-1)
te[count]=i;
te[count]=neibn[i][j];
count++;
}
count=0;
for(int i=0;i<nodenum*LY*YE;i++)
d[i]=INT_MAX/2;
for(int k=0;k<LY;k++)
{
int boff=k*YE*nodenum;
for(int i=0;i<YE;i++)
{
int soff=i*nodenum;
for(int j=0;j<stpair.size();j++)
d[boff+soff+stpair[i].first]=0;
}
}
cudaMalloc((void**)&dev_st,edges.size()*sizeof(int));
cudaMalloc((void**)&dev_te,edges.size()*sizeof(int));
cudaMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int));
cudaMemcpy(dev_te,te,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_st,st,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_d,d,YE*LY*nodenum*sizeof(int),cudaMemcpyHostToDevice);
cout<<"get out"<<endl;
};
parallelor::parallelor()
{
};
__global__ void BFSfast(int *st,int *te,int *d,int*esign,int round,int E,int N)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ye=i/(E*LY);
int eid=(i%(E*LY));
int ly=eid/E;
int off=ye*N+ly*N*YE;
int s=st[eid],t=te[eid];
if(d[s+off]==round-1&&d[t+off]>round)
d[t+off]=round;
}
vector<int> parallelor:: routalg(int s,int t,int bw)
{
cout<<"blasting "<<endl;
int kk=1;
int size=edges.size()*LY*YE;
time_t start,end;
start=clock();
for(int i=0;i<=WD;i++)
BFSfast<<<size/512+1,512>>>(st,te,d,esignes,i,edges.size(),nodenum);
end=clock();
cout<<"GPU time is : "<<end-start<<endl;
cout<<"over!"<<endl;
return vector<int>();
};
int fls(int x)
{
int position;
int i;
if(x!=0)
for(i=(x>>1),position=0;i!=0;++position)
i>>=1;
else
position=-1;
return pow(2,position+1);
} |
9ec529171bd5e2ec52de42c881f56d4ea6e764ea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reg_GetConjugateGradient2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *nodeNMIGradientArray_d = NULL;
hipMalloc(&nodeNMIGradientArray_d, XSIZE*YSIZE);
float4 *conjugateG_d = NULL;
hipMalloc(&conjugateG_d, XSIZE*YSIZE);
float4 *conjugateH_d = NULL;
hipMalloc(&conjugateH_d, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((reg_GetConjugateGradient2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
hipDeviceSynchronize();
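// Benchmark pattern: one launch plus the device synchronization above, ten
// untimed warm-up launches, then 1000 launches timed with steady_clock (no
// device synchronization inside the timed region); the per-configuration time
// is printed in microseconds together with the block and matrix sizes.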
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((reg_GetConjugateGradient2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((reg_GetConjugateGradient2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9ec529171bd5e2ec52de42c881f56d4ea6e764ea.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reg_GetConjugateGradient2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *nodeNMIGradientArray_d = NULL;
cudaMalloc(&nodeNMIGradientArray_d, XSIZE*YSIZE);
float4 *conjugateG_d = NULL;
cudaMalloc(&conjugateG_d, XSIZE*YSIZE);
float4 *conjugateH_d = NULL;
cudaMalloc(&conjugateH_d, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reg_GetConjugateGradient2_kernel<<<gridBlock,threadBlock>>>(nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reg_GetConjugateGradient2_kernel<<<gridBlock,threadBlock>>>(nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reg_GetConjugateGradient2_kernel<<<gridBlock,threadBlock>>>(nodeNMIGradientArray_d,conjugateG_d,conjugateH_d);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6e7a130fa38a622af7b6ba61e65cf7706435e27c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlarf.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_kernel( int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
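// One thread block handles column blockIdx.x of C: the block reduces
// w = v**H * c_j into shared memory via magma_sum_reduce (using v[0] == 1
// implicitly), then applies the rank-1 update c_j := c_j - tau * w * v.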
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v**H * C */
if (tx==0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ){
tmp += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CNJG(*dtau) * sum[0];
for( int j = m-tx-1; j>0 ; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if(tx==0) dc[0] += tmp;
}
}
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_smkernel( int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CNJG(*dtau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_dlarf_sm(magma_int_t m, magma_int_t n, double *dv, double *dtau,
double *dc, magma_int_t lddc)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_dlarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, dv, dtau, dc, lddc );
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
*/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dv,
magmaDouble_const_ptr dtau,
magmaDouble_ptr dC, magma_int_t lddc)
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
hipLaunchKernelGGL(( magma_dlarf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
//==============================================================================
| 6e7a130fa38a622af7b6ba61e65cf7706435e27c.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlarf.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_kernel( int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v**H * C */
if (tx==0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ){
tmp += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CNJG(*dtau) * sum[0];
for( int j = m-tx-1; j>0 ; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if(tx==0) dc[0] += tmp;
}
}
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_smkernel( int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CNJG(*dtau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_dlarf_sm(magma_int_t m, magma_int_t n, double *dv, double *dtau,
double *dc, magma_int_t lddc)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_dlarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, dv, dtau, dc, lddc );
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
*/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dv,
magmaDouble_const_ptr dtau,
magmaDouble_ptr dC, magma_int_t lddc)
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
magma_dlarf_kernel<<< grid, threads, 0, magma_stream >>>( m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
//==============================================================================
|
c4fab504c2fd408e5048a1136d5366c695b41621.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* test_search.cpp
*
* Created on: Oct 19, 2011
* Author: mac
*/
#include <iostream>
#include "universal_header.h"
using namespace std;
#include "hash_test.cu"
int test_cuda(void) {
int ref_num = loadRef("toy_ref_0");
long long coord_num = loadHash("toy_hash_0");
hash_test(coord_num);
ref_test(ref_num);
int fragment_set = 4;
GPU_fragment test_fragment[fragment_set];
strcpy(test_fragment[0].fragment,
"GGGTGGTAGGTGCAGAGACGGGAGGGGCAGAGCCGCAGGCACAGCCAAGAGGGCTGAAGAAATGGTAGAACGGAGCAGCTGGTGATGTGTGGGCCCACCGGCCCCAGG");
strcpy(test_fragment[1].fragment,
"TGGCCCTGGGAGAGCAGGTGGAAGATCAGGCAGGCCATCGCTGCCACAGAACCCAGTGGATTGGCCTAGGTGGGATCTCTGAGCTCAACAAGCCCTCTCTCTTAGTTT");
strcpy(test_fragment[2].fragment,
"CCTAACCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCCTAACCCTAACCCTAACCCTAACCCTAACCTAACCCTAACCCTAACCCTAACCCTAACC");
strcpy(test_fragment[3].fragment,
"CCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCCTAACCCTAACCCTAACCCTAACCCTCGCGGTACCCTCAGCCGGCCCGCCCGCCCGGGTCTGACCTGAGG");
//Getting the sort key.
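// For each test fragment: cut it into KEY_NUMBER keys of KEY_LENGTH characters,
// hash every key to find its hash-table entry and that entry's size in the
// coordinate table, then pass the per-key statistics to sortPrefilter() before
// printing the sorted keys.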
cout << "********** Fragment Input **************************************************" << endl;
for (int k = 0; k < fragment_set; k++) {
key_struct sort_input[KEY_NUMBER];
for (int i = 0; i < KEY_NUMBER; i++) {
char key[KEY_LENGTH];
for (int j = 0; j < KEY_LENGTH; j++) {
key[j] = test_fragment[k].fragment[j + KEY_LENGTH * i];
}
int key_hash = hashVal(key);
int key_entry = hash_table[key_hash];
int key_entry_size = coordinate[key_entry];
sort_input[i].order = 0;
sort_input[i].key_number = i;
sort_input[i].key_entry = key_entry;
sort_input[i].key_entry_size = key_entry_size;
}
sortPrefilter(test_fragment[k].sorted_keys, sort_input);
for (int i = 0; i < KEY_NUMBER; i++) {
cout << " fragment_set: " << k
<< " key_number:" << test_fragment[k].sorted_keys[i].key_number
<< " key_entry: " << test_fragment[k].sorted_keys[i].key_entry
<< " key_entry_size: " << test_fragment[k].sorted_keys[i].key_entry_size
<< " base: " << test_fragment[k].sorted_keys[i].base << endl;
}
cout << "****************************************************************************" << endl;
}
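// Device round trip: copy the prepared fragments to the GPU, run searchFragment
// with one block per fragment (9 threads each), then copy the per-fragment
// results back and print the matching coordinates and difference counts.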
GPU_fragment* dev_fragment;
final_result* dev_result;
final_result test_result[fragment_set];
hipMalloc((void**) &dev_fragment, sizeof(GPU_fragment)*fragment_set);
hipMalloc((void**) &dev_result, sizeof(final_result)*fragment_set);
hipMemcpy(dev_fragment, &test_fragment, sizeof(GPU_fragment)*fragment_set, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( searchFragment) , dim3(fragment_set), dim3(9), 0, 0, dev_fragment, fragment_set*10, dev_ref_string, dev_hash_table,
dev_coordinate, 3, 3, dev_result);
hipMemcpy(test_result, dev_result, sizeof(final_result)*fragment_set, hipMemcpyDeviceToHost);
for (int j = 0; j < fragment_set; j++) {
cout << "****************************************************************************" << endl;
cout << "Fragment : " << test_result[j].fragment << endl;
cout << "Spilled : " << test_result[j].spilled << endl;
cout << "Result Size : " << test_result[j].size << endl;
cout << "****************************************************************************" << endl;
for (int i = 0; i < test_result[j].size; i++) {
cout << "* Result coor : " << test_result[j].coor_results[i].coordiante;
cout << " Result diff : " << test_result[j].coor_results[i].diff_num << endl;
}
cout << "****************************************************************************" << endl;
}
hipFree(dev_fragment);
hipFree(dev_result);
freeHash();
freeRef();
return 0;
}
int main() {
test_cuda();
return 0;
}
| c4fab504c2fd408e5048a1136d5366c695b41621.cu | /*
* test_search.cpp
*
* Created on: Oct 19, 2011
* Author: mac
*/
#include <iostream>
#include "universal_header.h"
using namespace std;
#include "hash_test.cu"
int test_cuda(void) {
int ref_num = loadRef("toy_ref_0");
long long coord_num = loadHash("toy_hash_0");
hash_test(coord_num);
ref_test(ref_num);
int fragment_set = 4;
GPU_fragment test_fragment[fragment_set];
strcpy(test_fragment[0].fragment,
"GGGTGGTAGGTGCAGAGACGGGAGGGGCAGAGCCGCAGGCACAGCCAAGAGGGCTGAAGAAATGGTAGAACGGAGCAGCTGGTGATGTGTGGGCCCACCGGCCCCAGG");
strcpy(test_fragment[1].fragment,
"TGGCCCTGGGAGAGCAGGTGGAAGATCAGGCAGGCCATCGCTGCCACAGAACCCAGTGGATTGGCCTAGGTGGGATCTCTGAGCTCAACAAGCCCTCTCTCTTAGTTT");
strcpy(test_fragment[2].fragment,
"CCTAACCCAACCCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCCTAACCCTAACCCTAACCCTAACCCTAACCTAACCCTAACCCTAACCCTAACCCTAACC");
strcpy(test_fragment[3].fragment,
"CCTAACCCTAACCCTAACCCTAACCCTAACCCTAACCCCTAACCCTAACCCTAACCCTAACCCTCGCGGTACCCTCAGCCGGCCCGCCCGCCCGGGTCTGACCTGAGG");
//Getting the sort key.
cout << "********** Fragment Input **************************************************" << endl;
for (int k = 0; k < fragment_set; k++) {
key_struct sort_input[KEY_NUMBER];
for (int i = 0; i < KEY_NUMBER; i++) {
char key[KEY_LENGTH];
for (int j = 0; j < KEY_LENGTH; j++) {
key[j] = test_fragment[k].fragment[j + KEY_LENGTH * i];
}
int key_hash = hashVal(key);
int key_entry = hash_table[key_hash];
int key_entry_size = coordinate[key_entry];
sort_input[i].order = 0;
sort_input[i].key_number = i;
sort_input[i].key_entry = key_entry;
sort_input[i].key_entry_size = key_entry_size;
}
sortPrefilter(test_fragment[k].sorted_keys, sort_input);
for (int i = 0; i < KEY_NUMBER; i++) {
cout << " fragment_set: " << k
<< " key_number:" << test_fragment[k].sorted_keys[i].key_number
<< " key_entry: " << test_fragment[k].sorted_keys[i].key_entry
<< " key_entry_size: " << test_fragment[k].sorted_keys[i].key_entry_size
<< " base: " << test_fragment[k].sorted_keys[i].base << endl;
}
cout << "****************************************************************************" << endl;
}
GPU_fragment* dev_fragment;
final_result* dev_result;
final_result test_result[fragment_set];
cudaMalloc((void**) &dev_fragment, sizeof(GPU_fragment)*fragment_set);
cudaMalloc((void**) &dev_result, sizeof(final_result)*fragment_set);
cudaMemcpy(dev_fragment, &test_fragment, sizeof(GPU_fragment)*fragment_set, cudaMemcpyHostToDevice);
searchFragment <<<fragment_set, 9>>> (dev_fragment, fragment_set*10, dev_ref_string, dev_hash_table,
dev_coordinate, 3, 3, dev_result);
cudaMemcpy(test_result, dev_result, sizeof(final_result)*fragment_set, cudaMemcpyDeviceToHost);
for (int j = 0; j < fragment_set; j++) {
cout << "****************************************************************************" << endl;
cout << "Fragment : " << test_result[j].fragment << endl;
cout << "Spilled : " << test_result[j].spilled << endl;
cout << "Result Size : " << test_result[j].size << endl;
cout << "****************************************************************************" << endl;
for (int i = 0; i < test_result[j].size; i++) {
cout << "* Result coor : " << test_result[j].coor_results[i].coordiante;
cout << " Result diff : " << test_result[j].coor_results[i].diff_num << endl;
}
cout << "****************************************************************************" << endl;
}
cudaFree(dev_fragment);
cudaFree(dev_result);
freeHash();
freeRef();
return 0;
}
int main() {
test_cuda();
return 0;
}
|
45ad17b184abc0d61183a9048c911a395b3a4f35.hip | // !!! This is a file automatically generated by hipify!!!
static char *Ptxdata =
" .version 1.4\n"
" .target sm_10, map_f64_to_f32\n"
" // compiled with /usr/local/cuda4.1/cuda/open64/lib//be\n"
" // nvopencc 4.1 built on 2012-01-12\n"
"\n"
" //-----------------------------------------------------------\n"
" // Compiling /tmp/tmpxft_000072dc_00000000-9_direct.cpp3.i (/tmp/ccBI#.T0CKuW)\n"
" //-----------------------------------------------------------\n"
"\n"
" //-----------------------------------------------------------\n"
" // Options:\n"
" //-----------------------------------------------------------\n"
" // Target:ptx, ISA:sm_10, Endian:little, Pointer Size:64\n"
" // -O3 (Optimization level)\n"
" // -g0 (Debug level)\n"
" // -m2 (Report advisories)\n"
" //-----------------------------------------------------------\n"
"\n"
" .file 1 \"<command-line>\"\n"
" .file 2 \"/tmp/tmpxft_000072dc_00000000-8_direct.cudafe2.gpu\"\n"
" .file 3 \"/usr/lib/gcc/x86_64-redhat-linux/4.5.1/include/stddef.h\"\n"
" .file 4 \"/usr/local/cuda4.1/cuda/include/crt/device_runtime.h\"\n"
" .file 5 \"/usr/local/cuda4.1/cuda/include/host_defines.h\"\n"
" .file 6 \"/usr/local/cuda4.1/cuda/include/builtin_types.h\"\n"
" .file 7 \"/usr/local/cuda4.1/cuda/include/device_types.h\"\n"
" .file 8 \"/usr/local/cuda4.1/cuda/include/hip/driver_types.h\"\n"
" .file 9 \"/usr/local/cuda4.1/cuda/include/surface_types.h\"\n"
" .file 10 \"/usr/local/cuda4.1/cuda/include/texture_types.h\"\n"
" .file 11 \"/usr/local/cuda4.1/cuda/include/hip/hip_vector_types.h\"\n"
" .file 12 \"/usr/local/cuda4.1/cuda/include/device_launch_parameters.h\"\n"
" .file 13 \"/usr/local/cuda4.1/cuda/include/crt/storage_class.h\"\n"
" .file 14 \"direct.cu\"\n"
" .file 15 \"/usr/local/cuda4.1/cuda/include/common_functions.h\"\n"
" .file 16 \"/usr/local/cuda4.1/cuda/include/math_functions.h\"\n"
" .file 17 \"/usr/local/cuda4.1/cuda/include/math_constants.h\"\n"
" .file 18 \"/usr/local/cuda4.1/cuda/include/hip/device_functions.h\"\n"
" .file 19 \"/usr/local/cuda4.1/cuda/include/sm_11_atomic_functions.h\"\n"
" .file 20 \"/usr/local/cuda4.1/cuda/include/sm_12_atomic_functions.h\"\n"
" .file 21 \"/usr/local/cuda4.1/cuda/include/sm_13_double_functions.h\"\n"
" .file 22 \"/usr/local/cuda4.1/cuda/include/sm_20_atomic_functions.h\"\n"
" .file 23 \"/usr/local/cuda4.1/cuda/include/sm_20_intrinsics.h\"\n"
" .file 24 \"/usr/local/cuda4.1/cuda/include/surface_functions.h\"\n"
" .file 25 \"/usr/local/cuda4.1/cuda/include/texture_fetch_functions.h\"\n"
" .file 26 \"/usr/local/cuda4.1/cuda/include/math_functions_dbl_ptx1.h\"\n"
"\n"
"\n"
" .entry _Z14gravity_kernelPfPA3_ffS1_S_i (\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_m,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_x,\n"
" .param .f32 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_eps,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_a,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_p,\n"
" .param .s32 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n)\n"
" {\n"
" .reg .u16 %rh<4>;\n"
" .reg .u32 %r<12>;\n"
" .reg .u64 %rd<13>;\n"
" .reg .f32 %f<37>;\n"
" .reg .pred %p<4>;\n"
" .loc 14 12 0\n"
"$LDWbegin__Z14gravity_kernelPfPA3_ffS1_S_i:\n"
" .loc 14 51 0\n"
" mov.f32 %f1, 0f00000000; // 0\n"
" mov.f32 %f2, %f1;\n"
" mov.f32 %f3, 0f00000000; // 0\n"
" mov.f32 %f4, %f3;\n"
" mov.f32 %f5, 0f00000000; // 0\n"
" mov.f32 %f6, %f5;\n"
" cvt.u32.u16 %r1, %tid.x;\n"
" mov.u16 %rh1, %ntid.x;\n"
" mov.u16 %rh2, %ctaid.x;\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" mov.u32 %r3, 0;\n"
" setp.le.s32 %p1, %r2, %r3;\n"
" @%p1 bra $Lt_0_7426;\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" mov.s32 %r4, %r2;\n"
" mul.wide.u16 %r5, %rh1, %rh2;\n"
" ld.param.f32 %f7, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_eps];\n"
" mul.f32 %f8, %f7, %f7;\n"
" add.u32 %r6, %r5, %r1;\n"
" cvt.s64.s32 %rd1, %r6;\n"
" ld.param.u64 %rd2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_x];\n"
" mov.s64 %rd3, %rd2;\n"
" ld.param.u64 %rd4, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_m];\n"
" mul.wide.s32 %rd5, %r6, 12;\n"
" add.u64 %rd6, %rd5, %rd2;\n"
" ld.global.f32 %f9, [%rd6+0];\n"
" ld.global.f32 %f10, [%rd6+4];\n"
" ld.global.f32 %f11, [%rd6+8];\n"
" mov.s32 %r7, 0;\n"
" mov.f32 %f12, 0f00000000; // 0\n"
" mov.s32 %r8, %r4;\n"
"$Lt_0_6914:\n"
" //<loop> Loop body line 51, nesting depth: 1, estimated iterations: unknown\n"
" .loc 14 62 0\n"
" ld.global.f32 %f13, [%rd3+0];\n"
" ld.global.f32 %f14, [%rd3+4];\n"
" ld.global.f32 %f15, [%rd3+8];\n"
" sub.f32 %f16, %f13, %f9;\n"
" sub.f32 %f17, %f14, %f10;\n"
" sub.f32 %f18, %f15, %f11;\n"
" mad.f32 %f19, %f16, %f16, %f8;\n"
" mad.f32 %f20, %f17, %f17, %f19;\n"
" mad.f32 %f21, %f18, %f18, %f20;\n"
" .loc 14 65 0\n"
" ld.global.f32 %f22, [%rd4+0];\n"
" .loc 14 67 0\n"
" sqrt.approx.f32 %f23, %f21;\n"
" mul.f32 %f24, %f23, %f21;\n"
" div.full.f32 %f25, %f22, %f24;\n"
" mov.f32 %f26, %f2;\n"
" mad.f32 %f27, %f16, %f25, %f26;\n"
" mov.f32 %f2, %f27;\n"
" mov.f32 %f28, %f4;\n"
" mad.f32 %f29, %f17, %f25, %f28;\n"
" mov.f32 %f4, %f29;\n"
" mov.f32 %f30, %f6;\n"
" mad.f32 %f31, %f18, %f25, %f30;\n"
" mov.f32 %f6, %f31;\n"
" .loc 14 69 0\n"
" div.full.f32 %f32, %f22, %f23;\n"
" sub.f32 %f12, %f12, %f32;\n"
" add.s32 %r7, %r7, 1;\n"
" add.u64 %rd4, %rd4, 4;\n"
" add.u64 %rd3, %rd3, 12;\n"
" .loc 14 51 0\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" .loc 14 69 0\n"
" setp.ne.s32 %p2, %r2, %r7;\n"
" @%p2 bra $Lt_0_6914;\n"
" bra.uni $Lt_0_6402;\n"
"$Lt_0_7426:\n"
" mul.wide.u16 %r9, %rh1, %rh2;\n"
" add.u32 %r10, %r1, %r9;\n"
" cvt.s64.s32 %rd1, %r10;\n"
" mul.wide.s32 %rd5, %r10, 12;\n"
" mov.f32 %f12, 0f00000000; // 0\n"
"$Lt_0_6402:\n"
" .loc 14 72 0\n"
" ld.param.u64 %rd7, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_a];\n"
" add.u64 %rd8, %rd7, %rd5;\n"
" mov.f32 %f33, %f2;\n"
" st.global.f32 [%rd8+0], %f33;\n"
" mov.f32 %f34, %f4;\n"
" st.global.f32 [%rd8+4], %f34;\n"
" mov.f32 %f35, %f6;\n"
" st.global.f32 [%rd8+8], %f35;\n"
" .loc 14 74 0\n"
" ld.param.u64 %rd9, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_p];\n"
" mul.lo.u64 %rd10, %rd1, 4;\n"
" add.u64 %rd11, %rd9, %rd10;\n"
" st.global.f32 [%rd11+0], %f12;\n"
" .loc 14 77 0\n"
" exit;\n"
"$LDWend__Z14gravity_kernelPfPA3_ffS1_S_i:\n"
" } // _Z14gravity_kernelPfPA3_ffS1_S_i\n"
"\n";
#pragma dscuda endofptx
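// The Ptxdata string above embeds the PTX image of _Z14gravity_kernelPfPA3_ffS1_S_i;
// the DS-CUDA RPC interface declared below (dscuda.h / dscudarpc.h) includes
// dscudaLoadModuleId, whose arguments carry such a module image, which is
// presumably how this PTX reaches the remote execution server.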
#pragma begin dscuda.h
#ifndef _DSCUDA_H
#define _DSCUDA_H
#include <hip/hip_runtime_api.h>
#include <cutil.h>
#include <builtin_types.h>
#include <hip/driver_types.h>
#include <hip/hip_texture_types.h>
#include <texture_types.h>
#pragma begin dscudarpc.h
#ifndef _DSCUDARPC_H_RPCGEN
#define _DSCUDARPC_H_RPCGEN
#include <rpc/rpc.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef u_quad_t RCadr;
typedef u_quad_t RCstream;
typedef u_quad_t RCevent;
typedef u_quad_t RCipaddr;
typedef u_int RCsize;
typedef u_int RCerror;
typedef struct {
u_int RCbuf_len;
char *RCbuf_val;
} RCbuf;
typedef u_int RCchannelformat;
typedef u_long RCpid;
struct RCchanneldesc_t {
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCchanneldesc_t RCchanneldesc_t;
typedef RCchanneldesc_t RCchanneldesc;
struct RCtexture_t {
int normalized;
int filterMode;
int addressMode[3];
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCtexture_t RCtexture_t;
typedef RCtexture_t RCtexture;
struct RCfuncattr_t {
int binaryVersion;
RCsize constSizeBytes;
RCsize localSizeBytes;
int maxThreadsPerBlock;
int numRegs;
int ptxVersion;
RCsize sharedSizeBytes;
};
typedef struct RCfuncattr_t RCfuncattr_t;
typedef RCfuncattr_t RCfuncattr;
enum RCargType {
dscudaArgTypeP = 0,
dscudaArgTypeI = 1,
dscudaArgTypeF = 2,
dscudaArgTypeV = 3,
};
typedef enum RCargType RCargType;
struct RCargVal {
RCargType type;
union {
RCadr address;
u_int valuei;
float valuef;
char valuev[64];
} RCargVal_u;
};
typedef struct RCargVal RCargVal;
struct RCarg {
RCargVal val;
u_int offset;
u_int size;
};
typedef struct RCarg RCarg;
typedef struct {
u_int RCargs_len;
RCarg *RCargs_val;
} RCargs;
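// An RCargs vector carries kernel-launch arguments over RPC (see dscudaLaunchKernelId
// below): each RCarg holds a tagged value (device address, int, float or a raw
// 64-byte blob) plus an offset and size for placing it in the argument list.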
struct dscudaResult {
RCerror err;
};
typedef struct dscudaResult dscudaResult;
struct dscudaThreadGetLimitResult {
RCerror err;
RCsize value;
};
typedef struct dscudaThreadGetLimitResult dscudaThreadGetLimitResult;
struct dscudaThreadGetCacheConfigResult {
RCerror err;
int cacheConfig;
};
typedef struct dscudaThreadGetCacheConfigResult dscudaThreadGetCacheConfigResult;
struct dscudaMallocResult {
RCerror err;
RCadr devAdr;
};
typedef struct dscudaMallocResult dscudaMallocResult;
struct dscudaHostAllocResult {
RCerror err;
RCadr pHost;
};
typedef struct dscudaHostAllocResult dscudaHostAllocResult;
struct dscudaMallocHostResult {
RCerror err;
RCadr ptr;
};
typedef struct dscudaMallocHostResult dscudaMallocHostResult;
struct dscudaMallocArrayResult {
RCerror err;
RCadr array;
};
typedef struct dscudaMallocArrayResult dscudaMallocArrayResult;
struct dscudaMallocPitchResult {
RCerror err;
RCadr devPtr;
RCsize pitch;
};
typedef struct dscudaMallocPitchResult dscudaMallocPitchResult;
struct dscudaMemcpyD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyD2HResult dscudaMemcpyD2HResult;
struct dscudaMemcpyH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyH2HResult dscudaMemcpyH2HResult;
struct dscudaMemcpyToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayD2HResult dscudaMemcpyToArrayD2HResult;
struct dscudaMemcpyToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayH2HResult dscudaMemcpyToArrayH2HResult;
struct dscudaMemcpy2DToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayD2HResult dscudaMemcpy2DToArrayD2HResult;
struct dscudaMemcpy2DToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayH2HResult dscudaMemcpy2DToArrayH2HResult;
struct dscudaMemcpy2DD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DD2HResult dscudaMemcpy2DD2HResult;
struct dscudaMemcpy2DH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DH2HResult dscudaMemcpy2DH2HResult;
struct dscudaGetDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaGetDeviceResult dscudaGetDeviceResult;
struct dscudaGetDeviceCountResult {
RCerror err;
int count;
};
typedef struct dscudaGetDeviceCountResult dscudaGetDeviceCountResult;
struct dscudaGetDevicePropertiesResult {
RCerror err;
RCbuf prop;
};
typedef struct dscudaGetDevicePropertiesResult dscudaGetDevicePropertiesResult;
struct dscudaDriverGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaDriverGetVersionResult dscudaDriverGetVersionResult;
struct dscudaRuntimeGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaRuntimeGetVersionResult dscudaRuntimeGetVersionResult;
struct dscudaGetErrorStringResult {
char *errmsg;
};
typedef struct dscudaGetErrorStringResult dscudaGetErrorStringResult;
struct dscudaCreateChannelDescResult {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaCreateChannelDescResult dscudaCreateChannelDescResult;
struct dscudaGetChannelDescResult {
RCerror err;
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaGetChannelDescResult dscudaGetChannelDescResult;
struct dscudaChooseDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaChooseDeviceResult dscudaChooseDeviceResult;
struct dscudaMemcpyAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncD2HResult dscudaMemcpyAsyncD2HResult;
struct dscudaMemcpyAsyncH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncH2HResult dscudaMemcpyAsyncH2HResult;
struct dscudaMemcpyFromSymbolD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolD2HResult dscudaMemcpyFromSymbolD2HResult;
struct dscudaMemcpyFromSymbolAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolAsyncD2HResult dscudaMemcpyFromSymbolAsyncD2HResult;
struct dscudaStreamCreateResult {
RCerror err;
RCadr stream;
};
typedef struct dscudaStreamCreateResult dscudaStreamCreateResult;
struct dscudaEventCreateResult {
RCerror err;
RCadr event;
};
typedef struct dscudaEventCreateResult dscudaEventCreateResult;
struct dscudaEventElapsedTimeResult {
RCerror err;
float ms;
};
typedef struct dscudaEventElapsedTimeResult dscudaEventElapsedTimeResult;
struct dscudaHostGetDevicePointerResult {
RCerror err;
RCadr pDevice;
};
typedef struct dscudaHostGetDevicePointerResult dscudaHostGetDevicePointerResult;
struct dscudaHostGetFlagsResult {
RCerror err;
u_int flags;
};
typedef struct dscudaHostGetFlagsResult dscudaHostGetFlagsResult;
struct dscudaLoadModuleResult {
u_int id;
};
typedef struct dscudaLoadModuleResult dscudaLoadModuleResult;
struct dscudaFuncGetAttributesResult {
RCerror err;
RCfuncattr attr;
};
typedef struct dscudaFuncGetAttributesResult dscudaFuncGetAttributesResult;
struct dscudaBindTextureResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTextureResult dscudaBindTextureResult;
struct dscudaBindTexture2DResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTexture2DResult dscudaBindTexture2DResult;
struct dscufftResult {
RCerror err;
};
typedef struct dscufftResult dscufftResult;
struct dscufftPlanResult {
RCerror err;
u_int plan;
};
typedef struct dscufftPlanResult dscufftPlanResult;
struct dscublasResult {
RCerror err;
u_int stat;
};
typedef struct dscublasResult dscublasResult;
struct dscublasCreateResult {
RCerror err;
u_int stat;
RCadr handle;
};
typedef struct dscublasCreateResult dscublasCreateResult;
struct dscublasGetVectorResult {
RCerror err;
u_int stat;
RCbuf y;
};
typedef struct dscublasGetVectorResult dscublasGetVectorResult;
struct RCdim3 {
u_int x;
u_int y;
u_int z;
};
typedef struct RCdim3 RCdim3;
struct dscudathreadsetlimitid_1_argument {
int limit;
RCsize value;
};
typedef struct dscudathreadsetlimitid_1_argument dscudathreadsetlimitid_1_argument;
struct dscudastreamwaiteventid_1_argument {
RCstream stream;
RCevent event;
u_int flags;
};
typedef struct dscudastreamwaiteventid_1_argument dscudastreamwaiteventid_1_argument;
struct dscudaeventelapsedtimeid_1_argument {
RCevent start;
RCevent end;
};
typedef struct dscudaeventelapsedtimeid_1_argument dscudaeventelapsedtimeid_1_argument;
struct dscudaeventrecordid_1_argument {
RCevent event;
RCstream stream;
};
typedef struct dscudaeventrecordid_1_argument dscudaeventrecordid_1_argument;
struct dscudalaunchkernelid_1_argument {
int moduleid;
int kid;
char *kname;
RCdim3 gdim;
RCdim3 bdim;
RCsize smemsize;
RCstream stream;
RCargs args;
};
typedef struct dscudalaunchkernelid_1_argument dscudalaunchkernelid_1_argument;
struct dscudaloadmoduleid_1_argument {
RCipaddr ipaddr;
RCpid pid;
char *mname;
char *image;
};
typedef struct dscudaloadmoduleid_1_argument dscudaloadmoduleid_1_argument;
struct dscudafuncgetattributesid_1_argument {
int moduleid;
char *kname;
};
typedef struct dscudafuncgetattributesid_1_argument dscudafuncgetattributesid_1_argument;
struct dscudamemcpyh2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2hid_1_argument dscudamemcpyh2hid_1_argument;
struct dscudamemcpyh2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2did_1_argument dscudamemcpyh2did_1_argument;
struct dscudamemcpyd2hid_1_argument {
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2hid_1_argument dscudamemcpyd2hid_1_argument;
struct dscudamemcpyd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2did_1_argument dscudamemcpyd2did_1_argument;
struct dscudamemcpyasynch2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2hid_1_argument dscudamemcpyasynch2hid_1_argument;
struct dscudamemcpyasynch2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2did_1_argument dscudamemcpyasynch2did_1_argument;
struct dscudamemcpyasyncd2hid_1_argument {
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2hid_1_argument dscudamemcpyasyncd2hid_1_argument;
struct dscudamemcpyasyncd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2did_1_argument dscudamemcpyasyncd2did_1_argument;
struct dscudamemcpytosymbolh2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbolh2did_1_argument dscudamemcpytosymbolh2did_1_argument;
struct dscudamemcpytosymbold2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbold2did_1_argument dscudamemcpytosymbold2did_1_argument;
struct dscudamemcpyfromsymbold2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2hid_1_argument dscudamemcpyfromsymbold2hid_1_argument;
struct dscudamemcpyfromsymbold2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2did_1_argument dscudamemcpyfromsymbold2did_1_argument;
struct dscudamemsetid_1_argument {
RCadr dst;
int value;
RCsize count;
};
typedef struct dscudamemsetid_1_argument dscudamemsetid_1_argument;
struct dscudahostallocid_1_argument {
RCsize size;
u_int flags;
};
typedef struct dscudahostallocid_1_argument dscudahostallocid_1_argument;
struct dscudahostgetdevicepointerid_1_argument {
RCadr pHost;
u_int flags;
};
typedef struct dscudahostgetdevicepointerid_1_argument dscudahostgetdevicepointerid_1_argument;
struct dscudamallocarrayid_1_argument {
RCchanneldesc desc;
RCsize width;
RCsize height;
u_int flags;
};
typedef struct dscudamallocarrayid_1_argument dscudamallocarrayid_1_argument;
struct dscudamemcpytoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2hid_1_argument dscudamemcpytoarrayh2hid_1_argument;
struct dscudamemcpytoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2did_1_argument dscudamemcpytoarrayh2did_1_argument;
struct dscudamemcpytoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2hid_1_argument dscudamemcpytoarrayd2hid_1_argument;
struct dscudamemcpytoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2did_1_argument dscudamemcpytoarrayd2did_1_argument;
struct dscudamallocpitchid_1_argument {
RCsize width;
RCsize height;
};
typedef struct dscudamallocpitchid_1_argument dscudamallocpitchid_1_argument;
struct dscudamemcpy2dtoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2hid_1_argument dscudamemcpy2dtoarrayh2hid_1_argument;
struct dscudamemcpy2dtoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf srcbuf;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2did_1_argument dscudamemcpy2dtoarrayh2did_1_argument;
struct dscudamemcpy2dtoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2hid_1_argument dscudamemcpy2dtoarrayd2hid_1_argument;
struct dscudamemcpy2dtoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2did_1_argument dscudamemcpy2dtoarrayd2did_1_argument;
struct dscudamemcpy2dh2hid_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2hid_1_argument dscudamemcpy2dh2hid_1_argument;
struct dscudamemcpy2dh2did_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2did_1_argument dscudamemcpy2dh2did_1_argument;
struct dscudamemcpy2dd2hid_1_argument {
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2hid_1_argument dscudamemcpy2dd2hid_1_argument;
struct dscudamemcpy2dd2did_1_argument {
RCadr dst;
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2did_1_argument dscudamemcpy2dd2did_1_argument;
struct dscudamemset2did_1_argument {
RCadr dst;
RCsize pitch;
int value;
RCsize width;
RCsize height;
};
typedef struct dscudamemset2did_1_argument dscudamemset2did_1_argument;
struct dscudamemcpytosymbolasynch2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasynch2did_1_argument dscudamemcpytosymbolasynch2did_1_argument;
struct dscudamemcpytosymbolasyncd2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasyncd2did_1_argument dscudamemcpytosymbolasyncd2did_1_argument;
struct dscudamemcpyfromsymbolasyncd2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2hid_1_argument dscudamemcpyfromsymbolasyncd2hid_1_argument;
struct dscudamemcpyfromsymbolasyncd2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2did_1_argument dscudamemcpyfromsymbolasyncd2did_1_argument;
struct dscudacreatechanneldescid_1_argument {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudacreatechanneldescid_1_argument dscudacreatechanneldescid_1_argument;
struct dscudabindtextureid_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize size;
RCtexture texbuf;
};
typedef struct dscudabindtextureid_1_argument dscudabindtextureid_1_argument;
struct dscudabindtexture2did_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize width;
RCsize height;
RCsize pitch;
RCtexture texbuf;
};
typedef struct dscudabindtexture2did_1_argument dscudabindtexture2did_1_argument;
struct dscudabindtexturetoarrayid_1_argument {
int moduleid;
char *texname;
RCadr array;
RCtexture texbuf;
};
typedef struct dscudabindtexturetoarrayid_1_argument dscudabindtexturetoarrayid_1_argument;
struct dscufftplan3did_1_argument {
int nx;
int ny;
int nz;
u_int type;
};
typedef struct dscufftplan3did_1_argument dscufftplan3did_1_argument;
struct dscufftexecc2cid_1_argument {
u_int plan;
RCadr idata;
RCadr odata;
int direction;
};
typedef struct dscufftexecc2cid_1_argument dscufftexecc2cid_1_argument;
#define DSCUDA_PROG 60000
#define DSCUDA_VER 1
#if defined(__STDC__) || defined(__cplusplus)
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1(CLIENT *);
extern dscudaResult * dscudathreadexitid_1_svc(struct svc_req *);
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1(CLIENT *);
extern dscudaResult * dscudathreadsynchronizeid_1_svc(struct svc_req *);
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1(int , RCsize , CLIENT *);
extern dscudaResult * dscudathreadsetlimitid_1_svc(int , RCsize , struct svc_req *);
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1(int , CLIENT *);
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc(int , struct svc_req *);
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1(int , CLIENT *);
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc(int , struct svc_req *);
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1(CLIENT *);
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc(struct svc_req *);
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1(CLIENT *);
extern dscudaResult * dscudagetlasterrorid_1_svc(struct svc_req *);
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1(CLIENT *);
extern dscudaResult * dscudapeekatlasterrorid_1_svc(struct svc_req *);
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1(int , CLIENT *);
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc(int , struct svc_req *);
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1(CLIENT *);
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc(struct svc_req *);
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1(CLIENT *);
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc(struct svc_req *);
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1(int , CLIENT *);
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc(int , struct svc_req *);
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1(CLIENT *);
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc(struct svc_req *);
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1(CLIENT *);
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc(struct svc_req *);
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1(int , CLIENT *);
extern dscudaResult * dscudasetdeviceid_1_svc(int , struct svc_req *);
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1(u_int , CLIENT *);
extern dscudaResult * dscudasetdeviceflagsid_1_svc(u_int , struct svc_req *);
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1(RCbuf , CLIENT *);
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc(RCbuf , struct svc_req *);
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1(CLIENT *);
extern dscudaResult * dscudadevicesynchronize_1_svc(struct svc_req *);
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1(CLIENT *);
extern dscudaResult * dscudadevicereset_1_svc(struct svc_req *);
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1(CLIENT *);
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc(struct svc_req *);
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamdestroyid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamsynchronizeid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamqueryid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1(RCstream , RCevent , u_int , CLIENT *);
extern dscudaResult * dscudastreamwaiteventid_1_svc(RCstream , RCevent , u_int , struct svc_req *);
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1(CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc(struct svc_req *);
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1(u_int , CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc(u_int , struct svc_req *);
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventdestroyid_1_svc(RCevent , struct svc_req *);
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1(RCevent , RCevent , CLIENT *);
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc(RCevent , RCevent , struct svc_req *);
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1(RCevent , RCstream , CLIENT *);
extern dscudaResult * dscudaeventrecordid_1_svc(RCevent , RCstream , struct svc_req *);
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventsynchronizeid_1_svc(RCevent , struct svc_req *);
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventqueryid_1_svc(RCevent , struct svc_req *);
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , CLIENT *);
extern void * dscudalaunchkernelid_1_svc(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , struct svc_req *);
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1(RCipaddr , RCpid , char *, char *, CLIENT *);
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc(RCipaddr , RCpid , char *, char *, struct svc_req *);
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1(int , char *, CLIENT *);
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc(int , char *, struct svc_req *);
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1(RCsize , CLIENT *);
extern dscudaMallocResult * dscudamallocid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreeid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyh2did_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1(RCadr , RCsize , CLIENT *);
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc(RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1(RCadr , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyd2did_1_svc(RCadr , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasynch2did_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1(RCadr , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc(RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1(RCadr , RCadr , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasyncd2did_1_svc(RCadr , RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1(int , char *, RCbuf , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc(int , char *, RCbuf , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1(int , char *, RCadr , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbold2did_1_svc(int , char *, RCadr , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1(int , char *, RCsize , RCsize , CLIENT *);
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc(int , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1(int , RCadr , char *, RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc(int , RCadr , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1(RCadr , int , RCsize , CLIENT *);
extern dscudaResult * dscudamemsetid_1_svc(RCadr , int , RCsize , struct svc_req *);
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1(RCsize , u_int , CLIENT *);
extern dscudaHostAllocResult * dscudahostallocid_1_svc(RCsize , u_int , struct svc_req *);
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1(RCsize , CLIENT *);
extern dscudaMallocHostResult * dscudamallochostid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreehostid_1_svc(RCadr , struct svc_req *);
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1(RCadr , u_int , CLIENT *);
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc(RCadr , u_int , struct svc_req *);
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1(RCadr , CLIENT *);
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc(RCadr , struct svc_req *);
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1(RCchanneldesc , RCsize , RCsize , u_int , CLIENT *);
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc(RCchanneldesc , RCsize , RCsize , u_int , struct svc_req *);
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreearrayid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1(RCsize , RCsize , CLIENT *);
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc(RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dh2did_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1(RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc(RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dd2did_1_svc(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1(RCadr , RCsize , int , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemset2did_1_svc(RCadr , RCsize , int , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1(int , char *, RCbuf , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc(int , char *, RCbuf , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1(int , char *, RCadr , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1(int , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc(int , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1(int , RCadr , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc(int , RCadr , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1(int , int , int , int , RCchannelformat , CLIENT *);
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc(int , int , int , int , RCchannelformat , struct svc_req *);
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1(RCadr , CLIENT *);
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc(RCadr , struct svc_req *);
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1(int , char *, RCadr , RCsize , RCtexture , CLIENT *);
extern dscudaBindTextureResult * dscudabindtextureid_1_svc(int , char *, RCadr , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , CLIENT *);
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1(int , char *, RCadr , RCtexture , CLIENT *);
extern dscudaResult * dscudabindtexturetoarrayid_1_svc(int , char *, RCadr , RCtexture , struct svc_req *);
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1(RCtexture , CLIENT *);
extern dscudaResult * dscudaunbindtextureid_1_svc(RCtexture , struct svc_req *);
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1(int , int , int , u_int , CLIENT *);
extern dscufftPlanResult * dscufftplan3did_1_svc(int , int , int , u_int , struct svc_req *);
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1(u_int , CLIENT *);
extern dscufftResult * dscufftdestroyid_1_svc(u_int , struct svc_req *);
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1(u_int , RCadr , RCadr , int , CLIENT *);
extern dscufftResult * dscufftexecc2cid_1_svc(u_int , RCadr , RCadr , int , struct svc_req *);
extern int dscuda_prog_1_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else
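/* Fallback branch for pre-ANSI compilers: the same RPC ID constants and client/server stubs, declared without parameter prototypes. */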
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1();
extern dscudaResult * dscudathreadexitid_1_svc();
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1();
extern dscudaResult * dscudathreadsynchronizeid_1_svc();
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1();
extern dscudaResult * dscudathreadsetlimitid_1_svc();
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1();
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc();
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1();
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc();
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1();
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc();
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1();
extern dscudaResult * dscudagetlasterrorid_1_svc();
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1();
extern dscudaResult * dscudapeekatlasterrorid_1_svc();
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1();
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc();
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1();
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc();
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1();
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc();
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1();
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc();
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1();
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc();
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1();
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc();
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1();
extern dscudaResult * dscudasetdeviceid_1_svc();
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1();
extern dscudaResult * dscudasetdeviceflagsid_1_svc();
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1();
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc();
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1();
extern dscudaResult * dscudadevicesynchronize_1_svc();
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1();
extern dscudaResult * dscudadevicereset_1_svc();
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1();
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc();
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1();
extern dscudaResult * dscudastreamdestroyid_1_svc();
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1();
extern dscudaResult * dscudastreamsynchronizeid_1_svc();
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1();
extern dscudaResult * dscudastreamqueryid_1_svc();
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1();
extern dscudaResult * dscudastreamwaiteventid_1_svc();
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1();
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc();
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1();
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc();
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1();
extern dscudaResult * dscudaeventdestroyid_1_svc();
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1();
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc();
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1();
extern dscudaResult * dscudaeventrecordid_1_svc();
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1();
extern dscudaResult * dscudaeventsynchronizeid_1_svc();
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1();
extern dscudaResult * dscudaeventqueryid_1_svc();
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1();
extern void * dscudalaunchkernelid_1_svc();
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1();
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc();
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1();
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc();
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1();
extern dscudaMallocResult * dscudamallocid_1_svc();
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1();
extern dscudaResult * dscudafreeid_1_svc();
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1();
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc();
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1();
extern dscudaResult * dscudamemcpyh2did_1_svc();
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1();
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc();
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1();
extern dscudaResult * dscudamemcpyd2did_1_svc();
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1();
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc();
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1();
extern dscudaResult * dscudamemcpyasynch2did_1_svc();
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1();
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc();
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1();
extern dscudaResult * dscudamemcpyasyncd2did_1_svc();
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1();
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc();
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1();
extern dscudaResult * dscudamemcpytosymbold2did_1_svc();
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1();
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc();
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1();
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc();
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1();
extern dscudaResult * dscudamemsetid_1_svc();
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1();
extern dscudaHostAllocResult * dscudahostallocid_1_svc();
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1();
extern dscudaMallocHostResult * dscudamallochostid_1_svc();
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1();
extern dscudaResult * dscudafreehostid_1_svc();
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1();
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc();
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1();
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc();
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1();
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc();
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1();
extern dscudaResult * dscudafreearrayid_1_svc();
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1();
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc();
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1();
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc();
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1();
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc();
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1();
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc();
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1();
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc();
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1();
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc();
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc();
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1();
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc();
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc();
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1();
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc();
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1();
extern dscudaResult * dscudamemcpy2dh2did_1_svc();
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1();
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc();
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1();
extern dscudaResult * dscudamemcpy2dd2did_1_svc();
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1();
extern dscudaResult * dscudamemset2did_1_svc();
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1();
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc();
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1();
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1();
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1();
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc();
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1();
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc();
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1();
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc();
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1();
extern dscudaBindTextureResult * dscudabindtextureid_1_svc();
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1();
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc();
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1();
extern dscudaResult * dscudabindtexturetoarrayid_1_svc();
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1();
extern dscudaResult * dscudaunbindtextureid_1_svc();
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1();
extern dscufftPlanResult * dscufftplan3did_1_svc();
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1();
extern dscufftResult * dscufftdestroyid_1_svc();
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1();
extern dscufftResult * dscufftexecc2cid_1_svc();
extern int dscuda_prog_1_freeresult ();
#endif
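/* XDR encode/decode routines for the RPC argument and result types declared above. */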
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_RCadr (XDR *, RCadr*);
extern bool_t xdr_RCstream (XDR *, RCstream*);
extern bool_t xdr_RCevent (XDR *, RCevent*);
extern bool_t xdr_RCipaddr (XDR *, RCipaddr*);
extern bool_t xdr_RCsize (XDR *, RCsize*);
extern bool_t xdr_RCerror (XDR *, RCerror*);
extern bool_t xdr_RCbuf (XDR *, RCbuf*);
extern bool_t xdr_RCchannelformat (XDR *, RCchannelformat*);
extern bool_t xdr_RCpid (XDR *, RCpid*);
extern bool_t xdr_RCchanneldesc_t (XDR *, RCchanneldesc_t*);
extern bool_t xdr_RCchanneldesc (XDR *, RCchanneldesc*);
extern bool_t xdr_RCtexture_t (XDR *, RCtexture_t*);
extern bool_t xdr_RCtexture (XDR *, RCtexture*);
extern bool_t xdr_RCfuncattr_t (XDR *, RCfuncattr_t*);
extern bool_t xdr_RCfuncattr (XDR *, RCfuncattr*);
extern bool_t xdr_RCargType (XDR *, RCargType*);
extern bool_t xdr_RCargVal (XDR *, RCargVal*);
extern bool_t xdr_RCarg (XDR *, RCarg*);
extern bool_t xdr_RCargs (XDR *, RCargs*);
extern bool_t xdr_dscudaResult (XDR *, dscudaResult*);
extern bool_t xdr_dscudaThreadGetLimitResult (XDR *, dscudaThreadGetLimitResult*);
extern bool_t xdr_dscudaThreadGetCacheConfigResult (XDR *, dscudaThreadGetCacheConfigResult*);
extern bool_t xdr_dscudaMallocResult (XDR *, dscudaMallocResult*);
extern bool_t xdr_dscudaHostAllocResult (XDR *, dscudaHostAllocResult*);
extern bool_t xdr_dscudaMallocHostResult (XDR *, dscudaMallocHostResult*);
extern bool_t xdr_dscudaMallocArrayResult (XDR *, dscudaMallocArrayResult*);
extern bool_t xdr_dscudaMallocPitchResult (XDR *, dscudaMallocPitchResult*);
extern bool_t xdr_dscudaMemcpyD2HResult (XDR *, dscudaMemcpyD2HResult*);
extern bool_t xdr_dscudaMemcpyH2HResult (XDR *, dscudaMemcpyH2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayD2HResult (XDR *, dscudaMemcpyToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayH2HResult (XDR *, dscudaMemcpyToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult (XDR *, dscudaMemcpy2DToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult (XDR *, dscudaMemcpy2DToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DD2HResult (XDR *, dscudaMemcpy2DD2HResult*);
extern bool_t xdr_dscudaMemcpy2DH2HResult (XDR *, dscudaMemcpy2DH2HResult*);
extern bool_t xdr_dscudaGetDeviceResult (XDR *, dscudaGetDeviceResult*);
extern bool_t xdr_dscudaGetDeviceCountResult (XDR *, dscudaGetDeviceCountResult*);
extern bool_t xdr_dscudaGetDevicePropertiesResult (XDR *, dscudaGetDevicePropertiesResult*);
extern bool_t xdr_dscudaDriverGetVersionResult (XDR *, dscudaDriverGetVersionResult*);
extern bool_t xdr_dscudaRuntimeGetVersionResult (XDR *, dscudaRuntimeGetVersionResult*);
extern bool_t xdr_dscudaGetErrorStringResult (XDR *, dscudaGetErrorStringResult*);
extern bool_t xdr_dscudaCreateChannelDescResult (XDR *, dscudaCreateChannelDescResult*);
extern bool_t xdr_dscudaGetChannelDescResult (XDR *, dscudaGetChannelDescResult*);
extern bool_t xdr_dscudaChooseDeviceResult (XDR *, dscudaChooseDeviceResult*);
extern bool_t xdr_dscudaMemcpyAsyncD2HResult (XDR *, dscudaMemcpyAsyncD2HResult*);
extern bool_t xdr_dscudaMemcpyAsyncH2HResult (XDR *, dscudaMemcpyAsyncH2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult (XDR *, dscudaMemcpyFromSymbolD2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult (XDR *, dscudaMemcpyFromSymbolAsyncD2HResult*);
extern bool_t xdr_dscudaStreamCreateResult (XDR *, dscudaStreamCreateResult*);
extern bool_t xdr_dscudaEventCreateResult (XDR *, dscudaEventCreateResult*);
extern bool_t xdr_dscudaEventElapsedTimeResult (XDR *, dscudaEventElapsedTimeResult*);
extern bool_t xdr_dscudaHostGetDevicePointerResult (XDR *, dscudaHostGetDevicePointerResult*);
extern bool_t xdr_dscudaHostGetFlagsResult (XDR *, dscudaHostGetFlagsResult*);
extern bool_t xdr_dscudaLoadModuleResult (XDR *, dscudaLoadModuleResult*);
extern bool_t xdr_dscudaFuncGetAttributesResult (XDR *, dscudaFuncGetAttributesResult*);
extern bool_t xdr_dscudaBindTextureResult (XDR *, dscudaBindTextureResult*);
extern bool_t xdr_dscudaBindTexture2DResult (XDR *, dscudaBindTexture2DResult*);
extern bool_t xdr_dscufftResult (XDR *, dscufftResult*);
extern bool_t xdr_dscufftPlanResult (XDR *, dscufftPlanResult*);
extern bool_t xdr_dscublasResult (XDR *, dscublasResult*);
extern bool_t xdr_dscublasCreateResult (XDR *, dscublasCreateResult*);
extern bool_t xdr_dscublasGetVectorResult (XDR *, dscublasGetVectorResult*);
extern bool_t xdr_RCdim3 (XDR *, RCdim3*);
extern bool_t xdr_dscudathreadsetlimitid_1_argument (XDR *, dscudathreadsetlimitid_1_argument*);
extern bool_t xdr_dscudastreamwaiteventid_1_argument (XDR *, dscudastreamwaiteventid_1_argument*);
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument (XDR *, dscudaeventelapsedtimeid_1_argument*);
extern bool_t xdr_dscudaeventrecordid_1_argument (XDR *, dscudaeventrecordid_1_argument*);
extern bool_t xdr_dscudalaunchkernelid_1_argument (XDR *, dscudalaunchkernelid_1_argument*);
extern bool_t xdr_dscudaloadmoduleid_1_argument (XDR *, dscudaloadmoduleid_1_argument*);
extern bool_t xdr_dscudafuncgetattributesid_1_argument (XDR *, dscudafuncgetattributesid_1_argument*);
extern bool_t xdr_dscudamemcpyh2hid_1_argument (XDR *, dscudamemcpyh2hid_1_argument*);
extern bool_t xdr_dscudamemcpyh2did_1_argument (XDR *, dscudamemcpyh2did_1_argument*);
extern bool_t xdr_dscudamemcpyd2hid_1_argument (XDR *, dscudamemcpyd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyd2did_1_argument (XDR *, dscudamemcpyd2did_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument (XDR *, dscudamemcpyasynch2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2did_1_argument (XDR *, dscudamemcpyasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument (XDR *, dscudamemcpyasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument (XDR *, dscudamemcpyasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument (XDR *, dscudamemcpytosymbolh2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument (XDR *, dscudamemcpytosymbold2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument (XDR *, dscudamemcpyfromsymbold2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument (XDR *, dscudamemcpyfromsymbold2did_1_argument*);
extern bool_t xdr_dscudamemsetid_1_argument (XDR *, dscudamemsetid_1_argument*);
extern bool_t xdr_dscudahostallocid_1_argument (XDR *, dscudahostallocid_1_argument*);
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument (XDR *, dscudahostgetdevicepointerid_1_argument*);
extern bool_t xdr_dscudamallocarrayid_1_argument (XDR *, dscudamallocarrayid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument (XDR *, dscudamemcpytoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument (XDR *, dscudamemcpytoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument (XDR *, dscudamemcpytoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument (XDR *, dscudamemcpytoarrayd2did_1_argument*);
extern bool_t xdr_dscudamallocpitchid_1_argument (XDR *, dscudamallocpitchid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument (XDR *, dscudamemcpy2dtoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument (XDR *, dscudamemcpy2dtoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument (XDR *, dscudamemcpy2dtoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument (XDR *, dscudamemcpy2dtoarrayd2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument (XDR *, dscudamemcpy2dh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2did_1_argument (XDR *, dscudamemcpy2dh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument (XDR *, dscudamemcpy2dd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2did_1_argument (XDR *, dscudamemcpy2dd2did_1_argument*);
extern bool_t xdr_dscudamemset2did_1_argument (XDR *, dscudamemset2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument (XDR *, dscudamemcpytosymbolasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument (XDR *, dscudamemcpytosymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudacreatechanneldescid_1_argument (XDR *, dscudacreatechanneldescid_1_argument*);
extern bool_t xdr_dscudabindtextureid_1_argument (XDR *, dscudabindtextureid_1_argument*);
extern bool_t xdr_dscudabindtexture2did_1_argument (XDR *, dscudabindtexture2did_1_argument*);
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument (XDR *, dscudabindtexturetoarrayid_1_argument*);
extern bool_t xdr_dscufftplan3did_1_argument (XDR *, dscufftplan3did_1_argument*);
extern bool_t xdr_dscufftexecc2cid_1_argument (XDR *, dscufftexecc2cid_1_argument*);
#else
extern bool_t xdr_RCadr ();
extern bool_t xdr_RCstream ();
extern bool_t xdr_RCevent ();
extern bool_t xdr_RCipaddr ();
extern bool_t xdr_RCsize ();
extern bool_t xdr_RCerror ();
extern bool_t xdr_RCbuf ();
extern bool_t xdr_RCchannelformat ();
extern bool_t xdr_RCpid ();
extern bool_t xdr_RCchanneldesc_t ();
extern bool_t xdr_RCchanneldesc ();
extern bool_t xdr_RCtexture_t ();
extern bool_t xdr_RCtexture ();
extern bool_t xdr_RCfuncattr_t ();
extern bool_t xdr_RCfuncattr ();
extern bool_t xdr_RCargType ();
extern bool_t xdr_RCargVal ();
extern bool_t xdr_RCarg ();
extern bool_t xdr_RCargs ();
extern bool_t xdr_dscudaResult ();
extern bool_t xdr_dscudaThreadGetLimitResult ();
extern bool_t xdr_dscudaThreadGetCacheConfigResult ();
extern bool_t xdr_dscudaMallocResult ();
extern bool_t xdr_dscudaHostAllocResult ();
extern bool_t xdr_dscudaMallocHostResult ();
extern bool_t xdr_dscudaMallocArrayResult ();
extern bool_t xdr_dscudaMallocPitchResult ();
extern bool_t xdr_dscudaMemcpyD2HResult ();
extern bool_t xdr_dscudaMemcpyH2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DD2HResult ();
extern bool_t xdr_dscudaMemcpy2DH2HResult ();
extern bool_t xdr_dscudaGetDeviceResult ();
extern bool_t xdr_dscudaGetDeviceCountResult ();
extern bool_t xdr_dscudaGetDevicePropertiesResult ();
extern bool_t xdr_dscudaDriverGetVersionResult ();
extern bool_t xdr_dscudaRuntimeGetVersionResult ();
extern bool_t xdr_dscudaGetErrorStringResult ();
extern bool_t xdr_dscudaCreateChannelDescResult ();
extern bool_t xdr_dscudaGetChannelDescResult ();
extern bool_t xdr_dscudaChooseDeviceResult ();
extern bool_t xdr_dscudaMemcpyAsyncD2HResult ();
extern bool_t xdr_dscudaMemcpyAsyncH2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult ();
extern bool_t xdr_dscudaStreamCreateResult ();
extern bool_t xdr_dscudaEventCreateResult ();
extern bool_t xdr_dscudaEventElapsedTimeResult ();
extern bool_t xdr_dscudaHostGetDevicePointerResult ();
extern bool_t xdr_dscudaHostGetFlagsResult ();
extern bool_t xdr_dscudaLoadModuleResult ();
extern bool_t xdr_dscudaFuncGetAttributesResult ();
extern bool_t xdr_dscudaBindTextureResult ();
extern bool_t xdr_dscudaBindTexture2DResult ();
extern bool_t xdr_dscufftResult ();
extern bool_t xdr_dscufftPlanResult ();
extern bool_t xdr_dscublasResult ();
extern bool_t xdr_dscublasCreateResult ();
extern bool_t xdr_dscublasGetVectorResult ();
extern bool_t xdr_RCdim3 ();
extern bool_t xdr_dscudathreadsetlimitid_1_argument ();
extern bool_t xdr_dscudastreamwaiteventid_1_argument ();
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument ();
extern bool_t xdr_dscudaeventrecordid_1_argument ();
extern bool_t xdr_dscudalaunchkernelid_1_argument ();
extern bool_t xdr_dscudaloadmoduleid_1_argument ();
extern bool_t xdr_dscudafuncgetattributesid_1_argument ();
extern bool_t xdr_dscudamemcpyh2hid_1_argument ();
extern bool_t xdr_dscudamemcpyh2did_1_argument ();
extern bool_t xdr_dscudamemcpyd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyd2did_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument ();
extern bool_t xdr_dscudamemsetid_1_argument ();
extern bool_t xdr_dscudahostallocid_1_argument ();
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument ();
extern bool_t xdr_dscudamallocarrayid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument ();
extern bool_t xdr_dscudamallocpitchid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2did_1_argument ();
extern bool_t xdr_dscudamemset2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudacreatechanneldescid_1_argument ();
extern bool_t xdr_dscudabindtextureid_1_argument ();
extern bool_t xdr_dscudabindtexture2did_1_argument ();
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument ();
extern bool_t xdr_dscufftplan3did_1_argument ();
extern bool_t xdr_dscufftexecc2cid_1_argument ();
#endif
#ifdef __cplusplus
}
#endif
#endif
#pragma end dscudarpc.h
#pragma begin dscudadefs.h
#ifndef _DSCUDADEFS_H
#define _DSCUDADEFS_H
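/* Compile-time limits and defaults for DS-CUDA: server/device/redundancy counts, buffer and kernel-module size limits, module cache lifetimes, and the daemon/server IP ports. */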
#define RC_NSERVERMAX 32
#define RC_NDEVICEMAX 32
#define RC_NREDUNDANCYMAX 4
#define RC_NVDEVMAX 64
#define RC_NPTHREADMAX 64
#define RC_BUFSIZE (1024*1024)
#define RC_NKMODULEMAX 128
#define RC_NKFUNCMAX 128
#define RC_KARGMAX 64
#define RC_KMODULENAMELEN 64
#define RC_KNAMELEN 64
#define RC_KMODULEIMAGELEN (1024*1024*2)
#define RC_SNAMELEN 64
#define RC_CACHE_MODULE (1)
#define RC_CLIENT_CACHE_LIFETIME (30)
#define RC_SERVER_CACHE_LIFETIME (RC_CLIENT_CACHE_LIFETIME+30)
#define RC_SUPPORT_PAGELOCK (0)
#define RC_SUPPORT_STREAM (0)
#define RC_SUPPORT_CONCURRENT_EXEC (0)
#define RC_DAEMON_IP_PORT (65432)
#define RC_SERVER_IP_PORT (RC_DAEMON_IP_PORT+1)
#endif
#pragma end dscudadefs.h
#pragma begin dscudamacros.h
#ifndef DSCUDA_MACROS_H
#define DSCUDA_MACROS_H
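/* Logging helpers gated on dscudaWarnLevel(); WARNONCE emits its message only the first time a given call site is reached. */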
#define WARN(lv, fmt, args...) do { \
if ((lv) <= dscudaWarnLevel()) fprintf(stderr, fmt, ## args); \
} while (0)
#define WARNONCE(lv, fmt, args...) do { \
if ((lv) <= dscudaWarnLevel()) { \
static int firstcall = 1; \
if (firstcall) { \
firstcall = 0; \
fprintf(stderr, fmt, ## args); \
} \
} \
} while (0)
#define ALIGN_UP(off, align) (off) = ((off) + (align) - 1) & ~((align) - 1)
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
#endif
#pragma end dscudamacros.h
#pragma begin ibv_rdma.h
#ifndef RDMA_COMMON_H
#define RDMA_COMMON_H
#ifdef RPC_ONLY
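/* RPC-only build: only the argument descriptor below is compiled; the RDMA (InfiniBand verbs) definitions live in the #else branch. */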
typedef struct {
int type;
union {
uint64_t pointerval;
unsigned int intval;
float floatval;
char customval[RC_KARGMAX];
} val;
unsigned int offset;
unsigned int size;
} IbvArg;
#else
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_cma.h>
#include <hip/hip_runtime_api.h>
#pragma begin dscudadefs.h
#ifndef _DSCUDADEFS_H
#define _DSCUDADEFS_H
#define RC_NSERVERMAX 32
#define RC_NDEVICEMAX 32
#define RC_NREDUNDANCYMAX 4
#define RC_NVDEVMAX 64
#define RC_NPTHREADMAX 64
#define RC_BUFSIZE (1024*1024)
#define RC_NKMODULEMAX 128
#define RC_NKFUNCMAX 128
#define RC_KARGMAX 64
#define RC_KMODULENAMELEN 64
#define RC_KNAMELEN 64
#define RC_KMODULEIMAGELEN (1024*1024*2)
#define RC_SNAMELEN 64
#define RC_CACHE_MODULE (1)
#define RC_CLIENT_CACHE_LIFETIME (30)
#define RC_SERVER_CACHE_LIFETIME (RC_CLIENT_CACHE_LIFETIME+30)
#define RC_SUPPORT_PAGELOCK (0)
#define RC_SUPPORT_STREAM (0)
#define RC_SUPPORT_CONCURRENT_EXEC (0)
#define RC_DAEMON_IP_PORT (65432)
#define RC_SERVER_IP_PORT (RC_DAEMON_IP_PORT+1)
#endif
#pragma end dscudadefs.h
#pragma begin dscudarpc.h
#ifndef _DSCUDARPC_H_RPCGEN
#define _DSCUDARPC_H_RPCGEN
#include <rpc/rpc.h>
#ifdef __cplusplus
extern "C" {
#endif
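/* Wire-level typedefs: device addresses, streams and events are carried as 64-bit integers (u_quad_t); sizes and error codes as u_int. */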
typedef u_quad_t RCadr;
typedef u_quad_t RCstream;
typedef u_quad_t RCevent;
typedef u_quad_t RCipaddr;
typedef u_int RCsize;
typedef u_int RCerror;
typedef struct {
u_int RCbuf_len;
char *RCbuf_val;
} RCbuf;
typedef u_int RCchannelformat;
typedef u_long RCpid;
struct RCchanneldesc_t {
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCchanneldesc_t RCchanneldesc_t;
typedef RCchanneldesc_t RCchanneldesc;
struct RCtexture_t {
int normalized;
int filterMode;
int addressMode[3];
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCtexture_t RCtexture_t;
typedef RCtexture_t RCtexture;
struct RCfuncattr_t {
int binaryVersion;
RCsize constSizeBytes;
RCsize localSizeBytes;
int maxThreadsPerBlock;
int numRegs;
int ptxVersion;
RCsize sharedSizeBytes;
};
typedef struct RCfuncattr_t RCfuncattr_t;
typedef RCfuncattr_t RCfuncattr;
enum RCargType {
dscudaArgTypeP = 0,
dscudaArgTypeI = 1,
dscudaArgTypeF = 2,
dscudaArgTypeV = 3
};
typedef enum RCargType RCargType;
struct RCargVal {
RCargType type;
union {
RCadr address;
u_int valuei;
float valuef;
char valuev[64];
} RCargVal_u;
};
typedef struct RCargVal RCargVal;
struct RCarg {
RCargVal val;
u_int offset;
u_int size;
};
typedef struct RCarg RCarg;
typedef struct {
u_int RCargs_len;
RCarg *RCargs_val;
} RCargs;
struct dscudaResult {
RCerror err;
};
typedef struct dscudaResult dscudaResult;
struct dscudaThreadGetLimitResult {
RCerror err;
RCsize value;
};
typedef struct dscudaThreadGetLimitResult dscudaThreadGetLimitResult;
struct dscudaThreadGetCacheConfigResult {
RCerror err;
int cacheConfig;
};
typedef struct dscudaThreadGetCacheConfigResult dscudaThreadGetCacheConfigResult;
struct dscudaMallocResult {
RCerror err;
RCadr devAdr;
};
typedef struct dscudaMallocResult dscudaMallocResult;
struct dscudaHostAllocResult {
RCerror err;
RCadr pHost;
};
typedef struct dscudaHostAllocResult dscudaHostAllocResult;
struct dscudaMallocHostResult {
RCerror err;
RCadr ptr;
};
typedef struct dscudaMallocHostResult dscudaMallocHostResult;
struct dscudaMallocArrayResult {
RCerror err;
RCadr array;
};
typedef struct dscudaMallocArrayResult dscudaMallocArrayResult;
struct dscudaMallocPitchResult {
RCerror err;
RCadr devPtr;
RCsize pitch;
};
typedef struct dscudaMallocPitchResult dscudaMallocPitchResult;
struct dscudaMemcpyD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyD2HResult dscudaMemcpyD2HResult;
struct dscudaMemcpyH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyH2HResult dscudaMemcpyH2HResult;
struct dscudaMemcpyToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayD2HResult dscudaMemcpyToArrayD2HResult;
struct dscudaMemcpyToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayH2HResult dscudaMemcpyToArrayH2HResult;
struct dscudaMemcpy2DToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayD2HResult dscudaMemcpy2DToArrayD2HResult;
struct dscudaMemcpy2DToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayH2HResult dscudaMemcpy2DToArrayH2HResult;
struct dscudaMemcpy2DD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DD2HResult dscudaMemcpy2DD2HResult;
struct dscudaMemcpy2DH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DH2HResult dscudaMemcpy2DH2HResult;
struct dscudaGetDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaGetDeviceResult dscudaGetDeviceResult;
struct dscudaGetDeviceCountResult {
RCerror err;
int count;
};
typedef struct dscudaGetDeviceCountResult dscudaGetDeviceCountResult;
struct dscudaGetDevicePropertiesResult {
RCerror err;
RCbuf prop;
};
typedef struct dscudaGetDevicePropertiesResult dscudaGetDevicePropertiesResult;
struct dscudaDriverGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaDriverGetVersionResult dscudaDriverGetVersionResult;
struct dscudaRuntimeGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaRuntimeGetVersionResult dscudaRuntimeGetVersionResult;
struct dscudaGetErrorStringResult {
char *errmsg;
};
typedef struct dscudaGetErrorStringResult dscudaGetErrorStringResult;
struct dscudaCreateChannelDescResult {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaCreateChannelDescResult dscudaCreateChannelDescResult;
struct dscudaGetChannelDescResult {
RCerror err;
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaGetChannelDescResult dscudaGetChannelDescResult;
struct dscudaChooseDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaChooseDeviceResult dscudaChooseDeviceResult;
struct dscudaMemcpyAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncD2HResult dscudaMemcpyAsyncD2HResult;
struct dscudaMemcpyAsyncH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncH2HResult dscudaMemcpyAsyncH2HResult;
struct dscudaMemcpyFromSymbolD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolD2HResult dscudaMemcpyFromSymbolD2HResult;
struct dscudaMemcpyFromSymbolAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolAsyncD2HResult dscudaMemcpyFromSymbolAsyncD2HResult;
struct dscudaStreamCreateResult {
RCerror err;
RCadr stream;
};
typedef struct dscudaStreamCreateResult dscudaStreamCreateResult;
struct dscudaEventCreateResult {
RCerror err;
RCadr event;
};
typedef struct dscudaEventCreateResult dscudaEventCreateResult;
struct dscudaEventElapsedTimeResult {
RCerror err;
float ms;
};
typedef struct dscudaEventElapsedTimeResult dscudaEventElapsedTimeResult;
struct dscudaHostGetDevicePointerResult {
RCerror err;
RCadr pDevice;
};
typedef struct dscudaHostGetDevicePointerResult dscudaHostGetDevicePointerResult;
struct dscudaHostGetFlagsResult {
RCerror err;
u_int flags;
};
typedef struct dscudaHostGetFlagsResult dscudaHostGetFlagsResult;
struct dscudaLoadModuleResult {
u_int id;
};
typedef struct dscudaLoadModuleResult dscudaLoadModuleResult;
struct dscudaFuncGetAttributesResult {
RCerror err;
RCfuncattr attr;
};
typedef struct dscudaFuncGetAttributesResult dscudaFuncGetAttributesResult;
struct dscudaBindTextureResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTextureResult dscudaBindTextureResult;
struct dscudaBindTexture2DResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTexture2DResult dscudaBindTexture2DResult;
struct dscufftResult {
RCerror err;
};
typedef struct dscufftResult dscufftResult;
struct dscufftPlanResult {
RCerror err;
u_int plan;
};
typedef struct dscufftPlanResult dscufftPlanResult;
struct dscublasResult {
RCerror err;
u_int stat;
};
typedef struct dscublasResult dscublasResult;
struct dscublasCreateResult {
RCerror err;
u_int stat;
RCadr handle;
};
typedef struct dscublasCreateResult dscublasCreateResult;
struct dscublasGetVectorResult {
RCerror err;
u_int stat;
RCbuf y;
};
typedef struct dscublasGetVectorResult dscublasGetVectorResult;
struct RCdim3 {
u_int x;
u_int y;
u_int z;
};
typedef struct RCdim3 RCdim3;
struct dscudathreadsetlimitid_1_argument {
int limit;
RCsize value;
};
typedef struct dscudathreadsetlimitid_1_argument dscudathreadsetlimitid_1_argument;
struct dscudastreamwaiteventid_1_argument {
RCstream stream;
RCevent event;
u_int flags;
};
typedef struct dscudastreamwaiteventid_1_argument dscudastreamwaiteventid_1_argument;
struct dscudaeventelapsedtimeid_1_argument {
RCevent start;
RCevent end;
};
typedef struct dscudaeventelapsedtimeid_1_argument dscudaeventelapsedtimeid_1_argument;
struct dscudaeventrecordid_1_argument {
RCevent event;
RCstream stream;
};
typedef struct dscudaeventrecordid_1_argument dscudaeventrecordid_1_argument;
struct dscudalaunchkernelid_1_argument {
int moduleid;
int kid;
char *kname;
RCdim3 gdim;
RCdim3 bdim;
RCsize smemsize;
RCstream stream;
RCargs args;
};
typedef struct dscudalaunchkernelid_1_argument dscudalaunchkernelid_1_argument;
struct dscudaloadmoduleid_1_argument {
RCipaddr ipaddr;
RCpid pid;
char *mname;
char *image;
};
typedef struct dscudaloadmoduleid_1_argument dscudaloadmoduleid_1_argument;
struct dscudafuncgetattributesid_1_argument {
int moduleid;
char *kname;
};
typedef struct dscudafuncgetattributesid_1_argument dscudafuncgetattributesid_1_argument;
struct dscudamemcpyh2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2hid_1_argument dscudamemcpyh2hid_1_argument;
struct dscudamemcpyh2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2did_1_argument dscudamemcpyh2did_1_argument;
struct dscudamemcpyd2hid_1_argument {
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2hid_1_argument dscudamemcpyd2hid_1_argument;
struct dscudamemcpyd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2did_1_argument dscudamemcpyd2did_1_argument;
struct dscudamemcpyasynch2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2hid_1_argument dscudamemcpyasynch2hid_1_argument;
struct dscudamemcpyasynch2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2did_1_argument dscudamemcpyasynch2did_1_argument;
struct dscudamemcpyasyncd2hid_1_argument {
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2hid_1_argument dscudamemcpyasyncd2hid_1_argument;
struct dscudamemcpyasyncd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2did_1_argument dscudamemcpyasyncd2did_1_argument;
struct dscudamemcpytosymbolh2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbolh2did_1_argument dscudamemcpytosymbolh2did_1_argument;
struct dscudamemcpytosymbold2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbold2did_1_argument dscudamemcpytosymbold2did_1_argument;
struct dscudamemcpyfromsymbold2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2hid_1_argument dscudamemcpyfromsymbold2hid_1_argument;
struct dscudamemcpyfromsymbold2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2did_1_argument dscudamemcpyfromsymbold2did_1_argument;
struct dscudamemsetid_1_argument {
RCadr dst;
int value;
RCsize count;
};
typedef struct dscudamemsetid_1_argument dscudamemsetid_1_argument;
struct dscudahostallocid_1_argument {
RCsize size;
u_int flags;
};
typedef struct dscudahostallocid_1_argument dscudahostallocid_1_argument;
struct dscudahostgetdevicepointerid_1_argument {
RCadr pHost;
u_int flags;
};
typedef struct dscudahostgetdevicepointerid_1_argument dscudahostgetdevicepointerid_1_argument;
struct dscudamallocarrayid_1_argument {
RCchanneldesc desc;
RCsize width;
RCsize height;
u_int flags;
};
typedef struct dscudamallocarrayid_1_argument dscudamallocarrayid_1_argument;
struct dscudamemcpytoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2hid_1_argument dscudamemcpytoarrayh2hid_1_argument;
struct dscudamemcpytoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2did_1_argument dscudamemcpytoarrayh2did_1_argument;
struct dscudamemcpytoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2hid_1_argument dscudamemcpytoarrayd2hid_1_argument;
struct dscudamemcpytoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2did_1_argument dscudamemcpytoarrayd2did_1_argument;
struct dscudamallocpitchid_1_argument {
RCsize width;
RCsize height;
};
typedef struct dscudamallocpitchid_1_argument dscudamallocpitchid_1_argument;
struct dscudamemcpy2dtoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2hid_1_argument dscudamemcpy2dtoarrayh2hid_1_argument;
struct dscudamemcpy2dtoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf srcbuf;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2did_1_argument dscudamemcpy2dtoarrayh2did_1_argument;
struct dscudamemcpy2dtoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2hid_1_argument dscudamemcpy2dtoarrayd2hid_1_argument;
struct dscudamemcpy2dtoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2did_1_argument dscudamemcpy2dtoarrayd2did_1_argument;
struct dscudamemcpy2dh2hid_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2hid_1_argument dscudamemcpy2dh2hid_1_argument;
struct dscudamemcpy2dh2did_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2did_1_argument dscudamemcpy2dh2did_1_argument;
struct dscudamemcpy2dd2hid_1_argument {
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2hid_1_argument dscudamemcpy2dd2hid_1_argument;
struct dscudamemcpy2dd2did_1_argument {
RCadr dst;
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2did_1_argument dscudamemcpy2dd2did_1_argument;
struct dscudamemset2did_1_argument {
RCadr dst;
RCsize pitch;
int value;
RCsize width;
RCsize height;
};
typedef struct dscudamemset2did_1_argument dscudamemset2did_1_argument;
struct dscudamemcpytosymbolasynch2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasynch2did_1_argument dscudamemcpytosymbolasynch2did_1_argument;
struct dscudamemcpytosymbolasyncd2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasyncd2did_1_argument dscudamemcpytosymbolasyncd2did_1_argument;
struct dscudamemcpyfromsymbolasyncd2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2hid_1_argument dscudamemcpyfromsymbolasyncd2hid_1_argument;
struct dscudamemcpyfromsymbolasyncd2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2did_1_argument dscudamemcpyfromsymbolasyncd2did_1_argument;
struct dscudacreatechanneldescid_1_argument {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudacreatechanneldescid_1_argument dscudacreatechanneldescid_1_argument;
struct dscudabindtextureid_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize size;
RCtexture texbuf;
};
typedef struct dscudabindtextureid_1_argument dscudabindtextureid_1_argument;
struct dscudabindtexture2did_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize width;
RCsize height;
RCsize pitch;
RCtexture texbuf;
};
typedef struct dscudabindtexture2did_1_argument dscudabindtexture2did_1_argument;
struct dscudabindtexturetoarrayid_1_argument {
int moduleid;
char *texname;
RCadr array;
RCtexture texbuf;
};
typedef struct dscudabindtexturetoarrayid_1_argument dscudabindtexturetoarrayid_1_argument;
struct dscufftplan3did_1_argument {
int nx;
int ny;
int nz;
u_int type;
};
typedef struct dscufftplan3did_1_argument dscufftplan3did_1_argument;
struct dscufftexecc2cid_1_argument {
u_int plan;
RCadr idata;
RCadr odata;
int direction;
};
typedef struct dscufftexecc2cid_1_argument dscufftexecc2cid_1_argument;
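/* ONC RPC program number and version under which the DS-CUDA service is registered. */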
#define DSCUDA_PROG 60000
#define DSCUDA_VER 1
#if defined(__STDC__) || defined(__cplusplus)
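/* ANSI prototypes: each RPC has a numeric ID constant, a client stub (*_1) and a server-side handler (*_1_svc). */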
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1(CLIENT *);
extern dscudaResult * dscudathreadexitid_1_svc(struct svc_req *);
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1(CLIENT *);
extern dscudaResult * dscudathreadsynchronizeid_1_svc(struct svc_req *);
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1(int , RCsize , CLIENT *);
extern dscudaResult * dscudathreadsetlimitid_1_svc(int , RCsize , struct svc_req *);
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1(int , CLIENT *);
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc(int , struct svc_req *);
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1(int , CLIENT *);
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc(int , struct svc_req *);
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1(CLIENT *);
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc(struct svc_req *);
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1(CLIENT *);
extern dscudaResult * dscudagetlasterrorid_1_svc(struct svc_req *);
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1(CLIENT *);
extern dscudaResult * dscudapeekatlasterrorid_1_svc(struct svc_req *);
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1(int , CLIENT *);
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc(int , struct svc_req *);
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1(CLIENT *);
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc(struct svc_req *);
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1(CLIENT *);
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc(struct svc_req *);
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1(int , CLIENT *);
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc(int , struct svc_req *);
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1(CLIENT *);
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc(struct svc_req *);
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1(CLIENT *);
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc(struct svc_req *);
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1(int , CLIENT *);
extern dscudaResult * dscudasetdeviceid_1_svc(int , struct svc_req *);
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1(u_int , CLIENT *);
extern dscudaResult * dscudasetdeviceflagsid_1_svc(u_int , struct svc_req *);
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1(RCbuf , CLIENT *);
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc(RCbuf , struct svc_req *);
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1(CLIENT *);
extern dscudaResult * dscudadevicesynchronize_1_svc(struct svc_req *);
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1(CLIENT *);
extern dscudaResult * dscudadevicereset_1_svc(struct svc_req *);
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1(CLIENT *);
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc(struct svc_req *);
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamdestroyid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamsynchronizeid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamqueryid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1(RCstream , RCevent , u_int , CLIENT *);
extern dscudaResult * dscudastreamwaiteventid_1_svc(RCstream , RCevent , u_int , struct svc_req *);
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1(CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc(struct svc_req *);
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1(u_int , CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc(u_int , struct svc_req *);
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventdestroyid_1_svc(RCevent , struct svc_req *);
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1(RCevent , RCevent , CLIENT *);
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc(RCevent , RCevent , struct svc_req *);
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1(RCevent , RCstream , CLIENT *);
extern dscudaResult * dscudaeventrecordid_1_svc(RCevent , RCstream , struct svc_req *);
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventsynchronizeid_1_svc(RCevent , struct svc_req *);
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventqueryid_1_svc(RCevent , struct svc_req *);
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , CLIENT *);
extern void * dscudalaunchkernelid_1_svc(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , struct svc_req *);
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1(RCipaddr , RCpid , char *, char *, CLIENT *);
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc(RCipaddr , RCpid , char *, char *, struct svc_req *);
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1(int , char *, CLIENT *);
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc(int , char *, struct svc_req *);
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1(RCsize , CLIENT *);
extern dscudaMallocResult * dscudamallocid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreeid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyh2did_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1(RCadr , RCsize , CLIENT *);
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc(RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1(RCadr , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyd2did_1_svc(RCadr , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasynch2did_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1(RCadr , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc(RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1(RCadr , RCadr , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasyncd2did_1_svc(RCadr , RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1(int , char *, RCbuf , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc(int , char *, RCbuf , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1(int , char *, RCadr , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbold2did_1_svc(int , char *, RCadr , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1(int , char *, RCsize , RCsize , CLIENT *);
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc(int , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1(int , RCadr , char *, RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc(int , RCadr , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1(RCadr , int , RCsize , CLIENT *);
extern dscudaResult * dscudamemsetid_1_svc(RCadr , int , RCsize , struct svc_req *);
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1(RCsize , u_int , CLIENT *);
extern dscudaHostAllocResult * dscudahostallocid_1_svc(RCsize , u_int , struct svc_req *);
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1(RCsize , CLIENT *);
extern dscudaMallocHostResult * dscudamallochostid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreehostid_1_svc(RCadr , struct svc_req *);
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1(RCadr , u_int , CLIENT *);
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc(RCadr , u_int , struct svc_req *);
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1(RCadr , CLIENT *);
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc(RCadr , struct svc_req *);
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1(RCchanneldesc , RCsize , RCsize , u_int , CLIENT *);
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc(RCchanneldesc , RCsize , RCsize , u_int , struct svc_req *);
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreearrayid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1(RCsize , RCsize , CLIENT *);
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc(RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dh2did_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1(RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc(RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dd2did_1_svc(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1(RCadr , RCsize , int , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemset2did_1_svc(RCadr , RCsize , int , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1(int , char *, RCbuf , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc(int , char *, RCbuf , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1(int , char *, RCadr , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1(int , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc(int , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1(int , RCadr , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc(int , RCadr , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1(int , int , int , int , RCchannelformat , CLIENT *);
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc(int , int , int , int , RCchannelformat , struct svc_req *);
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1(RCadr , CLIENT *);
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc(RCadr , struct svc_req *);
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1(int , char *, RCadr , RCsize , RCtexture , CLIENT *);
extern dscudaBindTextureResult * dscudabindtextureid_1_svc(int , char *, RCadr , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , CLIENT *);
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1(int , char *, RCadr , RCtexture , CLIENT *);
extern dscudaResult * dscudabindtexturetoarrayid_1_svc(int , char *, RCadr , RCtexture , struct svc_req *);
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1(RCtexture , CLIENT *);
extern dscudaResult * dscudaunbindtextureid_1_svc(RCtexture , struct svc_req *);
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1(int , int , int , u_int , CLIENT *);
extern dscufftPlanResult * dscufftplan3did_1_svc(int , int , int , u_int , struct svc_req *);
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1(u_int , CLIENT *);
extern dscufftResult * dscufftdestroyid_1_svc(u_int , struct svc_req *);
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1(u_int , RCadr , RCadr , int , CLIENT *);
extern dscufftResult * dscufftexecc2cid_1_svc(u_int , RCadr , RCadr , int , struct svc_req *);
extern int dscuda_prog_1_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1();
extern dscudaResult * dscudathreadexitid_1_svc();
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1();
extern dscudaResult * dscudathreadsynchronizeid_1_svc();
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1();
extern dscudaResult * dscudathreadsetlimitid_1_svc();
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1();
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc();
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1();
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc();
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1();
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc();
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1();
extern dscudaResult * dscudagetlasterrorid_1_svc();
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1();
extern dscudaResult * dscudapeekatlasterrorid_1_svc();
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1();
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc();
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1();
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc();
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1();
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc();
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1();
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc();
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1();
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc();
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1();
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc();
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1();
extern dscudaResult * dscudasetdeviceid_1_svc();
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1();
extern dscudaResult * dscudasetdeviceflagsid_1_svc();
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1();
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc();
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1();
extern dscudaResult * dscudadevicesynchronize_1_svc();
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1();
extern dscudaResult * dscudadevicereset_1_svc();
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1();
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc();
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1();
extern dscudaResult * dscudastreamdestroyid_1_svc();
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1();
extern dscudaResult * dscudastreamsynchronizeid_1_svc();
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1();
extern dscudaResult * dscudastreamqueryid_1_svc();
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1();
extern dscudaResult * dscudastreamwaiteventid_1_svc();
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1();
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc();
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1();
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc();
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1();
extern dscudaResult * dscudaeventdestroyid_1_svc();
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1();
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc();
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1();
extern dscudaResult * dscudaeventrecordid_1_svc();
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1();
extern dscudaResult * dscudaeventsynchronizeid_1_svc();
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1();
extern dscudaResult * dscudaeventqueryid_1_svc();
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1();
extern void * dscudalaunchkernelid_1_svc();
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1();
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc();
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1();
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc();
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1();
extern dscudaMallocResult * dscudamallocid_1_svc();
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1();
extern dscudaResult * dscudafreeid_1_svc();
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1();
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc();
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1();
extern dscudaResult * dscudamemcpyh2did_1_svc();
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1();
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc();
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1();
extern dscudaResult * dscudamemcpyd2did_1_svc();
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1();
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc();
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1();
extern dscudaResult * dscudamemcpyasynch2did_1_svc();
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1();
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc();
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1();
extern dscudaResult * dscudamemcpyasyncd2did_1_svc();
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1();
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc();
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1();
extern dscudaResult * dscudamemcpytosymbold2did_1_svc();
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1();
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc();
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1();
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc();
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1();
extern dscudaResult * dscudamemsetid_1_svc();
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1();
extern dscudaHostAllocResult * dscudahostallocid_1_svc();
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1();
extern dscudaMallocHostResult * dscudamallochostid_1_svc();
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1();
extern dscudaResult * dscudafreehostid_1_svc();
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1();
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc();
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1();
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc();
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1();
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc();
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1();
extern dscudaResult * dscudafreearrayid_1_svc();
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1();
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc();
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1();
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc();
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1();
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc();
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1();
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc();
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1();
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc();
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1();
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc();
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc();
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1();
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc();
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc();
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1();
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc();
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1();
extern dscudaResult * dscudamemcpy2dh2did_1_svc();
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1();
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc();
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1();
extern dscudaResult * dscudamemcpy2dd2did_1_svc();
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1();
extern dscudaResult * dscudamemset2did_1_svc();
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1();
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc();
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1();
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1();
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1();
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc();
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1();
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc();
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1();
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc();
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1();
extern dscudaBindTextureResult * dscudabindtextureid_1_svc();
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1();
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc();
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1();
extern dscudaResult * dscudabindtexturetoarrayid_1_svc();
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1();
extern dscudaResult * dscudaunbindtextureid_1_svc();
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1();
extern dscufftPlanResult * dscufftplan3did_1_svc();
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1();
extern dscufftResult * dscufftdestroyid_1_svc();
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1();
extern dscufftResult * dscufftexecc2cid_1_svc();
extern int dscuda_prog_1_freeresult ();
#endif
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_RCadr (XDR *, RCadr*);
extern bool_t xdr_RCstream (XDR *, RCstream*);
extern bool_t xdr_RCevent (XDR *, RCevent*);
extern bool_t xdr_RCipaddr (XDR *, RCipaddr*);
extern bool_t xdr_RCsize (XDR *, RCsize*);
extern bool_t xdr_RCerror (XDR *, RCerror*);
extern bool_t xdr_RCbuf (XDR *, RCbuf*);
extern bool_t xdr_RCchannelformat (XDR *, RCchannelformat*);
extern bool_t xdr_RCpid (XDR *, RCpid*);
extern bool_t xdr_RCchanneldesc_t (XDR *, RCchanneldesc_t*);
extern bool_t xdr_RCchanneldesc (XDR *, RCchanneldesc*);
extern bool_t xdr_RCtexture_t (XDR *, RCtexture_t*);
extern bool_t xdr_RCtexture (XDR *, RCtexture*);
extern bool_t xdr_RCfuncattr_t (XDR *, RCfuncattr_t*);
extern bool_t xdr_RCfuncattr (XDR *, RCfuncattr*);
extern bool_t xdr_RCargType (XDR *, RCargType*);
extern bool_t xdr_RCargVal (XDR *, RCargVal*);
extern bool_t xdr_RCarg (XDR *, RCarg*);
extern bool_t xdr_RCargs (XDR *, RCargs*);
extern bool_t xdr_dscudaResult (XDR *, dscudaResult*);
extern bool_t xdr_dscudaThreadGetLimitResult (XDR *, dscudaThreadGetLimitResult*);
extern bool_t xdr_dscudaThreadGetCacheConfigResult (XDR *, dscudaThreadGetCacheConfigResult*);
extern bool_t xdr_dscudaMallocResult (XDR *, dscudaMallocResult*);
extern bool_t xdr_dscudaHostAllocResult (XDR *, dscudaHostAllocResult*);
extern bool_t xdr_dscudaMallocHostResult (XDR *, dscudaMallocHostResult*);
extern bool_t xdr_dscudaMallocArrayResult (XDR *, dscudaMallocArrayResult*);
extern bool_t xdr_dscudaMallocPitchResult (XDR *, dscudaMallocPitchResult*);
extern bool_t xdr_dscudaMemcpyD2HResult (XDR *, dscudaMemcpyD2HResult*);
extern bool_t xdr_dscudaMemcpyH2HResult (XDR *, dscudaMemcpyH2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayD2HResult (XDR *, dscudaMemcpyToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayH2HResult (XDR *, dscudaMemcpyToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult (XDR *, dscudaMemcpy2DToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult (XDR *, dscudaMemcpy2DToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DD2HResult (XDR *, dscudaMemcpy2DD2HResult*);
extern bool_t xdr_dscudaMemcpy2DH2HResult (XDR *, dscudaMemcpy2DH2HResult*);
extern bool_t xdr_dscudaGetDeviceResult (XDR *, dscudaGetDeviceResult*);
extern bool_t xdr_dscudaGetDeviceCountResult (XDR *, dscudaGetDeviceCountResult*);
extern bool_t xdr_dscudaGetDevicePropertiesResult (XDR *, dscudaGetDevicePropertiesResult*);
extern bool_t xdr_dscudaDriverGetVersionResult (XDR *, dscudaDriverGetVersionResult*);
extern bool_t xdr_dscudaRuntimeGetVersionResult (XDR *, dscudaRuntimeGetVersionResult*);
extern bool_t xdr_dscudaGetErrorStringResult (XDR *, dscudaGetErrorStringResult*);
extern bool_t xdr_dscudaCreateChannelDescResult (XDR *, dscudaCreateChannelDescResult*);
extern bool_t xdr_dscudaGetChannelDescResult (XDR *, dscudaGetChannelDescResult*);
extern bool_t xdr_dscudaChooseDeviceResult (XDR *, dscudaChooseDeviceResult*);
extern bool_t xdr_dscudaMemcpyAsyncD2HResult (XDR *, dscudaMemcpyAsyncD2HResult*);
extern bool_t xdr_dscudaMemcpyAsyncH2HResult (XDR *, dscudaMemcpyAsyncH2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult (XDR *, dscudaMemcpyFromSymbolD2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult (XDR *, dscudaMemcpyFromSymbolAsyncD2HResult*);
extern bool_t xdr_dscudaStreamCreateResult (XDR *, dscudaStreamCreateResult*);
extern bool_t xdr_dscudaEventCreateResult (XDR *, dscudaEventCreateResult*);
extern bool_t xdr_dscudaEventElapsedTimeResult (XDR *, dscudaEventElapsedTimeResult*);
extern bool_t xdr_dscudaHostGetDevicePointerResult (XDR *, dscudaHostGetDevicePointerResult*);
extern bool_t xdr_dscudaHostGetFlagsResult (XDR *, dscudaHostGetFlagsResult*);
extern bool_t xdr_dscudaLoadModuleResult (XDR *, dscudaLoadModuleResult*);
extern bool_t xdr_dscudaFuncGetAttributesResult (XDR *, dscudaFuncGetAttributesResult*);
extern bool_t xdr_dscudaBindTextureResult (XDR *, dscudaBindTextureResult*);
extern bool_t xdr_dscudaBindTexture2DResult (XDR *, dscudaBindTexture2DResult*);
extern bool_t xdr_dscufftResult (XDR *, dscufftResult*);
extern bool_t xdr_dscufftPlanResult (XDR *, dscufftPlanResult*);
extern bool_t xdr_dscublasResult (XDR *, dscublasResult*);
extern bool_t xdr_dscublasCreateResult (XDR *, dscublasCreateResult*);
extern bool_t xdr_dscublasGetVectorResult (XDR *, dscublasGetVectorResult*);
extern bool_t xdr_RCdim3 (XDR *, RCdim3*);
extern bool_t xdr_dscudathreadsetlimitid_1_argument (XDR *, dscudathreadsetlimitid_1_argument*);
extern bool_t xdr_dscudastreamwaiteventid_1_argument (XDR *, dscudastreamwaiteventid_1_argument*);
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument (XDR *, dscudaeventelapsedtimeid_1_argument*);
extern bool_t xdr_dscudaeventrecordid_1_argument (XDR *, dscudaeventrecordid_1_argument*);
extern bool_t xdr_dscudalaunchkernelid_1_argument (XDR *, dscudalaunchkernelid_1_argument*);
extern bool_t xdr_dscudaloadmoduleid_1_argument (XDR *, dscudaloadmoduleid_1_argument*);
extern bool_t xdr_dscudafuncgetattributesid_1_argument (XDR *, dscudafuncgetattributesid_1_argument*);
extern bool_t xdr_dscudamemcpyh2hid_1_argument (XDR *, dscudamemcpyh2hid_1_argument*);
extern bool_t xdr_dscudamemcpyh2did_1_argument (XDR *, dscudamemcpyh2did_1_argument*);
extern bool_t xdr_dscudamemcpyd2hid_1_argument (XDR *, dscudamemcpyd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyd2did_1_argument (XDR *, dscudamemcpyd2did_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument (XDR *, dscudamemcpyasynch2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2did_1_argument (XDR *, dscudamemcpyasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument (XDR *, dscudamemcpyasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument (XDR *, dscudamemcpyasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument (XDR *, dscudamemcpytosymbolh2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument (XDR *, dscudamemcpytosymbold2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument (XDR *, dscudamemcpyfromsymbold2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument (XDR *, dscudamemcpyfromsymbold2did_1_argument*);
extern bool_t xdr_dscudamemsetid_1_argument (XDR *, dscudamemsetid_1_argument*);
extern bool_t xdr_dscudahostallocid_1_argument (XDR *, dscudahostallocid_1_argument*);
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument (XDR *, dscudahostgetdevicepointerid_1_argument*);
extern bool_t xdr_dscudamallocarrayid_1_argument (XDR *, dscudamallocarrayid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument (XDR *, dscudamemcpytoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument (XDR *, dscudamemcpytoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument (XDR *, dscudamemcpytoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument (XDR *, dscudamemcpytoarrayd2did_1_argument*);
extern bool_t xdr_dscudamallocpitchid_1_argument (XDR *, dscudamallocpitchid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument (XDR *, dscudamemcpy2dtoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument (XDR *, dscudamemcpy2dtoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument (XDR *, dscudamemcpy2dtoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument (XDR *, dscudamemcpy2dtoarrayd2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument (XDR *, dscudamemcpy2dh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2did_1_argument (XDR *, dscudamemcpy2dh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument (XDR *, dscudamemcpy2dd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2did_1_argument (XDR *, dscudamemcpy2dd2did_1_argument*);
extern bool_t xdr_dscudamemset2did_1_argument (XDR *, dscudamemset2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument (XDR *, dscudamemcpytosymbolasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument (XDR *, dscudamemcpytosymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudacreatechanneldescid_1_argument (XDR *, dscudacreatechanneldescid_1_argument*);
extern bool_t xdr_dscudabindtextureid_1_argument (XDR *, dscudabindtextureid_1_argument*);
extern bool_t xdr_dscudabindtexture2did_1_argument (XDR *, dscudabindtexture2did_1_argument*);
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument (XDR *, dscudabindtexturetoarrayid_1_argument*);
extern bool_t xdr_dscufftplan3did_1_argument (XDR *, dscufftplan3did_1_argument*);
extern bool_t xdr_dscufftexecc2cid_1_argument (XDR *, dscufftexecc2cid_1_argument*);
#else
extern bool_t xdr_RCadr ();
extern bool_t xdr_RCstream ();
extern bool_t xdr_RCevent ();
extern bool_t xdr_RCipaddr ();
extern bool_t xdr_RCsize ();
extern bool_t xdr_RCerror ();
extern bool_t xdr_RCbuf ();
extern bool_t xdr_RCchannelformat ();
extern bool_t xdr_RCpid ();
extern bool_t xdr_RCchanneldesc_t ();
extern bool_t xdr_RCchanneldesc ();
extern bool_t xdr_RCtexture_t ();
extern bool_t xdr_RCtexture ();
extern bool_t xdr_RCfuncattr_t ();
extern bool_t xdr_RCfuncattr ();
extern bool_t xdr_RCargType ();
extern bool_t xdr_RCargVal ();
extern bool_t xdr_RCarg ();
extern bool_t xdr_RCargs ();
extern bool_t xdr_dscudaResult ();
extern bool_t xdr_dscudaThreadGetLimitResult ();
extern bool_t xdr_dscudaThreadGetCacheConfigResult ();
extern bool_t xdr_dscudaMallocResult ();
extern bool_t xdr_dscudaHostAllocResult ();
extern bool_t xdr_dscudaMallocHostResult ();
extern bool_t xdr_dscudaMallocArrayResult ();
extern bool_t xdr_dscudaMallocPitchResult ();
extern bool_t xdr_dscudaMemcpyD2HResult ();
extern bool_t xdr_dscudaMemcpyH2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DD2HResult ();
extern bool_t xdr_dscudaMemcpy2DH2HResult ();
extern bool_t xdr_dscudaGetDeviceResult ();
extern bool_t xdr_dscudaGetDeviceCountResult ();
extern bool_t xdr_dscudaGetDevicePropertiesResult ();
extern bool_t xdr_dscudaDriverGetVersionResult ();
extern bool_t xdr_dscudaRuntimeGetVersionResult ();
extern bool_t xdr_dscudaGetErrorStringResult ();
extern bool_t xdr_dscudaCreateChannelDescResult ();
extern bool_t xdr_dscudaGetChannelDescResult ();
extern bool_t xdr_dscudaChooseDeviceResult ();
extern bool_t xdr_dscudaMemcpyAsyncD2HResult ();
extern bool_t xdr_dscudaMemcpyAsyncH2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult ();
extern bool_t xdr_dscudaStreamCreateResult ();
extern bool_t xdr_dscudaEventCreateResult ();
extern bool_t xdr_dscudaEventElapsedTimeResult ();
extern bool_t xdr_dscudaHostGetDevicePointerResult ();
extern bool_t xdr_dscudaHostGetFlagsResult ();
extern bool_t xdr_dscudaLoadModuleResult ();
extern bool_t xdr_dscudaFuncGetAttributesResult ();
extern bool_t xdr_dscudaBindTextureResult ();
extern bool_t xdr_dscudaBindTexture2DResult ();
extern bool_t xdr_dscufftResult ();
extern bool_t xdr_dscufftPlanResult ();
extern bool_t xdr_dscublasResult ();
extern bool_t xdr_dscublasCreateResult ();
extern bool_t xdr_dscublasGetVectorResult ();
extern bool_t xdr_RCdim3 ();
extern bool_t xdr_dscudathreadsetlimitid_1_argument ();
extern bool_t xdr_dscudastreamwaiteventid_1_argument ();
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument ();
extern bool_t xdr_dscudaeventrecordid_1_argument ();
extern bool_t xdr_dscudalaunchkernelid_1_argument ();
extern bool_t xdr_dscudaloadmoduleid_1_argument ();
extern bool_t xdr_dscudafuncgetattributesid_1_argument ();
extern bool_t xdr_dscudamemcpyh2hid_1_argument ();
extern bool_t xdr_dscudamemcpyh2did_1_argument ();
extern bool_t xdr_dscudamemcpyd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyd2did_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument ();
extern bool_t xdr_dscudamemsetid_1_argument ();
extern bool_t xdr_dscudahostallocid_1_argument ();
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument ();
extern bool_t xdr_dscudamallocarrayid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument ();
extern bool_t xdr_dscudamallocpitchid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2did_1_argument ();
extern bool_t xdr_dscudamemset2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudacreatechanneldescid_1_argument ();
extern bool_t xdr_dscudabindtextureid_1_argument ();
extern bool_t xdr_dscudabindtexture2did_1_argument ();
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument ();
extern bool_t xdr_dscufftplan3did_1_argument ();
extern bool_t xdr_dscufftexecc2cid_1_argument ();
#endif
#ifdef __cplusplus
}
#endif
#endif
#pragma end dscudarpc.h
#pragma begin dscudamacros.h
#ifndef DSCUDA_MACROS_H
#define DSCUDA_MACROS_H
#define WARN(lv, fmt, args...) if (lv <= dscudaWarnLevel()) fprintf(stderr, fmt, ## args);
#define WARNONCE(lv, fmt, args...) if (lv <= dscudaWarnLevel()) { \
static int firstcall = 1; \
if (firstcall) { \
firstcall = 0; \
fprintf(stderr, fmt, ## args); \
} \
}
#define ALIGN_UP(off, align) (off) = ((off) + (align) - 1) & ~((align) - 1)
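/* ALIGN_UP rounds 'off' up to the next multiple of 'align' (a power of two).
 * Worked example: off = 5, align = 4 -> (5 + 3) & ~3 = 8. The generated kernel
 * stubs below use it to pack marshalled arguments at naturally aligned offsets. */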
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
#endif
#pragma end dscudamacros.h
#define TEST_NZ(x) do { if ( (x)) {WARN(0, #x " failed (returned non-zero).\n" ); exit(EXIT_FAILURE); } } while (0)
#define TEST_Z(x) do { if (!(x)) {WARN(0, #x " failed (returned zero/null).\n"); exit(EXIT_FAILURE); } } while (0)
#define RC_NWR_PER_POST (16)
#define RC_SGE_SIZE (1024 * 1024 * 2)
#define RC_WR_MAX (RC_NWR_PER_POST * 16)
#define RC_RDMA_BUF_SIZE (RC_NWR_PER_POST * RC_SGE_SIZE)
#if RC_RDMA_BUF_SIZE < RC_KMODULEIMAGELEN
#error "RC_RDMA_BUF_SIZE too small."
#endif
#define RC_SERVER_IBV_CQ_SIZE (RC_WR_MAX)
#define RC_CLIENT_IBV_CQ_SIZE (65536)
#define RC_IBV_IP_PORT_BASE (65432)
#define RC_IBV_TIMEOUT (500)
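/* With the values above: RC_WR_MAX = 16 * 16 = 256 outstanding work requests,
 * and RC_RDMA_BUF_SIZE = 16 * (2 * 1024 * 1024) bytes = 32 MiB per RDMA region,
 * which the #if above requires to be at least RC_KMODULEIMAGELEN. */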
struct message {
struct ibv_mr mr[RC_NWR_PER_POST];
};
enum rdma_state_t {
STATE_INIT,
STATE_READY,
STATE_BUSY,
};
typedef struct {
struct rdma_cm_id *id;
struct ibv_qp *qp;
struct ibv_context *ibvctx;
struct ibv_pd *pd;
struct ibv_cq *cq;
struct ibv_comp_channel *comp_channel;
struct message *recv_msg;
struct message *send_msg;
char *rdma_local_region;
char *rdma_remote_region;
struct ibv_mr *recv_mr;
struct ibv_mr *send_mr;
struct ibv_mr peer_mr[RC_NWR_PER_POST];
struct ibv_mr *rdma_local_mr[RC_NWR_PER_POST];
struct ibv_mr *rdma_remote_mr[RC_NWR_PER_POST];
pthread_t cq_poller_thread;
int connected;
enum rdma_state_t rdma_state;
int rdma_nreq_pending;
} IbvConnection;
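/* Per-connection InfiniBand verbs state: the rdma_cm id and queue pair,
 * protection domain, completion queue and its event channel, the pre-posted
 * send/recv control messages with their memory registrations, the local and
 * remote RDMA data regions (split into RC_NWR_PER_POST registrations each),
 * a completion-poller thread, and connection/progress flags. */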
typedef enum {
RCMethodNone = 0,
RCMethodMemcpyH2D,
RCMethodMemcpyD2H,
RCMethodMemcpyD2D,
RCMethodMalloc,
RCMethodFree,
RCMethodGetErrorString,
RCMethodGetDeviceProperties,
RCMethodRuntimeGetVersion,
RCMethodThreadSynchronize,
RCMethodThreadExit,
RCMethodDeviceSynchronize,
RCMethodDscudaMemcpyToSymbolH2D,
RCMethodDscudaMemcpyToSymbolD2D,
RCMethodDscudaMemcpyFromSymbolD2H,
RCMethodDscudaMemcpyFromSymbolD2D,
RCMethodDscudaMemcpyToSymbolAsyncH2D,
RCMethodDscudaMemcpyToSymbolAsyncD2D,
RCMethodDscudaMemcpyFromSymbolAsyncD2H,
RCMethodDscudaMemcpyFromSymbolAsyncD2D,
RCMethodDscudaLoadModule,
RCMethodDscudaLaunchKernel,
RCMethodEnd
} RCMethod;
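/* Each RCMethod value tags one remote-call type carried over the IBV
 * transport; the Invoke/Return headers below all start with this tag. */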
typedef struct {
RCMethod method;
int payload;
} IbvHdr;
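/* Generic header: every Invoke/Return struct below begins with the same
 * RCMethod field, so a receiver can dispatch on the first word of a message. */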
typedef struct {
RCMethod method;
size_t count;
RCadr dstadr;
void *srcbuf;
} IbvMemcpyH2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvMemcpyH2DReturnHdr;
typedef struct {
RCMethod method;
size_t count;
RCadr srcadr;
} IbvMemcpyD2HInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
void *dstbuf;
} IbvMemcpyD2HReturnHdr;
typedef struct {
RCMethod method;
size_t count;
RCadr dstadr;
RCadr srcadr;
} IbvMemcpyD2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvMemcpyD2DReturnHdr;
typedef struct {
RCMethod method;
size_t size;
} IbvMallocInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
RCadr devAdr;
} IbvMallocReturnHdr;
typedef struct {
RCMethod method;
RCadr devAdr;
} IbvFreeInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvFreeReturnHdr;
typedef struct {
RCMethod method;
int device;
hipError_t err;
} IbvGetErrorStringInvokeHdr;
typedef struct {
RCMethod method;
char *errmsg;
} IbvGetErrorStringReturnHdr;
typedef struct {
RCMethod method;
int device;
} IbvGetDevicePropertiesInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
hipDeviceProp_t prop;
} IbvGetDevicePropertiesReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvRuntimeGetVersionInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
int version;
} IbvRuntimeGetVersionReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvThreadSynchronizeInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvThreadSynchronizeReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvThreadExitInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvThreadExitReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvDeviceSynchronizeInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDeviceSynchronizeReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
void *src;
} IbvDscudaMemcpyToSymbolH2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyToSymbolH2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCadr srcadr;
} IbvDscudaMemcpyToSymbolD2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyToSymbolD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
} IbvDscudaMemcpyFromSymbolD2HInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
void *dst;
} IbvDscudaMemcpyFromSymbolD2HReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCadr dstadr;
} IbvDscudaMemcpyFromSymbolD2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyFromSymbolD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
void *src;
} IbvDscudaMemcpyToSymbolAsyncH2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyToSymbolAsyncH2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
RCadr srcadr;
} IbvDscudaMemcpyToSymbolAsyncD2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyToSymbolAsyncD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
} IbvDscudaMemcpyFromSymbolAsyncD2HInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
void *dst;
} IbvDscudaMemcpyFromSymbolAsyncD2HReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
RCadr dstadr;
} IbvDscudaMemcpyFromSymbolAsyncD2DInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaMemcpyFromSymbolAsyncD2DReturnHdr;
typedef struct {
RCMethod method;
uint64_t ipaddr;
unsigned long int pid;
char modulename[RC_KMODULENAMELEN];
void *moduleimage;
} IbvDscudaLoadModuleInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
int moduleid;
} IbvDscudaLoadModuleReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
int kernelid;
char kernelname[RC_KNAMELEN];
unsigned int gdim[3];
unsigned int bdim[3];
unsigned int smemsize;
RCstream stream;
int narg;
void *args;
} IbvDscudaLaunchKernelInvokeHdr;
typedef struct {
RCMethod method;
hipError_t err;
} IbvDscudaLaunchKernelReturnHdr;
typedef struct {
int type;
union {
uint64_t pointerval;
unsigned int intval;
float floatval;
char customval[RC_KARGMAX];
} val;
unsigned int offset;
unsigned int size;
} IbvArg;
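/* IbvArg describes one marshalled kernel argument: a type tag, the value
 * itself (pointer, int, float, or an opaque byte blob of up to RC_KARGMAX
 * bytes), and the offset/size within the argument buffer. The generated stub
 * below fills these in, computing each offset with ALIGN_UP. */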
void rdmaBuildConnection(struct rdma_cm_id *id, bool is_server);
void rdmaBuildParams(struct rdma_conn_param *params);
void rdmaDestroyConnection(IbvConnection *conn);
void rdmaSetOnCompletionHandler(void (*handler)(struct ibv_wc *));
void rdmaOnCompletionClient(struct ibv_wc *);
void rdmaOnCompletionServer(struct ibv_wc *);
void rdmaWaitEvent(struct rdma_event_channel *ec, rdma_cm_event_type et, int (*handler)(struct rdma_cm_id *id));
void rdmaWaitReadyToKickoff(IbvConnection *conn);
void rdmaWaitReadyToDisconnect(IbvConnection *conn);
void rdmaKickoff(IbvConnection *conn, int length);
void rdmaPipelinedKickoff(IbvConnection *conn, int length, char *payload_buf, char *payload_src, int payload_size);
void rdmaSendMr(IbvConnection *conn);
#endif
#endif
#pragma end ibv_rdma.h
enum {
RC_REMOTECALL_TYPE_RPC,
RC_REMOTECALL_TYPE_IBV,
};
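/* dscudaRemoteCallType() returns one of these; the generated kernel stub
 * below branches on it to marshal arguments either as IbvArg records (IBV
 * transport) or as RCargs (RPC transport). */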
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
char *dscudaMemcpyKindName(hipMemcpyKind kind);
const char *dscudaGetIpaddrString(unsigned int addr);
double RCgetCputime(double *t0);
void *dscudaUvaOfAdr(void *adr, int devid);
int dscudaDevidOfUva(void *adr);
void *dscudaAdrOfUva(void *adr);
int dscudaNredundancy(void);
void dscudaSetAutoVerb(int verb);
int dscudaRemoteCallType(void);
void dscudaSetErrorHandler(void (*handler)(void *), void *handler_arg);
void dscudaGetMangledFunctionName(char *name, const char *funcif, const char *ptxdata);
int *dscudaLoadModule(char *srcname, char *strdata);
void rpcDscudaLaunchKernelWrapper(int *moduleid, int kid, char *kname,
RCdim3 gdim, RCdim3 bdim, RCsize smemsize, RCstream stream,
RCargs args);
void ibvDscudaLaunchKernelWrapper(int *moduleid, int kid, char *kname,
int *gdim, int *bdim, RCsize smemsize, RCstream stream,
int narg, IbvArg *arg);
hipError_t dscudaFuncGetAttributesWrapper(int *moduleid, struct hipFuncAttributes *attr, const char *func);
hipError_t dscudaMemcpyToSymbolWrapper(int *moduleid, const char *symbol, const void *src,
size_t count, size_t offset = 0,
enum hipMemcpyKind kind = hipMemcpyHostToDevice);
hipError_t dscudaMemcpyToSymbolAsyncWrapper(int *moduleid, const char *symbol, const void *src,
size_t count, size_t offset = 0,
enum hipMemcpyKind kind = hipMemcpyHostToDevice, hipStream_t stream = 0);
hipError_t dscudaMemcpyFromSymbolWrapper(int *moduleid, void *dst, const char *symbol,
size_t count, size_t offset = 0,
enum hipMemcpyKind kind = hipMemcpyDeviceToHost);
hipError_t dscudaMemcpyFromSymbolAsyncWrapper(int *moduleid, void *dst, const char *symbol,
size_t count, size_t offset = 0,
enum hipMemcpyKind kind = hipMemcpyDeviceToHost, hipStream_t stream = 0);
hipError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct textureReference *tex,
const void *devPtr,
const struct hipChannelFormatDesc *desc,
size_t size = UINT_MAX);
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
const struct hipChannelFormatDesc &desc,
size_t size = UINT_MAX)
{
return dscudaBindTextureWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), "tex", offset, &tex, devPtr, &desc, size);
}
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
size_t size = UINT_MAX)
{
return dscudaBindTextureWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), "tex", offset, tex, devPtr, tex.channelDesc, size);
}
hipError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct textureReference *tex,
const void *devPtr,
const struct hipChannelFormatDesc *desc,
size_t width, size_t height, size_t pitch);
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
const struct hipChannelFormatDesc &desc,
size_t width, size_t height, size_t pitch)
{
return dscudaBindTexture2DWrapper(moduleid, texname,
offset, &tex, devPtr, &desc, width, height, pitch);
}
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
size_t width, size_t height, size_t pitch)
{
return dscudaBindTexture2DWrapper(moduleid, texname,
offset, &tex, devPtr, &tex.channelDesc, width, height, pitch);
}
hipError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct textureReference *tex,
const struct hipArray * array,
const struct hipChannelFormatDesc *desc);
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct texture<T, dim, readMode> &tex,
const struct hipArray * array,
const struct hipChannelFormatDesc & desc)
{
return dscudaBindTextureToArrayWrapper(moduleid, texname, &tex, array, &desc);
}
template<class T, int dim, enum hipTextureReadMode readMode>
hipError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct texture<T, dim, readMode> &tex,
const struct hipArray * array)
{
struct hipChannelFormatDesc desc;
hipError_t err = hipGetChannelDesc(&desc, array);
return err == hipSuccess ? dscudaBindTextureToArrayWrapper(moduleid, texname, &tex, array, &desc) : err;
}
#endif
#pragma end dscuda.h
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#ifdef MIN
#undef MIN
#endif
#ifdef MAX
#undef MAX
#endif
#include <cutil_inline.h>
#pragma begin direct.h
#ifndef DIRECT_H
#define DIRECT_H
#define NMAX (1024*1024*8)
#define NDEVMAX 1024
void readnbody(int *nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname);
void writenbody(int nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname);
void push_velocity(double (*vj)[3], double (*a)[3], double dt, int nj);
void push_position(double (*xj)[3], double (*vj)[3], double (*a)[3], double dt, int nj);
void energy(double *mj, double (*vj)[3], double *p, int nj, double *ke, double *pe);
void get_cputime(double *lap, double *split);
void plot_star(double x[NMAX][3], int n, double time, double ratio, double m[NMAX], double initm);
void create_cold_homosphere(int n, double *mj, double (*xj)[3], double (*vj)[3]);
void create_plummer(int n, double *mj, double (*xj)[3], double (*vj)[3]);
#endif
#pragma end direct.h
/*
* stub for remote call to gravity_kernel.
*/
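/*
 * The stub marshals the six kernel arguments (pointers, a float, an int) into
 * either an IbvArg array or an RCargs list, packing them at ALIGN_UP'd
 * offsets, resolves the kernel's mangled name once, and hands everything to
 * ibvDscudaLaunchKernelWrapper() or rpcDscudaLaunchKernelWrapper() together
 * with the grid/block dimensions, shared-memory size and stream.
 */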
void
dscudagravity_kernel(dim3 _gdim, dim3 _bdim, size_t _smemsize, hipStream_t _stream , float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n)
{
int _narg = 6;
int _ibvgdim[3], _ibvbdim[3];
IbvArg _ibvarg[6], *_ibvargp;
RCargs _rcargs;
RCarg _rcarg[6], *_rcargp;
RCdim3 _gdimrc, _bdimrc;
int _off = 0;
int _rcargc = 0;
void *_devptr;
_rcargs.RCargs_val = _rcarg;
_rcargs.RCargs_len = _narg;
static char mangledname_[512] = {0,};
if (!mangledname_[0]) {
if (1) {
dscudaGetMangledFunctionName(mangledname_, __PRETTY_FUNCTION__, Ptxdata);
}
else {
char buf_[256];
sprintf(buf_, "%s", __FUNCTION__);
strcpy(mangledname_, buf_ + strlen("dscuda")); // obtain original function name.
}
WARN(3, "mangled name : %s\n", mangledname_);
}
if (dscudaRemoteCallType() == RC_REMOTECALL_TYPE_IBV) {
// a pointer to a device-address 'dscudaAdrOfUva(m)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(m);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(x)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(x);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a float 'eps'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
ALIGN_UP(_off, __alignof(float));
_ibvargp->type = dscudaArgTypeF;
_ibvargp->offset = _off;
_ibvargp->val.floatval = eps;
_ibvargp->size = sizeof(float);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(a)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(a);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(p)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(p);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// an integer 'n'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
ALIGN_UP(_off, __alignof(int));
_ibvargp->type = dscudaArgTypeI;
_ibvargp->offset = _off;
_ibvargp->val.intval = n;
_ibvargp->size = sizeof(int);
_off += _ibvargp->size;
_ibvgdim[0] = _gdim.x; _ibvgdim[1] = _gdim.y; _ibvgdim[2] = _gdim.z;
_ibvbdim[0] = _bdim.x; _ibvbdim[1] = _bdim.y; _ibvbdim[2] = _bdim.z;
#if !RPC_ONLY
ibvDscudaLaunchKernelWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), 0, mangledname_,
_ibvgdim, _ibvbdim, _smemsize, (RCstream)_stream,
_narg, _ibvarg);
#endif
}
else {
// a pointer to a device-address 'dscudaAdrOfUva(m)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(m);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(x)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(x);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a float 'eps'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
ALIGN_UP(_off, __alignof(float));
_rcargp->val.type = dscudaArgTypeF;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.valuef = eps;
_rcargp->size = sizeof(float);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(a)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(a);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(p)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(p);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// an integer 'n'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
ALIGN_UP(_off, __alignof(int));
_rcargp->val.type = dscudaArgTypeI;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.valuei = n;
_rcargp->size = sizeof(int);
_off += _rcargp->size;
_gdimrc.x = _gdim.x; _gdimrc.y = _gdim.y; _gdimrc.z = _gdim.z;
_bdimrc.x = _bdim.x; _bdimrc.y = _bdim.y; _bdimrc.z = _bdim.z;
rpcDscudaLaunchKernelWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), 0, mangledname_,
_gdimrc, _bdimrc, _smemsize, (RCstream)_stream,
_rcargs);
}
}
void gravity_kernel(float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n);
static void calc_gravity_gpu(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n);
static void calc_gravity(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n);
void
gravity_kernel(float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n)
{
/* nop */
}
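/*
 * The host-side gravity_kernel() above is intentionally empty: the actual
 * kernel body lives in the embedded PTX image (see Ptxdata and
 * "./dscudatmp/direct.cu.ptx") and is executed remotely through
 * dscudagravity_kernel(), so only the symbol and its signature are needed
 * here.
 */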
static void
calc_gravity_gpu(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n)
{
static int firstcall = 1;
static float *d_m, (*d_x)[3], (*d_a)[3], *d_p;
static float floatbuf[NMAX*3];
int i, k;
int nth = 64;
dim3 threads(nth, 1, 1);
dim3 grids((n+nth-1)/nth, 1, 1);
if (firstcall) {
firstcall = 0;
cutilSafeCall(hipMalloc((void**)&d_m, sizeof(float) * n));
cutilSafeCall(hipMalloc((void**)&d_x, sizeof(float) * 3 * n));
cutilSafeCall(hipMalloc((void**)&d_a, sizeof(float) * 3 * n));
cutilSafeCall(hipMalloc((void**)&d_p, sizeof(float) * n));
}
for (i = 0 ; i < n; i++) {
floatbuf[i] = (float)m[i];
}
cutilSafeCall(hipMemcpy(d_m, floatbuf, sizeof(float) * n, hipMemcpyHostToDevice));
for (i = 0 ; i < n; i++) {
for (k = 0; k < 3; k++) {
floatbuf[3 * i + k] = (float)x[i][k];
}
}
cutilSafeCall(hipMemcpy(d_x, floatbuf, sizeof(float) * 3 * n, hipMemcpyHostToDevice));
dscudagravity_kernel(grids, threads, 0, NULL, d_m, d_x, (float)eps, d_a, d_p, n);
cutilSafeCall(hipMemcpy(floatbuf, d_a, sizeof(float) * 3 * n, hipMemcpyDeviceToHost));
for (i = 0 ; i < n; i++) {
for (k = 0; k < 3; k++) {
a[i][k] = (double)floatbuf[3 * i + k];
}
}
cutilSafeCall(hipMemcpy(floatbuf, d_p, sizeof(float) * n, hipMemcpyDeviceToHost));
for (i = 0 ; i < n; i++) {
p[i]= (double)floatbuf[i];
}
}
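/*
 * Note on resource lifetime: d_m, d_x, d_a and d_p are allocated once on
 * the first call and kept for the lifetime of the process; they are never
 * freed.  A minimal cleanup sketch (not part of the original code, and it
 * would require exposing the buffers beyond their current function-static
 * scope) could look like:
 *
 *     cutilSafeCall(hipFree(d_m));
 *     cutilSafeCall(hipFree(d_x));
 *     cutilSafeCall(hipFree(d_a));
 *     cutilSafeCall(hipFree(d_p));
 */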
static void
calc_gravity(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n)
{
double r, r2, mf, dx[3];
int i, j, k;
for (i = 0; i < n; i++) {
for (k = 0; k < 3; k++) {
a[i][k] = 0.0;
}
p[i] = 0.0;
for (j = 0; j < n; j++) {
for (k = 0; k < 3; k++) {
dx[k] = x[j][k] - x[i][k];
}
r2 = eps * eps;
for (k = 0; k < 3; k++) {
r2 += dx[k] * dx[k];
}
r = sqrt(r2);
mf = m[j] / (r * r2);
for (k = 0; k < 3; k++) {
a[i][k] += mf * dx[k];
}
p[i] -= m[j] / r;
}
}
if (eps != 0.0) {
double epsinv;
epsinv = 1.0 / eps;
for (i = 0; i < n; i++) {
p[i] += m[i] * epsinv;
}
}
}
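/*
 * calc_gravity() evaluates, for every particle i, the Plummer-softened
 * direct sum
 *
 *     a_i = sum_j m_j (x_j - x_i) / (|x_j - x_i|^2 + eps^2)^(3/2)
 *     p_i = -sum_j m_j / sqrt(|x_j - x_i|^2 + eps^2)
 *
 * The j == i term contributes nothing to a_i (dx = 0) but adds -m_i/eps
 * to p_i, which is exactly what the final loop compensates for when
 * eps != 0.
 */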
#ifdef __DSCUDA__
static void
errhandler(void *arg)
{
fprintf(stderr, "calculation error on some GPU at timestep: %d\n",
*(int *)arg);
exit(1);
}
#endif
int
main(int argc, char **argv)
{
static double mj[NMAX], xj[NMAX][3], vj[NMAX][3];
static double a[NMAX][3], p[NMAX];
double time, dt, endt;
double eps;
double e, e0, ke, pe;
double lt=0.0, st=0.0, sustained;
int n, nstep, interval;
static int step;
#ifdef __DSCUDA__
dscudaSetErrorHandler(errhandler, (void *)&step);
#endif
eps = 0.02;
dt = 0.01;
endt = 1.1;
time = 0.0;
nstep = endt/dt;
if (argc < 3) {
fprintf(stderr, "performs gravitational N-body simulation with naive direct summation algorithm.\n"
"usage: %s <infile> <outfile>\n", argv[0]);
exit(1);
}
readnbody(&n, mj, xj, vj, argv[1]);
interval = 500 * (10000.0/n) * (10000.0/n);
if (interval * 10 > nstep) {
interval = nstep / 10;
}
interval = 1; /* overrides the value computed above: forces diagnostics every step */
fprintf(stderr, "interval: %d\n", interval);
get_cputime(&lt, &st);
#if 1
calc_gravity_gpu(mj, xj, eps, a, p, n);
#else
calc_gravity(mj, xj, eps, a, p, n);
#endif
energy(mj, vj, p, n, &ke, &pe);
e0 = ke+pe;
printf("ke: %f pe: %f e0: %f\n", ke, pe, e0);
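/*
 * The integration loop below is a kick-drift-kick (leapfrog) scheme:
 * a half-step velocity kick, a full-step position drift, a recomputation
 * of the accelerations, and a second half-step kick.  (push_position also
 * receives a, so it may additionally apply a 0.5*a*dt^2 term; its body is
 * defined elsewhere.)  Energy and timing diagnostics are printed every
 * 'interval' steps.
 */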
for (step = 1; step < nstep; step++) {
push_velocity(vj, a, 0.5*dt, n);
push_position(xj, vj, a, dt, n);
time = time + dt;
#if 1
calc_gravity_gpu(mj, xj, eps, a, p, n);
#else
calc_gravity(mj, xj, eps, a, p, n);
#endif
push_velocity(vj, a, 0.5*dt, n);
if (step % interval == 0) {
energy(mj, vj, p, n, &ke, &pe);
e = ke+pe;
sustained = 38.0*((double)n)*((double)n)*interval/lt/1e9;
printf("speed: %g Gflops\n", sustained);
printf("step: %d time: %e\n", step, time);
printf("e: %e de: %e\n", e, e-e0);
printf("ke: %e pe: %e\n", ke, pe);
printf("ke/pe: %e\n\n", ke/pe);
get_cputime(&lt, &st);
}
}
writenbody(n, mj, xj, vj, argv[2]);
return 0;
}
| 45ad17b184abc0d61183a9048c911a395b3a4f35.cu | static char *Ptxdata =
" .version 1.4\n"
" .target sm_10, map_f64_to_f32\n"
" // compiled with /usr/local/cuda4.1/cuda/open64/lib//be\n"
" // nvopencc 4.1 built on 2012-01-12\n"
"\n"
" //-----------------------------------------------------------\n"
" // Compiling /tmp/tmpxft_000072dc_00000000-9_direct.cpp3.i (/tmp/ccBI#.T0CKuW)\n"
" //-----------------------------------------------------------\n"
"\n"
" //-----------------------------------------------------------\n"
" // Options:\n"
" //-----------------------------------------------------------\n"
" // Target:ptx, ISA:sm_10, Endian:little, Pointer Size:64\n"
" // -O3 (Optimization level)\n"
" // -g0 (Debug level)\n"
" // -m2 (Report advisories)\n"
" //-----------------------------------------------------------\n"
"\n"
" .file 1 \"<command-line>\"\n"
" .file 2 \"/tmp/tmpxft_000072dc_00000000-8_direct.cudafe2.gpu\"\n"
" .file 3 \"/usr/lib/gcc/x86_64-redhat-linux/4.5.1/include/stddef.h\"\n"
" .file 4 \"/usr/local/cuda4.1/cuda/include/crt/device_runtime.h\"\n"
" .file 5 \"/usr/local/cuda4.1/cuda/include/host_defines.h\"\n"
" .file 6 \"/usr/local/cuda4.1/cuda/include/builtin_types.h\"\n"
" .file 7 \"/usr/local/cuda4.1/cuda/include/device_types.h\"\n"
" .file 8 \"/usr/local/cuda4.1/cuda/include/driver_types.h\"\n"
" .file 9 \"/usr/local/cuda4.1/cuda/include/surface_types.h\"\n"
" .file 10 \"/usr/local/cuda4.1/cuda/include/texture_types.h\"\n"
" .file 11 \"/usr/local/cuda4.1/cuda/include/vector_types.h\"\n"
" .file 12 \"/usr/local/cuda4.1/cuda/include/device_launch_parameters.h\"\n"
" .file 13 \"/usr/local/cuda4.1/cuda/include/crt/storage_class.h\"\n"
" .file 14 \"direct.cu\"\n"
" .file 15 \"/usr/local/cuda4.1/cuda/include/common_functions.h\"\n"
" .file 16 \"/usr/local/cuda4.1/cuda/include/math_functions.h\"\n"
" .file 17 \"/usr/local/cuda4.1/cuda/include/math_constants.h\"\n"
" .file 18 \"/usr/local/cuda4.1/cuda/include/device_functions.h\"\n"
" .file 19 \"/usr/local/cuda4.1/cuda/include/sm_11_atomic_functions.h\"\n"
" .file 20 \"/usr/local/cuda4.1/cuda/include/sm_12_atomic_functions.h\"\n"
" .file 21 \"/usr/local/cuda4.1/cuda/include/sm_13_double_functions.h\"\n"
" .file 22 \"/usr/local/cuda4.1/cuda/include/sm_20_atomic_functions.h\"\n"
" .file 23 \"/usr/local/cuda4.1/cuda/include/sm_20_intrinsics.h\"\n"
" .file 24 \"/usr/local/cuda4.1/cuda/include/surface_functions.h\"\n"
" .file 25 \"/usr/local/cuda4.1/cuda/include/texture_fetch_functions.h\"\n"
" .file 26 \"/usr/local/cuda4.1/cuda/include/math_functions_dbl_ptx1.h\"\n"
"\n"
"\n"
" .entry _Z14gravity_kernelPfPA3_ffS1_S_i (\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_m,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_x,\n"
" .param .f32 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_eps,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_a,\n"
" .param .u64 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_p,\n"
" .param .s32 __cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n)\n"
" {\n"
" .reg .u16 %rh<4>;\n"
" .reg .u32 %r<12>;\n"
" .reg .u64 %rd<13>;\n"
" .reg .f32 %f<37>;\n"
" .reg .pred %p<4>;\n"
" .loc 14 12 0\n"
"$LDWbegin__Z14gravity_kernelPfPA3_ffS1_S_i:\n"
" .loc 14 51 0\n"
" mov.f32 %f1, 0f00000000; // 0\n"
" mov.f32 %f2, %f1;\n"
" mov.f32 %f3, 0f00000000; // 0\n"
" mov.f32 %f4, %f3;\n"
" mov.f32 %f5, 0f00000000; // 0\n"
" mov.f32 %f6, %f5;\n"
" cvt.u32.u16 %r1, %tid.x;\n"
" mov.u16 %rh1, %ntid.x;\n"
" mov.u16 %rh2, %ctaid.x;\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" mov.u32 %r3, 0;\n"
" setp.le.s32 %p1, %r2, %r3;\n"
" @%p1 bra $Lt_0_7426;\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" mov.s32 %r4, %r2;\n"
" mul.wide.u16 %r5, %rh1, %rh2;\n"
" ld.param.f32 %f7, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_eps];\n"
" mul.f32 %f8, %f7, %f7;\n"
" add.u32 %r6, %r5, %r1;\n"
" cvt.s64.s32 %rd1, %r6;\n"
" ld.param.u64 %rd2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_x];\n"
" mov.s64 %rd3, %rd2;\n"
" ld.param.u64 %rd4, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_m];\n"
" mul.wide.s32 %rd5, %r6, 12;\n"
" add.u64 %rd6, %rd5, %rd2;\n"
" ld.global.f32 %f9, [%rd6+0];\n"
" ld.global.f32 %f10, [%rd6+4];\n"
" ld.global.f32 %f11, [%rd6+8];\n"
" mov.s32 %r7, 0;\n"
" mov.f32 %f12, 0f00000000; // 0\n"
" mov.s32 %r8, %r4;\n"
"$Lt_0_6914:\n"
" //<loop> Loop body line 51, nesting depth: 1, estimated iterations: unknown\n"
" .loc 14 62 0\n"
" ld.global.f32 %f13, [%rd3+0];\n"
" ld.global.f32 %f14, [%rd3+4];\n"
" ld.global.f32 %f15, [%rd3+8];\n"
" sub.f32 %f16, %f13, %f9;\n"
" sub.f32 %f17, %f14, %f10;\n"
" sub.f32 %f18, %f15, %f11;\n"
" mad.f32 %f19, %f16, %f16, %f8;\n"
" mad.f32 %f20, %f17, %f17, %f19;\n"
" mad.f32 %f21, %f18, %f18, %f20;\n"
" .loc 14 65 0\n"
" ld.global.f32 %f22, [%rd4+0];\n"
" .loc 14 67 0\n"
" sqrt.approx.f32 %f23, %f21;\n"
" mul.f32 %f24, %f23, %f21;\n"
" div.full.f32 %f25, %f22, %f24;\n"
" mov.f32 %f26, %f2;\n"
" mad.f32 %f27, %f16, %f25, %f26;\n"
" mov.f32 %f2, %f27;\n"
" mov.f32 %f28, %f4;\n"
" mad.f32 %f29, %f17, %f25, %f28;\n"
" mov.f32 %f4, %f29;\n"
" mov.f32 %f30, %f6;\n"
" mad.f32 %f31, %f18, %f25, %f30;\n"
" mov.f32 %f6, %f31;\n"
" .loc 14 69 0\n"
" div.full.f32 %f32, %f22, %f23;\n"
" sub.f32 %f12, %f12, %f32;\n"
" add.s32 %r7, %r7, 1;\n"
" add.u64 %rd4, %rd4, 4;\n"
" add.u64 %rd3, %rd3, 12;\n"
" .loc 14 51 0\n"
" ld.param.s32 %r2, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_n];\n"
" .loc 14 69 0\n"
" setp.ne.s32 %p2, %r2, %r7;\n"
" @%p2 bra $Lt_0_6914;\n"
" bra.uni $Lt_0_6402;\n"
"$Lt_0_7426:\n"
" mul.wide.u16 %r9, %rh1, %rh2;\n"
" add.u32 %r10, %r1, %r9;\n"
" cvt.s64.s32 %rd1, %r10;\n"
" mul.wide.s32 %rd5, %r10, 12;\n"
" mov.f32 %f12, 0f00000000; // 0\n"
"$Lt_0_6402:\n"
" .loc 14 72 0\n"
" ld.param.u64 %rd7, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_a];\n"
" add.u64 %rd8, %rd7, %rd5;\n"
" mov.f32 %f33, %f2;\n"
" st.global.f32 [%rd8+0], %f33;\n"
" mov.f32 %f34, %f4;\n"
" st.global.f32 [%rd8+4], %f34;\n"
" mov.f32 %f35, %f6;\n"
" st.global.f32 [%rd8+8], %f35;\n"
" .loc 14 74 0\n"
" ld.param.u64 %rd9, [__cudaparm__Z14gravity_kernelPfPA3_ffS1_S_i_p];\n"
" mul.lo.u64 %rd10, %rd1, 4;\n"
" add.u64 %rd11, %rd9, %rd10;\n"
" st.global.f32 [%rd11+0], %f12;\n"
" .loc 14 77 0\n"
" exit;\n"
"$LDWend__Z14gravity_kernelPfPA3_ffS1_S_i:\n"
" } // _Z14gravity_kernelPfPA3_ffS1_S_i\n"
"\n";
#pragma dscuda endofptx
#pragma begin dscuda.h
#ifndef _DSCUDA_H
#define _DSCUDA_H
#include <cuda_runtime_api.h>
#include <cutil.h>
#include <builtin_types.h>
#include <driver_types.h>
#include <cuda_texture_types.h>
#include <texture_types.h>
#pragma begin dscudarpc.h
#ifndef _DSCUDARPC_H_RPCGEN
#define _DSCUDARPC_H_RPCGEN
#include <rpc/rpc.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef u_quad_t RCadr;
typedef u_quad_t RCstream;
typedef u_quad_t RCevent;
typedef u_quad_t RCipaddr;
typedef u_int RCsize;
typedef u_int RCerror;
typedef struct {
u_int RCbuf_len;
char *RCbuf_val;
} RCbuf;
typedef u_int RCchannelformat;
typedef u_long RCpid;
struct RCchanneldesc_t {
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCchanneldesc_t RCchanneldesc_t;
typedef RCchanneldesc_t RCchanneldesc;
struct RCtexture_t {
int normalized;
int filterMode;
int addressMode[3];
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCtexture_t RCtexture_t;
typedef RCtexture_t RCtexture;
struct RCfuncattr_t {
int binaryVersion;
RCsize constSizeBytes;
RCsize localSizeBytes;
int maxThreadsPerBlock;
int numRegs;
int ptxVersion;
RCsize sharedSizeBytes;
};
typedef struct RCfuncattr_t RCfuncattr_t;
typedef RCfuncattr_t RCfuncattr;
enum RCargType {
dscudaArgTypeP = 0,
dscudaArgTypeI = 1,
dscudaArgTypeF = 2,
dscudaArgTypeV = 3,
};
typedef enum RCargType RCargType;
struct RCargVal {
RCargType type;
union {
RCadr address;
u_int valuei;
float valuef;
char valuev[64];
} RCargVal_u;
};
typedef struct RCargVal RCargVal;
struct RCarg {
RCargVal val;
u_int offset;
u_int size;
};
typedef struct RCarg RCarg;
typedef struct {
u_int RCargs_len;
RCarg *RCargs_val;
} RCargs;
struct dscudaResult {
RCerror err;
};
typedef struct dscudaResult dscudaResult;
struct dscudaThreadGetLimitResult {
RCerror err;
RCsize value;
};
typedef struct dscudaThreadGetLimitResult dscudaThreadGetLimitResult;
struct dscudaThreadGetCacheConfigResult {
RCerror err;
int cacheConfig;
};
typedef struct dscudaThreadGetCacheConfigResult dscudaThreadGetCacheConfigResult;
struct dscudaMallocResult {
RCerror err;
RCadr devAdr;
};
typedef struct dscudaMallocResult dscudaMallocResult;
struct dscudaHostAllocResult {
RCerror err;
RCadr pHost;
};
typedef struct dscudaHostAllocResult dscudaHostAllocResult;
struct dscudaMallocHostResult {
RCerror err;
RCadr ptr;
};
typedef struct dscudaMallocHostResult dscudaMallocHostResult;
struct dscudaMallocArrayResult {
RCerror err;
RCadr array;
};
typedef struct dscudaMallocArrayResult dscudaMallocArrayResult;
struct dscudaMallocPitchResult {
RCerror err;
RCadr devPtr;
RCsize pitch;
};
typedef struct dscudaMallocPitchResult dscudaMallocPitchResult;
struct dscudaMemcpyD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyD2HResult dscudaMemcpyD2HResult;
struct dscudaMemcpyH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyH2HResult dscudaMemcpyH2HResult;
struct dscudaMemcpyToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayD2HResult dscudaMemcpyToArrayD2HResult;
struct dscudaMemcpyToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayH2HResult dscudaMemcpyToArrayH2HResult;
struct dscudaMemcpy2DToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayD2HResult dscudaMemcpy2DToArrayD2HResult;
struct dscudaMemcpy2DToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayH2HResult dscudaMemcpy2DToArrayH2HResult;
struct dscudaMemcpy2DD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DD2HResult dscudaMemcpy2DD2HResult;
struct dscudaMemcpy2DH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DH2HResult dscudaMemcpy2DH2HResult;
struct dscudaGetDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaGetDeviceResult dscudaGetDeviceResult;
struct dscudaGetDeviceCountResult {
RCerror err;
int count;
};
typedef struct dscudaGetDeviceCountResult dscudaGetDeviceCountResult;
struct dscudaGetDevicePropertiesResult {
RCerror err;
RCbuf prop;
};
typedef struct dscudaGetDevicePropertiesResult dscudaGetDevicePropertiesResult;
struct dscudaDriverGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaDriverGetVersionResult dscudaDriverGetVersionResult;
struct dscudaRuntimeGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaRuntimeGetVersionResult dscudaRuntimeGetVersionResult;
struct dscudaGetErrorStringResult {
char *errmsg;
};
typedef struct dscudaGetErrorStringResult dscudaGetErrorStringResult;
struct dscudaCreateChannelDescResult {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaCreateChannelDescResult dscudaCreateChannelDescResult;
struct dscudaGetChannelDescResult {
RCerror err;
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaGetChannelDescResult dscudaGetChannelDescResult;
struct dscudaChooseDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaChooseDeviceResult dscudaChooseDeviceResult;
struct dscudaMemcpyAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncD2HResult dscudaMemcpyAsyncD2HResult;
struct dscudaMemcpyAsyncH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncH2HResult dscudaMemcpyAsyncH2HResult;
struct dscudaMemcpyFromSymbolD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolD2HResult dscudaMemcpyFromSymbolD2HResult;
struct dscudaMemcpyFromSymbolAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolAsyncD2HResult dscudaMemcpyFromSymbolAsyncD2HResult;
struct dscudaStreamCreateResult {
RCerror err;
RCadr stream;
};
typedef struct dscudaStreamCreateResult dscudaStreamCreateResult;
struct dscudaEventCreateResult {
RCerror err;
RCadr event;
};
typedef struct dscudaEventCreateResult dscudaEventCreateResult;
struct dscudaEventElapsedTimeResult {
RCerror err;
float ms;
};
typedef struct dscudaEventElapsedTimeResult dscudaEventElapsedTimeResult;
struct dscudaHostGetDevicePointerResult {
RCerror err;
RCadr pDevice;
};
typedef struct dscudaHostGetDevicePointerResult dscudaHostGetDevicePointerResult;
struct dscudaHostGetFlagsResult {
RCerror err;
u_int flags;
};
typedef struct dscudaHostGetFlagsResult dscudaHostGetFlagsResult;
struct dscudaLoadModuleResult {
u_int id;
};
typedef struct dscudaLoadModuleResult dscudaLoadModuleResult;
struct dscudaFuncGetAttributesResult {
RCerror err;
RCfuncattr attr;
};
typedef struct dscudaFuncGetAttributesResult dscudaFuncGetAttributesResult;
struct dscudaBindTextureResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTextureResult dscudaBindTextureResult;
struct dscudaBindTexture2DResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTexture2DResult dscudaBindTexture2DResult;
struct dscufftResult {
RCerror err;
};
typedef struct dscufftResult dscufftResult;
struct dscufftPlanResult {
RCerror err;
u_int plan;
};
typedef struct dscufftPlanResult dscufftPlanResult;
struct dscublasResult {
RCerror err;
u_int stat;
};
typedef struct dscublasResult dscublasResult;
struct dscublasCreateResult {
RCerror err;
u_int stat;
RCadr handle;
};
typedef struct dscublasCreateResult dscublasCreateResult;
struct dscublasGetVectorResult {
RCerror err;
u_int stat;
RCbuf y;
};
typedef struct dscublasGetVectorResult dscublasGetVectorResult;
struct RCdim3 {
u_int x;
u_int y;
u_int z;
};
typedef struct RCdim3 RCdim3;
struct dscudathreadsetlimitid_1_argument {
int limit;
RCsize value;
};
typedef struct dscudathreadsetlimitid_1_argument dscudathreadsetlimitid_1_argument;
struct dscudastreamwaiteventid_1_argument {
RCstream stream;
RCevent event;
u_int flags;
};
typedef struct dscudastreamwaiteventid_1_argument dscudastreamwaiteventid_1_argument;
struct dscudaeventelapsedtimeid_1_argument {
RCevent start;
RCevent end;
};
typedef struct dscudaeventelapsedtimeid_1_argument dscudaeventelapsedtimeid_1_argument;
struct dscudaeventrecordid_1_argument {
RCevent event;
RCstream stream;
};
typedef struct dscudaeventrecordid_1_argument dscudaeventrecordid_1_argument;
struct dscudalaunchkernelid_1_argument {
int moduleid;
int kid;
char *kname;
RCdim3 gdim;
RCdim3 bdim;
RCsize smemsize;
RCstream stream;
RCargs args;
};
typedef struct dscudalaunchkernelid_1_argument dscudalaunchkernelid_1_argument;
struct dscudaloadmoduleid_1_argument {
RCipaddr ipaddr;
RCpid pid;
char *mname;
char *image;
};
typedef struct dscudaloadmoduleid_1_argument dscudaloadmoduleid_1_argument;
struct dscudafuncgetattributesid_1_argument {
int moduleid;
char *kname;
};
typedef struct dscudafuncgetattributesid_1_argument dscudafuncgetattributesid_1_argument;
struct dscudamemcpyh2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2hid_1_argument dscudamemcpyh2hid_1_argument;
struct dscudamemcpyh2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2did_1_argument dscudamemcpyh2did_1_argument;
struct dscudamemcpyd2hid_1_argument {
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2hid_1_argument dscudamemcpyd2hid_1_argument;
struct dscudamemcpyd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2did_1_argument dscudamemcpyd2did_1_argument;
struct dscudamemcpyasynch2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2hid_1_argument dscudamemcpyasynch2hid_1_argument;
struct dscudamemcpyasynch2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2did_1_argument dscudamemcpyasynch2did_1_argument;
struct dscudamemcpyasyncd2hid_1_argument {
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2hid_1_argument dscudamemcpyasyncd2hid_1_argument;
struct dscudamemcpyasyncd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2did_1_argument dscudamemcpyasyncd2did_1_argument;
struct dscudamemcpytosymbolh2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbolh2did_1_argument dscudamemcpytosymbolh2did_1_argument;
struct dscudamemcpytosymbold2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbold2did_1_argument dscudamemcpytosymbold2did_1_argument;
struct dscudamemcpyfromsymbold2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2hid_1_argument dscudamemcpyfromsymbold2hid_1_argument;
struct dscudamemcpyfromsymbold2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2did_1_argument dscudamemcpyfromsymbold2did_1_argument;
struct dscudamemsetid_1_argument {
RCadr dst;
int value;
RCsize count;
};
typedef struct dscudamemsetid_1_argument dscudamemsetid_1_argument;
struct dscudahostallocid_1_argument {
RCsize size;
u_int flags;
};
typedef struct dscudahostallocid_1_argument dscudahostallocid_1_argument;
struct dscudahostgetdevicepointerid_1_argument {
RCadr pHost;
u_int flags;
};
typedef struct dscudahostgetdevicepointerid_1_argument dscudahostgetdevicepointerid_1_argument;
struct dscudamallocarrayid_1_argument {
RCchanneldesc desc;
RCsize width;
RCsize height;
u_int flags;
};
typedef struct dscudamallocarrayid_1_argument dscudamallocarrayid_1_argument;
struct dscudamemcpytoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2hid_1_argument dscudamemcpytoarrayh2hid_1_argument;
struct dscudamemcpytoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2did_1_argument dscudamemcpytoarrayh2did_1_argument;
struct dscudamemcpytoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2hid_1_argument dscudamemcpytoarrayd2hid_1_argument;
struct dscudamemcpytoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2did_1_argument dscudamemcpytoarrayd2did_1_argument;
struct dscudamallocpitchid_1_argument {
RCsize width;
RCsize height;
};
typedef struct dscudamallocpitchid_1_argument dscudamallocpitchid_1_argument;
struct dscudamemcpy2dtoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2hid_1_argument dscudamemcpy2dtoarrayh2hid_1_argument;
struct dscudamemcpy2dtoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf srcbuf;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2did_1_argument dscudamemcpy2dtoarrayh2did_1_argument;
struct dscudamemcpy2dtoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2hid_1_argument dscudamemcpy2dtoarrayd2hid_1_argument;
struct dscudamemcpy2dtoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2did_1_argument dscudamemcpy2dtoarrayd2did_1_argument;
struct dscudamemcpy2dh2hid_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2hid_1_argument dscudamemcpy2dh2hid_1_argument;
struct dscudamemcpy2dh2did_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2did_1_argument dscudamemcpy2dh2did_1_argument;
struct dscudamemcpy2dd2hid_1_argument {
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2hid_1_argument dscudamemcpy2dd2hid_1_argument;
struct dscudamemcpy2dd2did_1_argument {
RCadr dst;
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2did_1_argument dscudamemcpy2dd2did_1_argument;
struct dscudamemset2did_1_argument {
RCadr dst;
RCsize pitch;
int value;
RCsize width;
RCsize height;
};
typedef struct dscudamemset2did_1_argument dscudamemset2did_1_argument;
struct dscudamemcpytosymbolasynch2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasynch2did_1_argument dscudamemcpytosymbolasynch2did_1_argument;
struct dscudamemcpytosymbolasyncd2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasyncd2did_1_argument dscudamemcpytosymbolasyncd2did_1_argument;
struct dscudamemcpyfromsymbolasyncd2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2hid_1_argument dscudamemcpyfromsymbolasyncd2hid_1_argument;
struct dscudamemcpyfromsymbolasyncd2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2did_1_argument dscudamemcpyfromsymbolasyncd2did_1_argument;
struct dscudacreatechanneldescid_1_argument {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudacreatechanneldescid_1_argument dscudacreatechanneldescid_1_argument;
struct dscudabindtextureid_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize size;
RCtexture texbuf;
};
typedef struct dscudabindtextureid_1_argument dscudabindtextureid_1_argument;
struct dscudabindtexture2did_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize width;
RCsize height;
RCsize pitch;
RCtexture texbuf;
};
typedef struct dscudabindtexture2did_1_argument dscudabindtexture2did_1_argument;
struct dscudabindtexturetoarrayid_1_argument {
int moduleid;
char *texname;
RCadr array;
RCtexture texbuf;
};
typedef struct dscudabindtexturetoarrayid_1_argument dscudabindtexturetoarrayid_1_argument;
struct dscufftplan3did_1_argument {
int nx;
int ny;
int nz;
u_int type;
};
typedef struct dscufftplan3did_1_argument dscufftplan3did_1_argument;
struct dscufftexecc2cid_1_argument {
u_int plan;
RCadr idata;
RCadr odata;
int direction;
};
typedef struct dscufftexecc2cid_1_argument dscufftexecc2cid_1_argument;
#define DSCUDA_PROG 60000
#define DSCUDA_VER 1
#if defined(__STDC__) || defined(__cplusplus)
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1(CLIENT *);
extern dscudaResult * dscudathreadexitid_1_svc(struct svc_req *);
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1(CLIENT *);
extern dscudaResult * dscudathreadsynchronizeid_1_svc(struct svc_req *);
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1(int , RCsize , CLIENT *);
extern dscudaResult * dscudathreadsetlimitid_1_svc(int , RCsize , struct svc_req *);
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1(int , CLIENT *);
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc(int , struct svc_req *);
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1(int , CLIENT *);
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc(int , struct svc_req *);
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1(CLIENT *);
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc(struct svc_req *);
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1(CLIENT *);
extern dscudaResult * dscudagetlasterrorid_1_svc(struct svc_req *);
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1(CLIENT *);
extern dscudaResult * dscudapeekatlasterrorid_1_svc(struct svc_req *);
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1(int , CLIENT *);
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc(int , struct svc_req *);
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1(CLIENT *);
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc(struct svc_req *);
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1(CLIENT *);
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc(struct svc_req *);
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1(int , CLIENT *);
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc(int , struct svc_req *);
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1(CLIENT *);
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc(struct svc_req *);
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1(CLIENT *);
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc(struct svc_req *);
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1(int , CLIENT *);
extern dscudaResult * dscudasetdeviceid_1_svc(int , struct svc_req *);
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1(u_int , CLIENT *);
extern dscudaResult * dscudasetdeviceflagsid_1_svc(u_int , struct svc_req *);
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1(RCbuf , CLIENT *);
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc(RCbuf , struct svc_req *);
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1(CLIENT *);
extern dscudaResult * dscudadevicesynchronize_1_svc(struct svc_req *);
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1(CLIENT *);
extern dscudaResult * dscudadevicereset_1_svc(struct svc_req *);
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1(CLIENT *);
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc(struct svc_req *);
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamdestroyid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamsynchronizeid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamqueryid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1(RCstream , RCevent , u_int , CLIENT *);
extern dscudaResult * dscudastreamwaiteventid_1_svc(RCstream , RCevent , u_int , struct svc_req *);
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1(CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc(struct svc_req *);
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1(u_int , CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc(u_int , struct svc_req *);
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventdestroyid_1_svc(RCevent , struct svc_req *);
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1(RCevent , RCevent , CLIENT *);
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc(RCevent , RCevent , struct svc_req *);
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1(RCevent , RCstream , CLIENT *);
extern dscudaResult * dscudaeventrecordid_1_svc(RCevent , RCstream , struct svc_req *);
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventsynchronizeid_1_svc(RCevent , struct svc_req *);
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventqueryid_1_svc(RCevent , struct svc_req *);
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , CLIENT *);
extern void * dscudalaunchkernelid_1_svc(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , struct svc_req *);
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1(RCipaddr , RCpid , char *, char *, CLIENT *);
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc(RCipaddr , RCpid , char *, char *, struct svc_req *);
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1(int , char *, CLIENT *);
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc(int , char *, struct svc_req *);
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1(RCsize , CLIENT *);
extern dscudaMallocResult * dscudamallocid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreeid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyh2did_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1(RCadr , RCsize , CLIENT *);
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc(RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1(RCadr , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyd2did_1_svc(RCadr , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasynch2did_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1(RCadr , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc(RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1(RCadr , RCadr , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasyncd2did_1_svc(RCadr , RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1(int , char *, RCbuf , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc(int , char *, RCbuf , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1(int , char *, RCadr , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbold2did_1_svc(int , char *, RCadr , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1(int , char *, RCsize , RCsize , CLIENT *);
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc(int , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1(int , RCadr , char *, RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc(int , RCadr , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1(RCadr , int , RCsize , CLIENT *);
extern dscudaResult * dscudamemsetid_1_svc(RCadr , int , RCsize , struct svc_req *);
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1(RCsize , u_int , CLIENT *);
extern dscudaHostAllocResult * dscudahostallocid_1_svc(RCsize , u_int , struct svc_req *);
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1(RCsize , CLIENT *);
extern dscudaMallocHostResult * dscudamallochostid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreehostid_1_svc(RCadr , struct svc_req *);
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1(RCadr , u_int , CLIENT *);
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc(RCadr , u_int , struct svc_req *);
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1(RCadr , CLIENT *);
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc(RCadr , struct svc_req *);
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1(RCchanneldesc , RCsize , RCsize , u_int , CLIENT *);
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc(RCchanneldesc , RCsize , RCsize , u_int , struct svc_req *);
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreearrayid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1(RCsize , RCsize , CLIENT *);
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc(RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dh2did_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1(RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc(RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dd2did_1_svc(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1(RCadr , RCsize , int , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemset2did_1_svc(RCadr , RCsize , int , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1(int , char *, RCbuf , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc(int , char *, RCbuf , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1(int , char *, RCadr , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1(int , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc(int , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1(int , RCadr , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc(int , RCadr , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1(int , int , int , int , RCchannelformat , CLIENT *);
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc(int , int , int , int , RCchannelformat , struct svc_req *);
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1(RCadr , CLIENT *);
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc(RCadr , struct svc_req *);
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1(int , char *, RCadr , RCsize , RCtexture , CLIENT *);
extern dscudaBindTextureResult * dscudabindtextureid_1_svc(int , char *, RCadr , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , CLIENT *);
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1(int , char *, RCadr , RCtexture , CLIENT *);
extern dscudaResult * dscudabindtexturetoarrayid_1_svc(int , char *, RCadr , RCtexture , struct svc_req *);
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1(RCtexture , CLIENT *);
extern dscudaResult * dscudaunbindtextureid_1_svc(RCtexture , struct svc_req *);
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1(int , int , int , u_int , CLIENT *);
extern dscufftPlanResult * dscufftplan3did_1_svc(int , int , int , u_int , struct svc_req *);
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1(u_int , CLIENT *);
extern dscufftResult * dscufftdestroyid_1_svc(u_int , struct svc_req *);
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1(u_int , RCadr , RCadr , int , CLIENT *);
extern dscufftResult * dscufftexecc2cid_1_svc(u_int , RCadr , RCadr , int , struct svc_req *);
extern int dscuda_prog_1_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1();
extern dscudaResult * dscudathreadexitid_1_svc();
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1();
extern dscudaResult * dscudathreadsynchronizeid_1_svc();
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1();
extern dscudaResult * dscudathreadsetlimitid_1_svc();
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1();
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc();
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1();
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc();
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1();
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc();
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1();
extern dscudaResult * dscudagetlasterrorid_1_svc();
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1();
extern dscudaResult * dscudapeekatlasterrorid_1_svc();
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1();
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc();
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1();
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc();
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1();
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc();
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1();
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc();
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1();
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc();
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1();
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc();
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1();
extern dscudaResult * dscudasetdeviceid_1_svc();
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1();
extern dscudaResult * dscudasetdeviceflagsid_1_svc();
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1();
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc();
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1();
extern dscudaResult * dscudadevicesynchronize_1_svc();
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1();
extern dscudaResult * dscudadevicereset_1_svc();
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1();
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc();
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1();
extern dscudaResult * dscudastreamdestroyid_1_svc();
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1();
extern dscudaResult * dscudastreamsynchronizeid_1_svc();
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1();
extern dscudaResult * dscudastreamqueryid_1_svc();
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1();
extern dscudaResult * dscudastreamwaiteventid_1_svc();
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1();
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc();
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1();
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc();
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1();
extern dscudaResult * dscudaeventdestroyid_1_svc();
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1();
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc();
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1();
extern dscudaResult * dscudaeventrecordid_1_svc();
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1();
extern dscudaResult * dscudaeventsynchronizeid_1_svc();
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1();
extern dscudaResult * dscudaeventqueryid_1_svc();
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1();
extern void * dscudalaunchkernelid_1_svc();
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1();
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc();
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1();
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc();
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1();
extern dscudaMallocResult * dscudamallocid_1_svc();
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1();
extern dscudaResult * dscudafreeid_1_svc();
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1();
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc();
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1();
extern dscudaResult * dscudamemcpyh2did_1_svc();
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1();
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc();
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1();
extern dscudaResult * dscudamemcpyd2did_1_svc();
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1();
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc();
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1();
extern dscudaResult * dscudamemcpyasynch2did_1_svc();
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1();
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc();
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1();
extern dscudaResult * dscudamemcpyasyncd2did_1_svc();
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1();
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc();
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1();
extern dscudaResult * dscudamemcpytosymbold2did_1_svc();
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1();
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc();
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1();
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc();
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1();
extern dscudaResult * dscudamemsetid_1_svc();
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1();
extern dscudaHostAllocResult * dscudahostallocid_1_svc();
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1();
extern dscudaMallocHostResult * dscudamallochostid_1_svc();
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1();
extern dscudaResult * dscudafreehostid_1_svc();
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1();
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc();
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1();
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc();
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1();
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc();
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1();
extern dscudaResult * dscudafreearrayid_1_svc();
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1();
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc();
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1();
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc();
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1();
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc();
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1();
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc();
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1();
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc();
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1();
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc();
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc();
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1();
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc();
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc();
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1();
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc();
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1();
extern dscudaResult * dscudamemcpy2dh2did_1_svc();
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1();
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc();
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1();
extern dscudaResult * dscudamemcpy2dd2did_1_svc();
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1();
extern dscudaResult * dscudamemset2did_1_svc();
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1();
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc();
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1();
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1();
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1();
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc();
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1();
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc();
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1();
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc();
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1();
extern dscudaBindTextureResult * dscudabindtextureid_1_svc();
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1();
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc();
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1();
extern dscudaResult * dscudabindtexturetoarrayid_1_svc();
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1();
extern dscudaResult * dscudaunbindtextureid_1_svc();
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1();
extern dscufftPlanResult * dscufftplan3did_1_svc();
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1();
extern dscufftResult * dscufftdestroyid_1_svc();
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1();
extern dscufftResult * dscufftexecc2cid_1_svc();
extern int dscuda_prog_1_freeresult ();
#endif
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_RCadr (XDR *, RCadr*);
extern bool_t xdr_RCstream (XDR *, RCstream*);
extern bool_t xdr_RCevent (XDR *, RCevent*);
extern bool_t xdr_RCipaddr (XDR *, RCipaddr*);
extern bool_t xdr_RCsize (XDR *, RCsize*);
extern bool_t xdr_RCerror (XDR *, RCerror*);
extern bool_t xdr_RCbuf (XDR *, RCbuf*);
extern bool_t xdr_RCchannelformat (XDR *, RCchannelformat*);
extern bool_t xdr_RCpid (XDR *, RCpid*);
extern bool_t xdr_RCchanneldesc_t (XDR *, RCchanneldesc_t*);
extern bool_t xdr_RCchanneldesc (XDR *, RCchanneldesc*);
extern bool_t xdr_RCtexture_t (XDR *, RCtexture_t*);
extern bool_t xdr_RCtexture (XDR *, RCtexture*);
extern bool_t xdr_RCfuncattr_t (XDR *, RCfuncattr_t*);
extern bool_t xdr_RCfuncattr (XDR *, RCfuncattr*);
extern bool_t xdr_RCargType (XDR *, RCargType*);
extern bool_t xdr_RCargVal (XDR *, RCargVal*);
extern bool_t xdr_RCarg (XDR *, RCarg*);
extern bool_t xdr_RCargs (XDR *, RCargs*);
extern bool_t xdr_dscudaResult (XDR *, dscudaResult*);
extern bool_t xdr_dscudaThreadGetLimitResult (XDR *, dscudaThreadGetLimitResult*);
extern bool_t xdr_dscudaThreadGetCacheConfigResult (XDR *, dscudaThreadGetCacheConfigResult*);
extern bool_t xdr_dscudaMallocResult (XDR *, dscudaMallocResult*);
extern bool_t xdr_dscudaHostAllocResult (XDR *, dscudaHostAllocResult*);
extern bool_t xdr_dscudaMallocHostResult (XDR *, dscudaMallocHostResult*);
extern bool_t xdr_dscudaMallocArrayResult (XDR *, dscudaMallocArrayResult*);
extern bool_t xdr_dscudaMallocPitchResult (XDR *, dscudaMallocPitchResult*);
extern bool_t xdr_dscudaMemcpyD2HResult (XDR *, dscudaMemcpyD2HResult*);
extern bool_t xdr_dscudaMemcpyH2HResult (XDR *, dscudaMemcpyH2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayD2HResult (XDR *, dscudaMemcpyToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayH2HResult (XDR *, dscudaMemcpyToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult (XDR *, dscudaMemcpy2DToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult (XDR *, dscudaMemcpy2DToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DD2HResult (XDR *, dscudaMemcpy2DD2HResult*);
extern bool_t xdr_dscudaMemcpy2DH2HResult (XDR *, dscudaMemcpy2DH2HResult*);
extern bool_t xdr_dscudaGetDeviceResult (XDR *, dscudaGetDeviceResult*);
extern bool_t xdr_dscudaGetDeviceCountResult (XDR *, dscudaGetDeviceCountResult*);
extern bool_t xdr_dscudaGetDevicePropertiesResult (XDR *, dscudaGetDevicePropertiesResult*);
extern bool_t xdr_dscudaDriverGetVersionResult (XDR *, dscudaDriverGetVersionResult*);
extern bool_t xdr_dscudaRuntimeGetVersionResult (XDR *, dscudaRuntimeGetVersionResult*);
extern bool_t xdr_dscudaGetErrorStringResult (XDR *, dscudaGetErrorStringResult*);
extern bool_t xdr_dscudaCreateChannelDescResult (XDR *, dscudaCreateChannelDescResult*);
extern bool_t xdr_dscudaGetChannelDescResult (XDR *, dscudaGetChannelDescResult*);
extern bool_t xdr_dscudaChooseDeviceResult (XDR *, dscudaChooseDeviceResult*);
extern bool_t xdr_dscudaMemcpyAsyncD2HResult (XDR *, dscudaMemcpyAsyncD2HResult*);
extern bool_t xdr_dscudaMemcpyAsyncH2HResult (XDR *, dscudaMemcpyAsyncH2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult (XDR *, dscudaMemcpyFromSymbolD2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult (XDR *, dscudaMemcpyFromSymbolAsyncD2HResult*);
extern bool_t xdr_dscudaStreamCreateResult (XDR *, dscudaStreamCreateResult*);
extern bool_t xdr_dscudaEventCreateResult (XDR *, dscudaEventCreateResult*);
extern bool_t xdr_dscudaEventElapsedTimeResult (XDR *, dscudaEventElapsedTimeResult*);
extern bool_t xdr_dscudaHostGetDevicePointerResult (XDR *, dscudaHostGetDevicePointerResult*);
extern bool_t xdr_dscudaHostGetFlagsResult (XDR *, dscudaHostGetFlagsResult*);
extern bool_t xdr_dscudaLoadModuleResult (XDR *, dscudaLoadModuleResult*);
extern bool_t xdr_dscudaFuncGetAttributesResult (XDR *, dscudaFuncGetAttributesResult*);
extern bool_t xdr_dscudaBindTextureResult (XDR *, dscudaBindTextureResult*);
extern bool_t xdr_dscudaBindTexture2DResult (XDR *, dscudaBindTexture2DResult*);
extern bool_t xdr_dscufftResult (XDR *, dscufftResult*);
extern bool_t xdr_dscufftPlanResult (XDR *, dscufftPlanResult*);
extern bool_t xdr_dscublasResult (XDR *, dscublasResult*);
extern bool_t xdr_dscublasCreateResult (XDR *, dscublasCreateResult*);
extern bool_t xdr_dscublasGetVectorResult (XDR *, dscublasGetVectorResult*);
extern bool_t xdr_RCdim3 (XDR *, RCdim3*);
extern bool_t xdr_dscudathreadsetlimitid_1_argument (XDR *, dscudathreadsetlimitid_1_argument*);
extern bool_t xdr_dscudastreamwaiteventid_1_argument (XDR *, dscudastreamwaiteventid_1_argument*);
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument (XDR *, dscudaeventelapsedtimeid_1_argument*);
extern bool_t xdr_dscudaeventrecordid_1_argument (XDR *, dscudaeventrecordid_1_argument*);
extern bool_t xdr_dscudalaunchkernelid_1_argument (XDR *, dscudalaunchkernelid_1_argument*);
extern bool_t xdr_dscudaloadmoduleid_1_argument (XDR *, dscudaloadmoduleid_1_argument*);
extern bool_t xdr_dscudafuncgetattributesid_1_argument (XDR *, dscudafuncgetattributesid_1_argument*);
extern bool_t xdr_dscudamemcpyh2hid_1_argument (XDR *, dscudamemcpyh2hid_1_argument*);
extern bool_t xdr_dscudamemcpyh2did_1_argument (XDR *, dscudamemcpyh2did_1_argument*);
extern bool_t xdr_dscudamemcpyd2hid_1_argument (XDR *, dscudamemcpyd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyd2did_1_argument (XDR *, dscudamemcpyd2did_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument (XDR *, dscudamemcpyasynch2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2did_1_argument (XDR *, dscudamemcpyasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument (XDR *, dscudamemcpyasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument (XDR *, dscudamemcpyasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument (XDR *, dscudamemcpytosymbolh2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument (XDR *, dscudamemcpytosymbold2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument (XDR *, dscudamemcpyfromsymbold2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument (XDR *, dscudamemcpyfromsymbold2did_1_argument*);
extern bool_t xdr_dscudamemsetid_1_argument (XDR *, dscudamemsetid_1_argument*);
extern bool_t xdr_dscudahostallocid_1_argument (XDR *, dscudahostallocid_1_argument*);
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument (XDR *, dscudahostgetdevicepointerid_1_argument*);
extern bool_t xdr_dscudamallocarrayid_1_argument (XDR *, dscudamallocarrayid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument (XDR *, dscudamemcpytoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument (XDR *, dscudamemcpytoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument (XDR *, dscudamemcpytoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument (XDR *, dscudamemcpytoarrayd2did_1_argument*);
extern bool_t xdr_dscudamallocpitchid_1_argument (XDR *, dscudamallocpitchid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument (XDR *, dscudamemcpy2dtoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument (XDR *, dscudamemcpy2dtoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument (XDR *, dscudamemcpy2dtoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument (XDR *, dscudamemcpy2dtoarrayd2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument (XDR *, dscudamemcpy2dh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2did_1_argument (XDR *, dscudamemcpy2dh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument (XDR *, dscudamemcpy2dd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2did_1_argument (XDR *, dscudamemcpy2dd2did_1_argument*);
extern bool_t xdr_dscudamemset2did_1_argument (XDR *, dscudamemset2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument (XDR *, dscudamemcpytosymbolasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument (XDR *, dscudamemcpytosymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudacreatechanneldescid_1_argument (XDR *, dscudacreatechanneldescid_1_argument*);
extern bool_t xdr_dscudabindtextureid_1_argument (XDR *, dscudabindtextureid_1_argument*);
extern bool_t xdr_dscudabindtexture2did_1_argument (XDR *, dscudabindtexture2did_1_argument*);
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument (XDR *, dscudabindtexturetoarrayid_1_argument*);
extern bool_t xdr_dscufftplan3did_1_argument (XDR *, dscufftplan3did_1_argument*);
extern bool_t xdr_dscufftexecc2cid_1_argument (XDR *, dscufftexecc2cid_1_argument*);
#else
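/* Pre-ANSI (K&R style) declarations of the same XDR conversion routines,
   selected when neither __STDC__ nor __cplusplus is defined. */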
extern bool_t xdr_RCadr ();
extern bool_t xdr_RCstream ();
extern bool_t xdr_RCevent ();
extern bool_t xdr_RCipaddr ();
extern bool_t xdr_RCsize ();
extern bool_t xdr_RCerror ();
extern bool_t xdr_RCbuf ();
extern bool_t xdr_RCchannelformat ();
extern bool_t xdr_RCpid ();
extern bool_t xdr_RCchanneldesc_t ();
extern bool_t xdr_RCchanneldesc ();
extern bool_t xdr_RCtexture_t ();
extern bool_t xdr_RCtexture ();
extern bool_t xdr_RCfuncattr_t ();
extern bool_t xdr_RCfuncattr ();
extern bool_t xdr_RCargType ();
extern bool_t xdr_RCargVal ();
extern bool_t xdr_RCarg ();
extern bool_t xdr_RCargs ();
extern bool_t xdr_dscudaResult ();
extern bool_t xdr_dscudaThreadGetLimitResult ();
extern bool_t xdr_dscudaThreadGetCacheConfigResult ();
extern bool_t xdr_dscudaMallocResult ();
extern bool_t xdr_dscudaHostAllocResult ();
extern bool_t xdr_dscudaMallocHostResult ();
extern bool_t xdr_dscudaMallocArrayResult ();
extern bool_t xdr_dscudaMallocPitchResult ();
extern bool_t xdr_dscudaMemcpyD2HResult ();
extern bool_t xdr_dscudaMemcpyH2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DD2HResult ();
extern bool_t xdr_dscudaMemcpy2DH2HResult ();
extern bool_t xdr_dscudaGetDeviceResult ();
extern bool_t xdr_dscudaGetDeviceCountResult ();
extern bool_t xdr_dscudaGetDevicePropertiesResult ();
extern bool_t xdr_dscudaDriverGetVersionResult ();
extern bool_t xdr_dscudaRuntimeGetVersionResult ();
extern bool_t xdr_dscudaGetErrorStringResult ();
extern bool_t xdr_dscudaCreateChannelDescResult ();
extern bool_t xdr_dscudaGetChannelDescResult ();
extern bool_t xdr_dscudaChooseDeviceResult ();
extern bool_t xdr_dscudaMemcpyAsyncD2HResult ();
extern bool_t xdr_dscudaMemcpyAsyncH2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult ();
extern bool_t xdr_dscudaStreamCreateResult ();
extern bool_t xdr_dscudaEventCreateResult ();
extern bool_t xdr_dscudaEventElapsedTimeResult ();
extern bool_t xdr_dscudaHostGetDevicePointerResult ();
extern bool_t xdr_dscudaHostGetFlagsResult ();
extern bool_t xdr_dscudaLoadModuleResult ();
extern bool_t xdr_dscudaFuncGetAttributesResult ();
extern bool_t xdr_dscudaBindTextureResult ();
extern bool_t xdr_dscudaBindTexture2DResult ();
extern bool_t xdr_dscufftResult ();
extern bool_t xdr_dscufftPlanResult ();
extern bool_t xdr_dscublasResult ();
extern bool_t xdr_dscublasCreateResult ();
extern bool_t xdr_dscublasGetVectorResult ();
extern bool_t xdr_RCdim3 ();
extern bool_t xdr_dscudathreadsetlimitid_1_argument ();
extern bool_t xdr_dscudastreamwaiteventid_1_argument ();
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument ();
extern bool_t xdr_dscudaeventrecordid_1_argument ();
extern bool_t xdr_dscudalaunchkernelid_1_argument ();
extern bool_t xdr_dscudaloadmoduleid_1_argument ();
extern bool_t xdr_dscudafuncgetattributesid_1_argument ();
extern bool_t xdr_dscudamemcpyh2hid_1_argument ();
extern bool_t xdr_dscudamemcpyh2did_1_argument ();
extern bool_t xdr_dscudamemcpyd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyd2did_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument ();
extern bool_t xdr_dscudamemsetid_1_argument ();
extern bool_t xdr_dscudahostallocid_1_argument ();
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument ();
extern bool_t xdr_dscudamallocarrayid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument ();
extern bool_t xdr_dscudamallocpitchid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2did_1_argument ();
extern bool_t xdr_dscudamemset2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudacreatechanneldescid_1_argument ();
extern bool_t xdr_dscudabindtextureid_1_argument ();
extern bool_t xdr_dscudabindtexture2did_1_argument ();
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument ();
extern bool_t xdr_dscufftplan3did_1_argument ();
extern bool_t xdr_dscufftexecc2cid_1_argument ();
#endif
#ifdef __cplusplus
}
#endif
#endif
#pragma end dscudarpc.h
#pragma begin dscudadefs.h
#ifndef _DSCUDADEFS_H
#define _DSCUDADEFS_H
#define RC_NSERVERMAX 32
#define RC_NDEVICEMAX 32
#define RC_NREDUNDANCYMAX 4
#define RC_NVDEVMAX 64
#define RC_NPTHREADMAX 64
#define RC_BUFSIZE (1024*1024)
#define RC_NKMODULEMAX 128
#define RC_NKFUNCMAX 128
#define RC_KARGMAX 64
#define RC_KMODULENAMELEN 64
#define RC_KNAMELEN 64
#define RC_KMODULEIMAGELEN (1024*1024*2)
#define RC_SNAMELEN 64
#define RC_CACHE_MODULE (1)
#define RC_CLIENT_CACHE_LIFETIME (30)
#define RC_SERVER_CACHE_LIFETIME (RC_CLIENT_CACHE_LIFETIME+30)
#define RC_SUPPORT_PAGELOCK (0)
#define RC_SUPPORT_STREAM (0)
#define RC_SUPPORT_CONCURRENT_EXEC (0)
#define RC_DAEMON_IP_PORT (65432)
#define RC_SERVER_IP_PORT (RC_DAEMON_IP_PORT+1)
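/* Added note (interpretation inferred from the names): RC_DAEMON_IP_PORT is
   presumably the TCP port the DS-CUDA daemon listens on, with server
   processes taking ports from RC_SERVER_IP_PORT upward; RC_BUFSIZE appears
   to bound a single RPC data transfer and RC_KMODULEIMAGELEN an embedded
   kernel-module image. */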
#endif
#pragma end dscudadefs.h
#pragma begin dscudamacros.h
#ifndef DSCUDA_MACROS_H
#define DSCUDA_MACROS_H
#define WARN(lv, fmt, args...) if (lv <= dscudaWarnLevel()) fprintf(stderr, fmt, ## args);
#define WARNONCE(lv, fmt, args...) if (lv <= dscudaWarnLevel()) { \
static int firstcall = 1; \
if (firstcall) { \
firstcall = 0; \
fprintf(stderr, fmt, ## args); \
} \
}
#define ALIGN_UP(off, align) (off) = ((off) + (align) - 1) & ~((align) - 1)
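/*
 * Illustrative note (added): ALIGN_UP rounds `off` up to the next multiple of
 * `align`, which must be a power of two.  For example, when packing kernel
 * arguments at 8-byte boundaries:
 *
 *     size_t off = 13;
 *     ALIGN_UP(off, 8);    (off becomes 16)
 */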
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
#endif
#pragma end dscudamacros.h
#pragma begin ibv_rdma.h
#ifndef RDMA_COMMON_H
#define RDMA_COMMON_H
#ifdef RPC_ONLY
typedef struct {
int type;
union {
uint64_t pointerval;
unsigned int intval;
float floatval;
char customval[RC_KARGMAX];
} val;
unsigned int offset;
unsigned int size;
} IbvArg;
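/* Added note: IbvArg is the InfiniBand-verbs counterpart of RCarg below; it
   carries one kernel argument as a tagged value (device pointer, int, float,
   or up to RC_KARGMAX raw bytes) together with its byte offset and size in
   the launch argument buffer. */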
#else
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_cma.h>
#include <cuda_runtime_api.h>
#pragma begin dscudadefs.h
#ifndef _DSCUDADEFS_H
#define _DSCUDADEFS_H
#define RC_NSERVERMAX 32
#define RC_NDEVICEMAX 32
#define RC_NREDUNDANCYMAX 4
#define RC_NVDEVMAX 64
#define RC_NPTHREADMAX 64
#define RC_BUFSIZE (1024*1024)
#define RC_NKMODULEMAX 128
#define RC_NKFUNCMAX 128
#define RC_KARGMAX 64
#define RC_KMODULENAMELEN 64
#define RC_KNAMELEN 64
#define RC_KMODULEIMAGELEN (1024*1024*2)
#define RC_SNAMELEN 64
#define RC_CACHE_MODULE (1)
#define RC_CLIENT_CACHE_LIFETIME (30)
#define RC_SERVER_CACHE_LIFETIME (RC_CLIENT_CACHE_LIFETIME+30)
#define RC_SUPPORT_PAGELOCK (0)
#define RC_SUPPORT_STREAM (0)
#define RC_SUPPORT_CONCURRENT_EXEC (0)
#define RC_DAEMON_IP_PORT (65432)
#define RC_SERVER_IP_PORT (RC_DAEMON_IP_PORT+1)
#endif
#pragma end dscudadefs.h
#pragma begin dscudarpc.h
#ifndef _DSCUDARPC_H_RPCGEN
#define _DSCUDARPC_H_RPCGEN
#include <rpc/rpc.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef u_quad_t RCadr;
typedef u_quad_t RCstream;
typedef u_quad_t RCevent;
typedef u_quad_t RCipaddr;
typedef u_int RCsize;
typedef u_int RCerror;
typedef struct {
u_int RCbuf_len;
char *RCbuf_val;
} RCbuf;
typedef u_int RCchannelformat;
typedef u_long RCpid;
struct RCchanneldesc_t {
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCchanneldesc_t RCchanneldesc_t;
typedef RCchanneldesc_t RCchanneldesc;
struct RCtexture_t {
int normalized;
int filterMode;
int addressMode[3];
RCchannelformat f;
int w;
int x;
int y;
int z;
};
typedef struct RCtexture_t RCtexture_t;
typedef RCtexture_t RCtexture;
struct RCfuncattr_t {
int binaryVersion;
RCsize constSizeBytes;
RCsize localSizeBytes;
int maxThreadsPerBlock;
int numRegs;
int ptxVersion;
RCsize sharedSizeBytes;
};
typedef struct RCfuncattr_t RCfuncattr_t;
typedef RCfuncattr_t RCfuncattr;
enum RCargType {
dscudaArgTypeP = 0,
dscudaArgTypeI = 1,
dscudaArgTypeF = 2,
dscudaArgTypeV = 3
};
typedef enum RCargType RCargType;
struct RCargVal {
RCargType type;
union {
RCadr address;
u_int valuei;
float valuef;
char valuev[64];
} RCargVal_u;
};
typedef struct RCargVal RCargVal;
struct RCarg {
RCargVal val;
u_int offset;
u_int size;
};
typedef struct RCarg RCarg;
typedef struct {
u_int RCargs_len;
RCarg *RCargs_val;
} RCargs;
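/*
 * Illustrative sketch (added; not part of the generated header): packing a
 * single integer kernel argument into an RCarg before appending it to the
 * RCargs array passed to dscudalaunchkernelid_1().  The helper name is
 * hypothetical and exists only for this example.
 */
static RCarg
dscudaExamplePackIntArg(u_int value, u_int offset)
{
    RCarg arg;
    arg.val.type = dscudaArgTypeI;        /* tag: plain integer argument */
    arg.val.RCargVal_u.valuei = value;    /* payload */
    arg.offset = offset;                  /* byte offset in the argument buffer */
    arg.size = sizeof(u_int);             /* argument size in bytes */
    return arg;
}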
struct dscudaResult {
RCerror err;
};
typedef struct dscudaResult dscudaResult;
struct dscudaThreadGetLimitResult {
RCerror err;
RCsize value;
};
typedef struct dscudaThreadGetLimitResult dscudaThreadGetLimitResult;
struct dscudaThreadGetCacheConfigResult {
RCerror err;
int cacheConfig;
};
typedef struct dscudaThreadGetCacheConfigResult dscudaThreadGetCacheConfigResult;
struct dscudaMallocResult {
RCerror err;
RCadr devAdr;
};
typedef struct dscudaMallocResult dscudaMallocResult;
struct dscudaHostAllocResult {
RCerror err;
RCadr pHost;
};
typedef struct dscudaHostAllocResult dscudaHostAllocResult;
struct dscudaMallocHostResult {
RCerror err;
RCadr ptr;
};
typedef struct dscudaMallocHostResult dscudaMallocHostResult;
struct dscudaMallocArrayResult {
RCerror err;
RCadr array;
};
typedef struct dscudaMallocArrayResult dscudaMallocArrayResult;
struct dscudaMallocPitchResult {
RCerror err;
RCadr devPtr;
RCsize pitch;
};
typedef struct dscudaMallocPitchResult dscudaMallocPitchResult;
struct dscudaMemcpyD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyD2HResult dscudaMemcpyD2HResult;
struct dscudaMemcpyH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyH2HResult dscudaMemcpyH2HResult;
struct dscudaMemcpyToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayD2HResult dscudaMemcpyToArrayD2HResult;
struct dscudaMemcpyToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyToArrayH2HResult dscudaMemcpyToArrayH2HResult;
struct dscudaMemcpy2DToArrayD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayD2HResult dscudaMemcpy2DToArrayD2HResult;
struct dscudaMemcpy2DToArrayH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DToArrayH2HResult dscudaMemcpy2DToArrayH2HResult;
struct dscudaMemcpy2DD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DD2HResult dscudaMemcpy2DD2HResult;
struct dscudaMemcpy2DH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpy2DH2HResult dscudaMemcpy2DH2HResult;
struct dscudaGetDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaGetDeviceResult dscudaGetDeviceResult;
struct dscudaGetDeviceCountResult {
RCerror err;
int count;
};
typedef struct dscudaGetDeviceCountResult dscudaGetDeviceCountResult;
struct dscudaGetDevicePropertiesResult {
RCerror err;
RCbuf prop;
};
typedef struct dscudaGetDevicePropertiesResult dscudaGetDevicePropertiesResult;
struct dscudaDriverGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaDriverGetVersionResult dscudaDriverGetVersionResult;
struct dscudaRuntimeGetVersionResult {
RCerror err;
int ver;
};
typedef struct dscudaRuntimeGetVersionResult dscudaRuntimeGetVersionResult;
struct dscudaGetErrorStringResult {
char *errmsg;
};
typedef struct dscudaGetErrorStringResult dscudaGetErrorStringResult;
struct dscudaCreateChannelDescResult {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaCreateChannelDescResult dscudaCreateChannelDescResult;
struct dscudaGetChannelDescResult {
RCerror err;
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudaGetChannelDescResult dscudaGetChannelDescResult;
struct dscudaChooseDeviceResult {
RCerror err;
int device;
};
typedef struct dscudaChooseDeviceResult dscudaChooseDeviceResult;
struct dscudaMemcpyAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncD2HResult dscudaMemcpyAsyncD2HResult;
struct dscudaMemcpyAsyncH2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyAsyncH2HResult dscudaMemcpyAsyncH2HResult;
struct dscudaMemcpyFromSymbolD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolD2HResult dscudaMemcpyFromSymbolD2HResult;
struct dscudaMemcpyFromSymbolAsyncD2HResult {
RCerror err;
RCbuf buf;
};
typedef struct dscudaMemcpyFromSymbolAsyncD2HResult dscudaMemcpyFromSymbolAsyncD2HResult;
struct dscudaStreamCreateResult {
RCerror err;
RCadr stream;
};
typedef struct dscudaStreamCreateResult dscudaStreamCreateResult;
struct dscudaEventCreateResult {
RCerror err;
RCadr event;
};
typedef struct dscudaEventCreateResult dscudaEventCreateResult;
struct dscudaEventElapsedTimeResult {
RCerror err;
float ms;
};
typedef struct dscudaEventElapsedTimeResult dscudaEventElapsedTimeResult;
struct dscudaHostGetDevicePointerResult {
RCerror err;
RCadr pDevice;
};
typedef struct dscudaHostGetDevicePointerResult dscudaHostGetDevicePointerResult;
struct dscudaHostGetFlagsResult {
RCerror err;
u_int flags;
};
typedef struct dscudaHostGetFlagsResult dscudaHostGetFlagsResult;
struct dscudaLoadModuleResult {
u_int id;
};
typedef struct dscudaLoadModuleResult dscudaLoadModuleResult;
struct dscudaFuncGetAttributesResult {
RCerror err;
RCfuncattr attr;
};
typedef struct dscudaFuncGetAttributesResult dscudaFuncGetAttributesResult;
struct dscudaBindTextureResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTextureResult dscudaBindTextureResult;
struct dscudaBindTexture2DResult {
RCerror err;
RCsize offset;
};
typedef struct dscudaBindTexture2DResult dscudaBindTexture2DResult;
struct dscufftResult {
RCerror err;
};
typedef struct dscufftResult dscufftResult;
struct dscufftPlanResult {
RCerror err;
u_int plan;
};
typedef struct dscufftPlanResult dscufftPlanResult;
struct dscublasResult {
RCerror err;
u_int stat;
};
typedef struct dscublasResult dscublasResult;
struct dscublasCreateResult {
RCerror err;
u_int stat;
RCadr handle;
};
typedef struct dscublasCreateResult dscublasCreateResult;
struct dscublasGetVectorResult {
RCerror err;
u_int stat;
RCbuf y;
};
typedef struct dscublasGetVectorResult dscublasGetVectorResult;
struct RCdim3 {
u_int x;
u_int y;
u_int z;
};
typedef struct RCdim3 RCdim3;
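/* Added note: RCdim3 mirrors CUDA's dim3 (x, y, z extents) so that grid and
   block dimensions can be carried over RPC; it appears as the gdim and bdim
   fields of dscudalaunchkernelid_1_argument below. */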
struct dscudathreadsetlimitid_1_argument {
int limit;
RCsize value;
};
typedef struct dscudathreadsetlimitid_1_argument dscudathreadsetlimitid_1_argument;
struct dscudastreamwaiteventid_1_argument {
RCstream stream;
RCevent event;
u_int flags;
};
typedef struct dscudastreamwaiteventid_1_argument dscudastreamwaiteventid_1_argument;
struct dscudaeventelapsedtimeid_1_argument {
RCevent start;
RCevent end;
};
typedef struct dscudaeventelapsedtimeid_1_argument dscudaeventelapsedtimeid_1_argument;
struct dscudaeventrecordid_1_argument {
RCevent event;
RCstream stream;
};
typedef struct dscudaeventrecordid_1_argument dscudaeventrecordid_1_argument;
struct dscudalaunchkernelid_1_argument {
int moduleid;
int kid;
char *kname;
RCdim3 gdim;
RCdim3 bdim;
RCsize smemsize;
RCstream stream;
RCargs args;
};
typedef struct dscudalaunchkernelid_1_argument dscudalaunchkernelid_1_argument;
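/* Added note: wire form of a single kernel launch.  moduleid and kid/kname
   identify the server-side kernel, gdim/bdim/smemsize/stream mirror the
   <<<grid, block, sharedMem, stream>>> launch configuration, and args holds
   the packed RCarg list. */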
struct dscudaloadmoduleid_1_argument {
RCipaddr ipaddr;
RCpid pid;
char *mname;
char *image;
};
typedef struct dscudaloadmoduleid_1_argument dscudaloadmoduleid_1_argument;
struct dscudafuncgetattributesid_1_argument {
int moduleid;
char *kname;
};
typedef struct dscudafuncgetattributesid_1_argument dscudafuncgetattributesid_1_argument;
struct dscudamemcpyh2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2hid_1_argument dscudamemcpyh2hid_1_argument;
struct dscudamemcpyh2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpyh2did_1_argument dscudamemcpyh2did_1_argument;
struct dscudamemcpyd2hid_1_argument {
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2hid_1_argument dscudamemcpyd2hid_1_argument;
struct dscudamemcpyd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpyd2did_1_argument dscudamemcpyd2did_1_argument;
struct dscudamemcpyasynch2hid_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2hid_1_argument dscudamemcpyasynch2hid_1_argument;
struct dscudamemcpyasynch2did_1_argument {
RCadr dst;
RCbuf src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasynch2did_1_argument dscudamemcpyasynch2did_1_argument;
struct dscudamemcpyasyncd2hid_1_argument {
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2hid_1_argument dscudamemcpyasyncd2hid_1_argument;
struct dscudamemcpyasyncd2did_1_argument {
RCadr dst;
RCadr src;
RCsize count;
RCstream stream;
};
typedef struct dscudamemcpyasyncd2did_1_argument dscudamemcpyasyncd2did_1_argument;
struct dscudamemcpytosymbolh2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbolh2did_1_argument dscudamemcpytosymbolh2did_1_argument;
struct dscudamemcpytosymbold2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpytosymbold2did_1_argument dscudamemcpytosymbold2did_1_argument;
struct dscudamemcpyfromsymbold2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2hid_1_argument dscudamemcpyfromsymbold2hid_1_argument;
struct dscudamemcpyfromsymbold2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
};
typedef struct dscudamemcpyfromsymbold2did_1_argument dscudamemcpyfromsymbold2did_1_argument;
struct dscudamemsetid_1_argument {
RCadr dst;
int value;
RCsize count;
};
typedef struct dscudamemsetid_1_argument dscudamemsetid_1_argument;
struct dscudahostallocid_1_argument {
RCsize size;
u_int flags;
};
typedef struct dscudahostallocid_1_argument dscudahostallocid_1_argument;
struct dscudahostgetdevicepointerid_1_argument {
RCadr pHost;
u_int flags;
};
typedef struct dscudahostgetdevicepointerid_1_argument dscudahostgetdevicepointerid_1_argument;
struct dscudamallocarrayid_1_argument {
RCchanneldesc desc;
RCsize width;
RCsize height;
u_int flags;
};
typedef struct dscudamallocarrayid_1_argument dscudamallocarrayid_1_argument;
struct dscudamemcpytoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2hid_1_argument dscudamemcpytoarrayh2hid_1_argument;
struct dscudamemcpytoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize count;
};
typedef struct dscudamemcpytoarrayh2did_1_argument dscudamemcpytoarrayh2did_1_argument;
struct dscudamemcpytoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2hid_1_argument dscudamemcpytoarrayd2hid_1_argument;
struct dscudamemcpytoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize count;
};
typedef struct dscudamemcpytoarrayd2did_1_argument dscudamemcpytoarrayd2did_1_argument;
struct dscudamallocpitchid_1_argument {
RCsize width;
RCsize height;
};
typedef struct dscudamallocpitchid_1_argument dscudamallocpitchid_1_argument;
struct dscudamemcpy2dtoarrayh2hid_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2hid_1_argument dscudamemcpy2dtoarrayh2hid_1_argument;
struct dscudamemcpy2dtoarrayh2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCbuf srcbuf;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayh2did_1_argument dscudamemcpy2dtoarrayh2did_1_argument;
struct dscudamemcpy2dtoarrayd2hid_1_argument {
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2hid_1_argument dscudamemcpy2dtoarrayd2hid_1_argument;
struct dscudamemcpy2dtoarrayd2did_1_argument {
RCadr dst;
RCsize wOffset;
RCsize hOffset;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dtoarrayd2did_1_argument dscudamemcpy2dtoarrayd2did_1_argument;
struct dscudamemcpy2dh2hid_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2hid_1_argument dscudamemcpy2dh2hid_1_argument;
struct dscudamemcpy2dh2did_1_argument {
RCadr dst;
RCsize dpitch;
RCbuf src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dh2did_1_argument dscudamemcpy2dh2did_1_argument;
struct dscudamemcpy2dd2hid_1_argument {
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2hid_1_argument dscudamemcpy2dd2hid_1_argument;
struct dscudamemcpy2dd2did_1_argument {
RCadr dst;
RCsize dpitch;
RCadr src;
RCsize spitch;
RCsize width;
RCsize height;
};
typedef struct dscudamemcpy2dd2did_1_argument dscudamemcpy2dd2did_1_argument;
struct dscudamemset2did_1_argument {
RCadr dst;
RCsize pitch;
int value;
RCsize width;
RCsize height;
};
typedef struct dscudamemset2did_1_argument dscudamemset2did_1_argument;
struct dscudamemcpytosymbolasynch2did_1_argument {
int moduleid;
char *symbol;
RCbuf src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasynch2did_1_argument dscudamemcpytosymbolasynch2did_1_argument;
struct dscudamemcpytosymbolasyncd2did_1_argument {
int moduleid;
char *symbol;
RCadr src;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpytosymbolasyncd2did_1_argument dscudamemcpytosymbolasyncd2did_1_argument;
struct dscudamemcpyfromsymbolasyncd2hid_1_argument {
int moduleid;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2hid_1_argument dscudamemcpyfromsymbolasyncd2hid_1_argument;
struct dscudamemcpyfromsymbolasyncd2did_1_argument {
int moduleid;
RCadr dst;
char *symbol;
RCsize count;
RCsize offset;
RCstream stream;
};
typedef struct dscudamemcpyfromsymbolasyncd2did_1_argument dscudamemcpyfromsymbolasyncd2did_1_argument;
struct dscudacreatechanneldescid_1_argument {
int x;
int y;
int z;
int w;
RCchannelformat f;
};
typedef struct dscudacreatechanneldescid_1_argument dscudacreatechanneldescid_1_argument;
struct dscudabindtextureid_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize size;
RCtexture texbuf;
};
typedef struct dscudabindtextureid_1_argument dscudabindtextureid_1_argument;
struct dscudabindtexture2did_1_argument {
int moduleid;
char *texname;
RCadr devPtr;
RCsize width;
RCsize height;
RCsize pitch;
RCtexture texbuf;
};
typedef struct dscudabindtexture2did_1_argument dscudabindtexture2did_1_argument;
struct dscudabindtexturetoarrayid_1_argument {
int moduleid;
char *texname;
RCadr array;
RCtexture texbuf;
};
typedef struct dscudabindtexturetoarrayid_1_argument dscudabindtexturetoarrayid_1_argument;
struct dscufftplan3did_1_argument {
int nx;
int ny;
int nz;
u_int type;
};
typedef struct dscufftplan3did_1_argument dscufftplan3did_1_argument;
struct dscufftexecc2cid_1_argument {
u_int plan;
RCadr idata;
RCadr odata;
int direction;
};
typedef struct dscufftexecc2cid_1_argument dscufftexecc2cid_1_argument;
#define DSCUDA_PROG 60000
#define DSCUDA_VER 1
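/*
 * Added note: DSCUDA_PROG and DSCUDA_VER are the ONC RPC program number and
 * version under which these procedures are registered.  A client would
 * typically obtain a handle with something like (illustrative only):
 *
 *     CLIENT *clnt = clnt_create(server_host, DSCUDA_PROG, DSCUDA_VER, "tcp");
 *
 * and then pass it as the trailing CLIENT * argument of the *_1() stubs
 * declared below.
 */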
#if defined(__STDC__) || defined(__cplusplus)
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1(CLIENT *);
extern dscudaResult * dscudathreadexitid_1_svc(struct svc_req *);
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1(CLIENT *);
extern dscudaResult * dscudathreadsynchronizeid_1_svc(struct svc_req *);
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1(int , RCsize , CLIENT *);
extern dscudaResult * dscudathreadsetlimitid_1_svc(int , RCsize , struct svc_req *);
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1(int , CLIENT *);
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc(int , struct svc_req *);
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1(int , CLIENT *);
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc(int , struct svc_req *);
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1(CLIENT *);
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc(struct svc_req *);
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1(CLIENT *);
extern dscudaResult * dscudagetlasterrorid_1_svc(struct svc_req *);
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1(CLIENT *);
extern dscudaResult * dscudapeekatlasterrorid_1_svc(struct svc_req *);
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1(int , CLIENT *);
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc(int , struct svc_req *);
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1(CLIENT *);
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc(struct svc_req *);
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1(CLIENT *);
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc(struct svc_req *);
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1(int , CLIENT *);
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc(int , struct svc_req *);
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1(CLIENT *);
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc(struct svc_req *);
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1(CLIENT *);
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc(struct svc_req *);
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1(int , CLIENT *);
extern dscudaResult * dscudasetdeviceid_1_svc(int , struct svc_req *);
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1(u_int , CLIENT *);
extern dscudaResult * dscudasetdeviceflagsid_1_svc(u_int , struct svc_req *);
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1(RCbuf , CLIENT *);
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc(RCbuf , struct svc_req *);
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1(CLIENT *);
extern dscudaResult * dscudadevicesynchronize_1_svc(struct svc_req *);
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1(CLIENT *);
extern dscudaResult * dscudadevicereset_1_svc(struct svc_req *);
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1(CLIENT *);
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc(struct svc_req *);
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamdestroyid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamsynchronizeid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1(RCstream , CLIENT *);
extern dscudaResult * dscudastreamqueryid_1_svc(RCstream , struct svc_req *);
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1(RCstream , RCevent , u_int , CLIENT *);
extern dscudaResult * dscudastreamwaiteventid_1_svc(RCstream , RCevent , u_int , struct svc_req *);
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1(CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc(struct svc_req *);
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1(u_int , CLIENT *);
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc(u_int , struct svc_req *);
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventdestroyid_1_svc(RCevent , struct svc_req *);
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1(RCevent , RCevent , CLIENT *);
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc(RCevent , RCevent , struct svc_req *);
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1(RCevent , RCstream , CLIENT *);
extern dscudaResult * dscudaeventrecordid_1_svc(RCevent , RCstream , struct svc_req *);
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventsynchronizeid_1_svc(RCevent , struct svc_req *);
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1(RCevent , CLIENT *);
extern dscudaResult * dscudaeventqueryid_1_svc(RCevent , struct svc_req *);
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , CLIENT *);
extern void * dscudalaunchkernelid_1_svc(int , int , char *, RCdim3 , RCdim3 , RCsize , RCstream , RCargs , struct svc_req *);
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1(RCipaddr , RCpid , char *, char *, CLIENT *);
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc(RCipaddr , RCpid , char *, char *, struct svc_req *);
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1(int , char *, CLIENT *);
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc(int , char *, struct svc_req *);
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1(RCsize , CLIENT *);
extern dscudaMallocResult * dscudamallocid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreeid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1(RCadr , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyh2did_1_svc(RCadr , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1(RCadr , RCsize , CLIENT *);
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc(RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1(RCadr , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyd2did_1_svc(RCadr , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1(RCadr , RCbuf , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasynch2did_1_svc(RCadr , RCbuf , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1(RCadr , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc(RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1(RCadr , RCadr , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyasyncd2did_1_svc(RCadr , RCadr , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1(int , char *, RCbuf , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc(int , char *, RCbuf , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1(int , char *, RCadr , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytosymbold2did_1_svc(int , char *, RCadr , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1(int , char *, RCsize , RCsize , CLIENT *);
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc(int , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1(int , RCadr , char *, RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc(int , RCadr , char *, RCsize , RCsize , struct svc_req *);
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1(RCadr , int , RCsize , CLIENT *);
extern dscudaResult * dscudamemsetid_1_svc(RCadr , int , RCsize , struct svc_req *);
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1(RCsize , u_int , CLIENT *);
extern dscudaHostAllocResult * dscudahostallocid_1_svc(RCsize , u_int , struct svc_req *);
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1(RCsize , CLIENT *);
extern dscudaMallocHostResult * dscudamallochostid_1_svc(RCsize , struct svc_req *);
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreehostid_1_svc(RCadr , struct svc_req *);
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1(RCadr , u_int , CLIENT *);
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc(RCadr , u_int , struct svc_req *);
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1(RCadr , CLIENT *);
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc(RCadr , struct svc_req *);
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1(RCchanneldesc , RCsize , RCsize , u_int , CLIENT *);
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc(RCchanneldesc , RCsize , RCsize , u_int , struct svc_req *);
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1(RCadr , CLIENT *);
extern dscudaResult * dscudafreearrayid_1_svc(RCadr , struct svc_req *);
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , struct svc_req *);
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1(RCsize , RCsize , CLIENT *);
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc(RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc(RCadr , RCsize , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc(RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc(RCadr , RCsize , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dh2did_1_svc(RCadr , RCsize , RCbuf , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1(RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc(RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemcpy2dd2did_1_svc(RCadr , RCsize , RCadr , RCsize , RCsize , RCsize , struct svc_req *);
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1(RCadr , RCsize , int , RCsize , RCsize , CLIENT *);
extern dscudaResult * dscudamemset2did_1_svc(RCadr , RCsize , int , RCsize , RCsize , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1(int , char *, RCbuf , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc(int , char *, RCbuf , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1(int , char *, RCadr , RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1(int , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc(int , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1(int , RCadr , char *, RCsize , RCsize , RCstream , CLIENT *);
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc(int , RCadr , char *, RCsize , RCsize , RCstream , struct svc_req *);
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1(int , int , int , int , RCchannelformat , CLIENT *);
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc(int , int , int , int , RCchannelformat , struct svc_req *);
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1(RCadr , CLIENT *);
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc(RCadr , struct svc_req *);
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1(int , char *, RCadr , RCsize , RCtexture , CLIENT *);
extern dscudaBindTextureResult * dscudabindtextureid_1_svc(int , char *, RCadr , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , CLIENT *);
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc(int , char *, RCadr , RCsize , RCsize , RCsize , RCtexture , struct svc_req *);
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1(int , char *, RCadr , RCtexture , CLIENT *);
extern dscudaResult * dscudabindtexturetoarrayid_1_svc(int , char *, RCadr , RCtexture , struct svc_req *);
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1(RCtexture , CLIENT *);
extern dscudaResult * dscudaunbindtextureid_1_svc(RCtexture , struct svc_req *);
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1(int , int , int , u_int , CLIENT *);
extern dscufftPlanResult * dscufftplan3did_1_svc(int , int , int , u_int , struct svc_req *);
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1(u_int , CLIENT *);
extern dscufftResult * dscufftdestroyid_1_svc(u_int , struct svc_req *);
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1(u_int , RCadr , RCadr , int , CLIENT *);
extern dscufftResult * dscufftexecc2cid_1_svc(u_int , RCadr , RCadr , int , struct svc_req *);
extern int dscuda_prog_1_freeresult (SVCXPRT *, xdrproc_t, caddr_t);
#else
#define dscudaThreadExitId 100
extern dscudaResult * dscudathreadexitid_1();
extern dscudaResult * dscudathreadexitid_1_svc();
#define dscudaThreadSynchronizeId 101
extern dscudaResult * dscudathreadsynchronizeid_1();
extern dscudaResult * dscudathreadsynchronizeid_1_svc();
#define dscudaThreadSetLimitId 102
extern dscudaResult * dscudathreadsetlimitid_1();
extern dscudaResult * dscudathreadsetlimitid_1_svc();
#define dscudaThreadGetLimitId 103
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1();
extern dscudaThreadGetLimitResult * dscudathreadgetlimitid_1_svc();
#define dscudaThreadSetCacheConfigId 104
extern dscudaResult * dscudathreadsetcacheconfigid_1();
extern dscudaResult * dscudathreadsetcacheconfigid_1_svc();
#define dscudaThreadGetCacheConfigId 105
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1();
extern dscudaThreadGetCacheConfigResult * dscudathreadgetcacheconfigid_1_svc();
#define dscudaGetLastErrorId 200
extern dscudaResult * dscudagetlasterrorid_1();
extern dscudaResult * dscudagetlasterrorid_1_svc();
#define dscudaPeekAtLastErrorId 201
extern dscudaResult * dscudapeekatlasterrorid_1();
extern dscudaResult * dscudapeekatlasterrorid_1_svc();
#define dscudaGetErrorStringId 202
extern dscudaGetErrorStringResult * dscudageterrorstringid_1();
extern dscudaGetErrorStringResult * dscudageterrorstringid_1_svc();
#define dscudaGetDeviceId 300
extern dscudaGetDeviceResult * dscudagetdeviceid_1();
extern dscudaGetDeviceResult * dscudagetdeviceid_1_svc();
#define dscudaGetDeviceCountId 301
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1();
extern dscudaGetDeviceCountResult * dscudagetdevicecountid_1_svc();
#define dscudaGetDevicePropertiesId 302
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1();
extern dscudaGetDevicePropertiesResult * dscudagetdevicepropertiesid_1_svc();
#define dscudaDriverGetVersionId 303
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1();
extern dscudaDriverGetVersionResult * dscudadrivergetversionid_1_svc();
#define dscudaRuntimeGetVersionId 304
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1();
extern dscudaRuntimeGetVersionResult * dscudaruntimegetversionid_1_svc();
#define dscudaSetDeviceId 305
extern dscudaResult * dscudasetdeviceid_1();
extern dscudaResult * dscudasetdeviceid_1_svc();
#define dscudaSetDeviceFlagsId 306
extern dscudaResult * dscudasetdeviceflagsid_1();
extern dscudaResult * dscudasetdeviceflagsid_1_svc();
#define dscudaChooseDeviceId 307
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1();
extern dscudaChooseDeviceResult * dscudachoosedeviceid_1_svc();
#define dscudaDeviceSynchronize 308
extern dscudaResult * dscudadevicesynchronize_1();
extern dscudaResult * dscudadevicesynchronize_1_svc();
#define dscudaDeviceReset 309
extern dscudaResult * dscudadevicereset_1();
extern dscudaResult * dscudadevicereset_1_svc();
#define dscudaStreamCreateId 400
extern dscudaStreamCreateResult * dscudastreamcreateid_1();
extern dscudaStreamCreateResult * dscudastreamcreateid_1_svc();
#define dscudaStreamDestroyId 401
extern dscudaResult * dscudastreamdestroyid_1();
extern dscudaResult * dscudastreamdestroyid_1_svc();
#define dscudaStreamSynchronizeId 402
extern dscudaResult * dscudastreamsynchronizeid_1();
extern dscudaResult * dscudastreamsynchronizeid_1_svc();
#define dscudaStreamQueryId 403
extern dscudaResult * dscudastreamqueryid_1();
extern dscudaResult * dscudastreamqueryid_1_svc();
#define dscudaStreamWaitEventId 404
extern dscudaResult * dscudastreamwaiteventid_1();
extern dscudaResult * dscudastreamwaiteventid_1_svc();
#define dscudaEventCreateId 500
extern dscudaEventCreateResult * dscudaeventcreateid_1();
extern dscudaEventCreateResult * dscudaeventcreateid_1_svc();
#define dscudaEventCreateWithFlagsId 501
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1();
extern dscudaEventCreateResult * dscudaeventcreatewithflagsid_1_svc();
#define dscudaEventDestroyId 502
extern dscudaResult * dscudaeventdestroyid_1();
extern dscudaResult * dscudaeventdestroyid_1_svc();
#define dscudaEventElapsedTimeId 503
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1();
extern dscudaEventElapsedTimeResult * dscudaeventelapsedtimeid_1_svc();
#define dscudaEventRecordId 504
extern dscudaResult * dscudaeventrecordid_1();
extern dscudaResult * dscudaeventrecordid_1_svc();
#define dscudaEventSynchronizeId 505
extern dscudaResult * dscudaeventsynchronizeid_1();
extern dscudaResult * dscudaeventsynchronizeid_1_svc();
#define dscudaEventQueryId 506
extern dscudaResult * dscudaeventqueryid_1();
extern dscudaResult * dscudaeventqueryid_1_svc();
#define dscudaLaunchKernelId 600
extern void * dscudalaunchkernelid_1();
extern void * dscudalaunchkernelid_1_svc();
#define dscudaLoadModuleId 601
extern dscudaLoadModuleResult * dscudaloadmoduleid_1();
extern dscudaLoadModuleResult * dscudaloadmoduleid_1_svc();
#define dscudaFuncGetAttributesId 602
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1();
extern dscudaFuncGetAttributesResult * dscudafuncgetattributesid_1_svc();
#define dscudaMallocId 700
extern dscudaMallocResult * dscudamallocid_1();
extern dscudaMallocResult * dscudamallocid_1_svc();
#define dscudaFreeId 701
extern dscudaResult * dscudafreeid_1();
extern dscudaResult * dscudafreeid_1_svc();
#define dscudaMemcpyH2HId 702
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1();
extern dscudaMemcpyH2HResult * dscudamemcpyh2hid_1_svc();
#define dscudaMemcpyH2DId 703
extern dscudaResult * dscudamemcpyh2did_1();
extern dscudaResult * dscudamemcpyh2did_1_svc();
#define dscudaMemcpyD2HId 704
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1();
extern dscudaMemcpyD2HResult * dscudamemcpyd2hid_1_svc();
#define dscudaMemcpyD2DId 705
extern dscudaResult * dscudamemcpyd2did_1();
extern dscudaResult * dscudamemcpyd2did_1_svc();
#define dscudaMemcpyAsyncH2HId 706
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1();
extern dscudaMemcpyAsyncH2HResult * dscudamemcpyasynch2hid_1_svc();
#define dscudaMemcpyAsyncH2DId 707
extern dscudaResult * dscudamemcpyasynch2did_1();
extern dscudaResult * dscudamemcpyasynch2did_1_svc();
#define dscudaMemcpyAsyncD2HId 708
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1();
extern dscudaMemcpyAsyncD2HResult * dscudamemcpyasyncd2hid_1_svc();
#define dscudaMemcpyAsyncD2DId 709
extern dscudaResult * dscudamemcpyasyncd2did_1();
extern dscudaResult * dscudamemcpyasyncd2did_1_svc();
#define dscudaMemcpyToSymbolH2DId 710
extern dscudaResult * dscudamemcpytosymbolh2did_1();
extern dscudaResult * dscudamemcpytosymbolh2did_1_svc();
#define dscudaMemcpyToSymbolD2DId 711
extern dscudaResult * dscudamemcpytosymbold2did_1();
extern dscudaResult * dscudamemcpytosymbold2did_1_svc();
#define dscudaMemcpyFromSymbolD2HId 712
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1();
extern dscudaMemcpyFromSymbolD2HResult * dscudamemcpyfromsymbold2hid_1_svc();
#define dscudaMemcpyFromSymbolD2DId 713
extern dscudaResult * dscudamemcpyfromsymbold2did_1();
extern dscudaResult * dscudamemcpyfromsymbold2did_1_svc();
#define dscudaMemsetId 714
extern dscudaResult * dscudamemsetid_1();
extern dscudaResult * dscudamemsetid_1_svc();
#define dscudaHostAllocId 715
extern dscudaHostAllocResult * dscudahostallocid_1();
extern dscudaHostAllocResult * dscudahostallocid_1_svc();
#define dscudaMallocHostId 716
extern dscudaMallocHostResult * dscudamallochostid_1();
extern dscudaMallocHostResult * dscudamallochostid_1_svc();
#define dscudaFreeHostId 717
extern dscudaResult * dscudafreehostid_1();
extern dscudaResult * dscudafreehostid_1_svc();
#define dscudaHostGetDevicePointerId 718
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1();
extern dscudaHostGetDevicePointerResult * dscudahostgetdevicepointerid_1_svc();
#define dscudaHostGetFlagsID 719
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1();
extern dscudaHostGetFlagsResult * dscudahostgetflagsid_1_svc();
#define dscudaMallocArrayId 720
extern dscudaMallocArrayResult * dscudamallocarrayid_1();
extern dscudaMallocArrayResult * dscudamallocarrayid_1_svc();
#define dscudaFreeArrayId 721
extern dscudaResult * dscudafreearrayid_1();
extern dscudaResult * dscudafreearrayid_1_svc();
#define dscudaMemcpyToArrayH2HId 722
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1();
extern dscudaMemcpyToArrayH2HResult * dscudamemcpytoarrayh2hid_1_svc();
#define dscudaMemcpyToArrayH2DId 723
extern dscudaResult * dscudamemcpytoarrayh2did_1();
extern dscudaResult * dscudamemcpytoarrayh2did_1_svc();
#define dscudaMemcpyToArrayD2HId 724
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1();
extern dscudaMemcpyToArrayD2HResult * dscudamemcpytoarrayd2hid_1_svc();
#define dscudaMemcpyToArrayD2DId 725
extern dscudaResult * dscudamemcpytoarrayd2did_1();
extern dscudaResult * dscudamemcpytoarrayd2did_1_svc();
#define dscudaMallocPitchId 726
extern dscudaMallocPitchResult * dscudamallocpitchid_1();
extern dscudaMallocPitchResult * dscudamallocpitchid_1_svc();
#define dscudaMemcpy2DToArrayH2HId 727
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1();
extern dscudaMemcpy2DToArrayH2HResult * dscudamemcpy2dtoarrayh2hid_1_svc();
#define dscudaMemcpy2DToArrayH2DId 728
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayh2did_1_svc();
#define dscudaMemcpy2DToArrayD2HId 729
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1();
extern dscudaMemcpy2DToArrayD2HResult * dscudamemcpy2dtoarrayd2hid_1_svc();
#define dscudaMemcpy2DToArrayD2DId 730
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1();
extern dscudaResult * dscudamemcpy2dtoarrayd2did_1_svc();
#define dscudaMemcpy2DH2HId 731
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1();
extern dscudaMemcpy2DH2HResult * dscudamemcpy2dh2hid_1_svc();
#define dscudaMemcpy2DH2DId 732
extern dscudaResult * dscudamemcpy2dh2did_1();
extern dscudaResult * dscudamemcpy2dh2did_1_svc();
#define dscudaMemcpy2DD2HId 733
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1();
extern dscudaMemcpy2DD2HResult * dscudamemcpy2dd2hid_1_svc();
#define dscudaMemcpy2DD2DId 734
extern dscudaResult * dscudamemcpy2dd2did_1();
extern dscudaResult * dscudamemcpy2dd2did_1_svc();
#define dscudaMemset2DId 735
extern dscudaResult * dscudamemset2did_1();
extern dscudaResult * dscudamemset2did_1_svc();
#define dscudaMemcpyToSymbolAsyncH2DId 736
extern dscudaResult * dscudamemcpytosymbolasynch2did_1();
extern dscudaResult * dscudamemcpytosymbolasynch2did_1_svc();
#define dscudaMemcpyToSymbolAsyncD2DId 737
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1();
extern dscudaResult * dscudamemcpytosymbolasyncd2did_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2HId 738
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1();
extern dscudaMemcpyFromSymbolAsyncD2HResult * dscudamemcpyfromsymbolasyncd2hid_1_svc();
#define dscudaMemcpyFromSymbolAsyncD2DId 739
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1();
extern dscudaResult * dscudamemcpyfromsymbolasyncd2did_1_svc();
#define dscudaCreateChannelDescId 1400
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1();
extern dscudaCreateChannelDescResult * dscudacreatechanneldescid_1_svc();
#define dscudaGetChannelDescId 1401
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1();
extern dscudaGetChannelDescResult * dscudagetchanneldescid_1_svc();
#define dscudaBindTextureId 1402
extern dscudaBindTextureResult * dscudabindtextureid_1();
extern dscudaBindTextureResult * dscudabindtextureid_1_svc();
#define dscudaBindTexture2DId 1403
extern dscudaBindTexture2DResult * dscudabindtexture2did_1();
extern dscudaBindTexture2DResult * dscudabindtexture2did_1_svc();
#define dscudaBindTextureToArrayId 1404
extern dscudaResult * dscudabindtexturetoarrayid_1();
extern dscudaResult * dscudabindtexturetoarrayid_1_svc();
#define dscudaUnbindTextureId 1405
extern dscudaResult * dscudaunbindtextureid_1();
extern dscudaResult * dscudaunbindtextureid_1_svc();
#define dscufftPlan3dId 2002
extern dscufftPlanResult * dscufftplan3did_1();
extern dscufftPlanResult * dscufftplan3did_1_svc();
#define dscufftDestroyId 2004
extern dscufftResult * dscufftdestroyid_1();
extern dscufftResult * dscufftdestroyid_1_svc();
#define dscufftExecC2CId 2005
extern dscufftResult * dscufftexecc2cid_1();
extern dscufftResult * dscufftexecc2cid_1_svc();
extern int dscuda_prog_1_freeresult ();
#endif
#if defined(__STDC__) || defined(__cplusplus)
extern bool_t xdr_RCadr (XDR *, RCadr*);
extern bool_t xdr_RCstream (XDR *, RCstream*);
extern bool_t xdr_RCevent (XDR *, RCevent*);
extern bool_t xdr_RCipaddr (XDR *, RCipaddr*);
extern bool_t xdr_RCsize (XDR *, RCsize*);
extern bool_t xdr_RCerror (XDR *, RCerror*);
extern bool_t xdr_RCbuf (XDR *, RCbuf*);
extern bool_t xdr_RCchannelformat (XDR *, RCchannelformat*);
extern bool_t xdr_RCpid (XDR *, RCpid*);
extern bool_t xdr_RCchanneldesc_t (XDR *, RCchanneldesc_t*);
extern bool_t xdr_RCchanneldesc (XDR *, RCchanneldesc*);
extern bool_t xdr_RCtexture_t (XDR *, RCtexture_t*);
extern bool_t xdr_RCtexture (XDR *, RCtexture*);
extern bool_t xdr_RCfuncattr_t (XDR *, RCfuncattr_t*);
extern bool_t xdr_RCfuncattr (XDR *, RCfuncattr*);
extern bool_t xdr_RCargType (XDR *, RCargType*);
extern bool_t xdr_RCargVal (XDR *, RCargVal*);
extern bool_t xdr_RCarg (XDR *, RCarg*);
extern bool_t xdr_RCargs (XDR *, RCargs*);
extern bool_t xdr_dscudaResult (XDR *, dscudaResult*);
extern bool_t xdr_dscudaThreadGetLimitResult (XDR *, dscudaThreadGetLimitResult*);
extern bool_t xdr_dscudaThreadGetCacheConfigResult (XDR *, dscudaThreadGetCacheConfigResult*);
extern bool_t xdr_dscudaMallocResult (XDR *, dscudaMallocResult*);
extern bool_t xdr_dscudaHostAllocResult (XDR *, dscudaHostAllocResult*);
extern bool_t xdr_dscudaMallocHostResult (XDR *, dscudaMallocHostResult*);
extern bool_t xdr_dscudaMallocArrayResult (XDR *, dscudaMallocArrayResult*);
extern bool_t xdr_dscudaMallocPitchResult (XDR *, dscudaMallocPitchResult*);
extern bool_t xdr_dscudaMemcpyD2HResult (XDR *, dscudaMemcpyD2HResult*);
extern bool_t xdr_dscudaMemcpyH2HResult (XDR *, dscudaMemcpyH2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayD2HResult (XDR *, dscudaMemcpyToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpyToArrayH2HResult (XDR *, dscudaMemcpyToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult (XDR *, dscudaMemcpy2DToArrayD2HResult*);
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult (XDR *, dscudaMemcpy2DToArrayH2HResult*);
extern bool_t xdr_dscudaMemcpy2DD2HResult (XDR *, dscudaMemcpy2DD2HResult*);
extern bool_t xdr_dscudaMemcpy2DH2HResult (XDR *, dscudaMemcpy2DH2HResult*);
extern bool_t xdr_dscudaGetDeviceResult (XDR *, dscudaGetDeviceResult*);
extern bool_t xdr_dscudaGetDeviceCountResult (XDR *, dscudaGetDeviceCountResult*);
extern bool_t xdr_dscudaGetDevicePropertiesResult (XDR *, dscudaGetDevicePropertiesResult*);
extern bool_t xdr_dscudaDriverGetVersionResult (XDR *, dscudaDriverGetVersionResult*);
extern bool_t xdr_dscudaRuntimeGetVersionResult (XDR *, dscudaRuntimeGetVersionResult*);
extern bool_t xdr_dscudaGetErrorStringResult (XDR *, dscudaGetErrorStringResult*);
extern bool_t xdr_dscudaCreateChannelDescResult (XDR *, dscudaCreateChannelDescResult*);
extern bool_t xdr_dscudaGetChannelDescResult (XDR *, dscudaGetChannelDescResult*);
extern bool_t xdr_dscudaChooseDeviceResult (XDR *, dscudaChooseDeviceResult*);
extern bool_t xdr_dscudaMemcpyAsyncD2HResult (XDR *, dscudaMemcpyAsyncD2HResult*);
extern bool_t xdr_dscudaMemcpyAsyncH2HResult (XDR *, dscudaMemcpyAsyncH2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult (XDR *, dscudaMemcpyFromSymbolD2HResult*);
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult (XDR *, dscudaMemcpyFromSymbolAsyncD2HResult*);
extern bool_t xdr_dscudaStreamCreateResult (XDR *, dscudaStreamCreateResult*);
extern bool_t xdr_dscudaEventCreateResult (XDR *, dscudaEventCreateResult*);
extern bool_t xdr_dscudaEventElapsedTimeResult (XDR *, dscudaEventElapsedTimeResult*);
extern bool_t xdr_dscudaHostGetDevicePointerResult (XDR *, dscudaHostGetDevicePointerResult*);
extern bool_t xdr_dscudaHostGetFlagsResult (XDR *, dscudaHostGetFlagsResult*);
extern bool_t xdr_dscudaLoadModuleResult (XDR *, dscudaLoadModuleResult*);
extern bool_t xdr_dscudaFuncGetAttributesResult (XDR *, dscudaFuncGetAttributesResult*);
extern bool_t xdr_dscudaBindTextureResult (XDR *, dscudaBindTextureResult*);
extern bool_t xdr_dscudaBindTexture2DResult (XDR *, dscudaBindTexture2DResult*);
extern bool_t xdr_dscufftResult (XDR *, dscufftResult*);
extern bool_t xdr_dscufftPlanResult (XDR *, dscufftPlanResult*);
extern bool_t xdr_dscublasResult (XDR *, dscublasResult*);
extern bool_t xdr_dscublasCreateResult (XDR *, dscublasCreateResult*);
extern bool_t xdr_dscublasGetVectorResult (XDR *, dscublasGetVectorResult*);
extern bool_t xdr_RCdim3 (XDR *, RCdim3*);
extern bool_t xdr_dscudathreadsetlimitid_1_argument (XDR *, dscudathreadsetlimitid_1_argument*);
extern bool_t xdr_dscudastreamwaiteventid_1_argument (XDR *, dscudastreamwaiteventid_1_argument*);
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument (XDR *, dscudaeventelapsedtimeid_1_argument*);
extern bool_t xdr_dscudaeventrecordid_1_argument (XDR *, dscudaeventrecordid_1_argument*);
extern bool_t xdr_dscudalaunchkernelid_1_argument (XDR *, dscudalaunchkernelid_1_argument*);
extern bool_t xdr_dscudaloadmoduleid_1_argument (XDR *, dscudaloadmoduleid_1_argument*);
extern bool_t xdr_dscudafuncgetattributesid_1_argument (XDR *, dscudafuncgetattributesid_1_argument*);
extern bool_t xdr_dscudamemcpyh2hid_1_argument (XDR *, dscudamemcpyh2hid_1_argument*);
extern bool_t xdr_dscudamemcpyh2did_1_argument (XDR *, dscudamemcpyh2did_1_argument*);
extern bool_t xdr_dscudamemcpyd2hid_1_argument (XDR *, dscudamemcpyd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyd2did_1_argument (XDR *, dscudamemcpyd2did_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument (XDR *, dscudamemcpyasynch2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasynch2did_1_argument (XDR *, dscudamemcpyasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument (XDR *, dscudamemcpyasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument (XDR *, dscudamemcpyasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument (XDR *, dscudamemcpytosymbolh2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument (XDR *, dscudamemcpytosymbold2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument (XDR *, dscudamemcpyfromsymbold2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument (XDR *, dscudamemcpyfromsymbold2did_1_argument*);
extern bool_t xdr_dscudamemsetid_1_argument (XDR *, dscudamemsetid_1_argument*);
extern bool_t xdr_dscudahostallocid_1_argument (XDR *, dscudahostallocid_1_argument*);
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument (XDR *, dscudahostgetdevicepointerid_1_argument*);
extern bool_t xdr_dscudamallocarrayid_1_argument (XDR *, dscudamallocarrayid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument (XDR *, dscudamemcpytoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument (XDR *, dscudamemcpytoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument (XDR *, dscudamemcpytoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument (XDR *, dscudamemcpytoarrayd2did_1_argument*);
extern bool_t xdr_dscudamallocpitchid_1_argument (XDR *, dscudamallocpitchid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument (XDR *, dscudamemcpy2dtoarrayh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument (XDR *, dscudamemcpy2dtoarrayh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument (XDR *, dscudamemcpy2dtoarrayd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument (XDR *, dscudamemcpy2dtoarrayd2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument (XDR *, dscudamemcpy2dh2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dh2did_1_argument (XDR *, dscudamemcpy2dh2did_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument (XDR *, dscudamemcpy2dd2hid_1_argument*);
extern bool_t xdr_dscudamemcpy2dd2did_1_argument (XDR *, dscudamemcpy2dd2did_1_argument*);
extern bool_t xdr_dscudamemset2did_1_argument (XDR *, dscudamemset2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument (XDR *, dscudamemcpytosymbolasynch2did_1_argument*);
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument (XDR *, dscudamemcpytosymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2hid_1_argument*);
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument (XDR *, dscudamemcpyfromsymbolasyncd2did_1_argument*);
extern bool_t xdr_dscudacreatechanneldescid_1_argument (XDR *, dscudacreatechanneldescid_1_argument*);
extern bool_t xdr_dscudabindtextureid_1_argument (XDR *, dscudabindtextureid_1_argument*);
extern bool_t xdr_dscudabindtexture2did_1_argument (XDR *, dscudabindtexture2did_1_argument*);
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument (XDR *, dscudabindtexturetoarrayid_1_argument*);
extern bool_t xdr_dscufftplan3did_1_argument (XDR *, dscufftplan3did_1_argument*);
extern bool_t xdr_dscufftexecc2cid_1_argument (XDR *, dscufftexecc2cid_1_argument*);
#else
extern bool_t xdr_RCadr ();
extern bool_t xdr_RCstream ();
extern bool_t xdr_RCevent ();
extern bool_t xdr_RCipaddr ();
extern bool_t xdr_RCsize ();
extern bool_t xdr_RCerror ();
extern bool_t xdr_RCbuf ();
extern bool_t xdr_RCchannelformat ();
extern bool_t xdr_RCpid ();
extern bool_t xdr_RCchanneldesc_t ();
extern bool_t xdr_RCchanneldesc ();
extern bool_t xdr_RCtexture_t ();
extern bool_t xdr_RCtexture ();
extern bool_t xdr_RCfuncattr_t ();
extern bool_t xdr_RCfuncattr ();
extern bool_t xdr_RCargType ();
extern bool_t xdr_RCargVal ();
extern bool_t xdr_RCarg ();
extern bool_t xdr_RCargs ();
extern bool_t xdr_dscudaResult ();
extern bool_t xdr_dscudaThreadGetLimitResult ();
extern bool_t xdr_dscudaThreadGetCacheConfigResult ();
extern bool_t xdr_dscudaMallocResult ();
extern bool_t xdr_dscudaHostAllocResult ();
extern bool_t xdr_dscudaMallocHostResult ();
extern bool_t xdr_dscudaMallocArrayResult ();
extern bool_t xdr_dscudaMallocPitchResult ();
extern bool_t xdr_dscudaMemcpyD2HResult ();
extern bool_t xdr_dscudaMemcpyH2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpyToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayD2HResult ();
extern bool_t xdr_dscudaMemcpy2DToArrayH2HResult ();
extern bool_t xdr_dscudaMemcpy2DD2HResult ();
extern bool_t xdr_dscudaMemcpy2DH2HResult ();
extern bool_t xdr_dscudaGetDeviceResult ();
extern bool_t xdr_dscudaGetDeviceCountResult ();
extern bool_t xdr_dscudaGetDevicePropertiesResult ();
extern bool_t xdr_dscudaDriverGetVersionResult ();
extern bool_t xdr_dscudaRuntimeGetVersionResult ();
extern bool_t xdr_dscudaGetErrorStringResult ();
extern bool_t xdr_dscudaCreateChannelDescResult ();
extern bool_t xdr_dscudaGetChannelDescResult ();
extern bool_t xdr_dscudaChooseDeviceResult ();
extern bool_t xdr_dscudaMemcpyAsyncD2HResult ();
extern bool_t xdr_dscudaMemcpyAsyncH2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolD2HResult ();
extern bool_t xdr_dscudaMemcpyFromSymbolAsyncD2HResult ();
extern bool_t xdr_dscudaStreamCreateResult ();
extern bool_t xdr_dscudaEventCreateResult ();
extern bool_t xdr_dscudaEventElapsedTimeResult ();
extern bool_t xdr_dscudaHostGetDevicePointerResult ();
extern bool_t xdr_dscudaHostGetFlagsResult ();
extern bool_t xdr_dscudaLoadModuleResult ();
extern bool_t xdr_dscudaFuncGetAttributesResult ();
extern bool_t xdr_dscudaBindTextureResult ();
extern bool_t xdr_dscudaBindTexture2DResult ();
extern bool_t xdr_dscufftResult ();
extern bool_t xdr_dscufftPlanResult ();
extern bool_t xdr_dscublasResult ();
extern bool_t xdr_dscublasCreateResult ();
extern bool_t xdr_dscublasGetVectorResult ();
extern bool_t xdr_RCdim3 ();
extern bool_t xdr_dscudathreadsetlimitid_1_argument ();
extern bool_t xdr_dscudastreamwaiteventid_1_argument ();
extern bool_t xdr_dscudaeventelapsedtimeid_1_argument ();
extern bool_t xdr_dscudaeventrecordid_1_argument ();
extern bool_t xdr_dscudalaunchkernelid_1_argument ();
extern bool_t xdr_dscudaloadmoduleid_1_argument ();
extern bool_t xdr_dscudafuncgetattributesid_1_argument ();
extern bool_t xdr_dscudamemcpyh2hid_1_argument ();
extern bool_t xdr_dscudamemcpyh2did_1_argument ();
extern bool_t xdr_dscudamemcpyd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyd2did_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolh2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbold2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbold2did_1_argument ();
extern bool_t xdr_dscudamemsetid_1_argument ();
extern bool_t xdr_dscudahostallocid_1_argument ();
extern bool_t xdr_dscudahostgetdevicepointerid_1_argument ();
extern bool_t xdr_dscudamallocarrayid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpytoarrayd2did_1_argument ();
extern bool_t xdr_dscudamallocpitchid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dtoarrayd2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dh2did_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2hid_1_argument ();
extern bool_t xdr_dscudamemcpy2dd2did_1_argument ();
extern bool_t xdr_dscudamemset2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasynch2did_1_argument ();
extern bool_t xdr_dscudamemcpytosymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2hid_1_argument ();
extern bool_t xdr_dscudamemcpyfromsymbolasyncd2did_1_argument ();
extern bool_t xdr_dscudacreatechanneldescid_1_argument ();
extern bool_t xdr_dscudabindtextureid_1_argument ();
extern bool_t xdr_dscudabindtexture2did_1_argument ();
extern bool_t xdr_dscudabindtexturetoarrayid_1_argument ();
extern bool_t xdr_dscufftplan3did_1_argument ();
extern bool_t xdr_dscufftexecc2cid_1_argument ();
#endif
#ifdef __cplusplus
}
#endif
#endif
#pragma end dscudarpc.h
#pragma begin dscudamacros.h
#ifndef DSCUDA_MACROS_H
#define DSCUDA_MACROS_H
#define WARN(lv, fmt, args...) if (lv <= dscudaWarnLevel()) fprintf(stderr, fmt, ## args);
#define WARNONCE(lv, fmt, args...) if (lv <= dscudaWarnLevel()) { \
static int firstcall = 1; \
if (firstcall) { \
firstcall = 0; \
fprintf(stderr, fmt, ## args); \
} \
}
#define ALIGN_UP(off, align) (off) = ((off) + (align) - 1) & ~((align) - 1)
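/* ALIGN_UP rounds 'off' up to the next multiple of 'align' (assumed to be a
   power of two), e.g. off = 5, align = 8 -> off becomes 8. It is used when
   packing kernel arguments at their natural alignment. */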
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
#endif
#pragma end dscudamacros.h
#define TEST_NZ(x) do { if ( (x)) {WARN(0, #x " failed (returned non-zero).\n" ); exit(EXIT_FAILURE); } } while (0)
#define TEST_Z(x) do { if (!(x)) {WARN(0, #x " failed (returned zero/null).\n"); exit(EXIT_FAILURE); } } while (0)
#define RC_NWR_PER_POST (16)
#define RC_SGE_SIZE (1024 * 1024 * 2)
#define RC_WR_MAX (RC_NWR_PER_POST * 16)
#define RC_RDMA_BUF_SIZE (RC_NWR_PER_POST * RC_SGE_SIZE)
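/* With the values above: RC_WR_MAX = 16 * 16 = 256 outstanding work requests,
   and RC_RDMA_BUF_SIZE = 16 * 2 MiB = 32 MiB of RDMA buffer per post. */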
#if RC_RDMA_BUF_SIZE < RC_KMODULEIMAGELEN
#error "RC_RDMA_BUF_SIZE too small."
#endif
#define RC_SERVER_IBV_CQ_SIZE (RC_WR_MAX)
#define RC_CLIENT_IBV_CQ_SIZE (65536)
#define RC_IBV_IP_PORT_BASE (65432)
#define RC_IBV_TIMEOUT (500)
struct message {
struct ibv_mr mr[RC_NWR_PER_POST];
};
enum rdma_state_t {
STATE_INIT,
STATE_READY,
STATE_BUSY,
};
typedef struct {
struct rdma_cm_id *id;
struct ibv_qp *qp;
struct ibv_context *ibvctx;
struct ibv_pd *pd;
struct ibv_cq *cq;
struct ibv_comp_channel *comp_channel;
struct message *recv_msg;
struct message *send_msg;
char *rdma_local_region;
char *rdma_remote_region;
struct ibv_mr *recv_mr;
struct ibv_mr *send_mr;
struct ibv_mr peer_mr[RC_NWR_PER_POST];
struct ibv_mr *rdma_local_mr[RC_NWR_PER_POST];
struct ibv_mr *rdma_remote_mr[RC_NWR_PER_POST];
pthread_t cq_poller_thread;
int connected;
enum rdma_state_t rdma_state;
int rdma_nreq_pending;
} IbvConnection;
typedef enum {
RCMethodNone = 0,
RCMethodMemcpyH2D,
RCMethodMemcpyD2H,
RCMethodMemcpyD2D,
RCMethodMalloc,
RCMethodFree,
RCMethodGetErrorString,
RCMethodGetDeviceProperties,
RCMethodRuntimeGetVersion,
RCMethodThreadSynchronize,
RCMethodThreadExit,
RCMethodDeviceSynchronize,
RCMethodDscudaMemcpyToSymbolH2D,
RCMethodDscudaMemcpyToSymbolD2D,
RCMethodDscudaMemcpyFromSymbolD2H,
RCMethodDscudaMemcpyFromSymbolD2D,
RCMethodDscudaMemcpyToSymbolAsyncH2D,
RCMethodDscudaMemcpyToSymbolAsyncD2D,
RCMethodDscudaMemcpyFromSymbolAsyncD2H,
RCMethodDscudaMemcpyFromSymbolAsyncD2D,
RCMethodDscudaLoadModule,
RCMethodDscudaLaunchKernel,
RCMethodEnd
} RCMethod;
typedef struct {
RCMethod method;
int payload;
} IbvHdr;
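/* Every invoke/return header below begins with the RCMethod tag, so the
   receiver can dispatch on the first field. 'payload' in IbvHdr is read here
   as the byte count of the message body (an assumption from usage, not from
   any DS-CUDA documentation). */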
typedef struct {
RCMethod method;
size_t count;
RCadr dstadr;
void *srcbuf;
} IbvMemcpyH2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvMemcpyH2DReturnHdr;
typedef struct {
RCMethod method;
size_t count;
RCadr srcadr;
} IbvMemcpyD2HInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
void *dstbuf;
} IbvMemcpyD2HReturnHdr;
typedef struct {
RCMethod method;
size_t count;
RCadr dstadr;
RCadr srcadr;
} IbvMemcpyD2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvMemcpyD2DReturnHdr;
typedef struct {
RCMethod method;
size_t size;
} IbvMallocInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
RCadr devAdr;
} IbvMallocReturnHdr;
typedef struct {
RCMethod method;
RCadr devAdr;
} IbvFreeInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvFreeReturnHdr;
typedef struct {
RCMethod method;
int device;
cudaError_t err;
} IbvGetErrorStringInvokeHdr;
typedef struct {
RCMethod method;
char *errmsg;
} IbvGetErrorStringReturnHdr;
typedef struct {
RCMethod method;
int device;
} IbvGetDevicePropertiesInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
cudaDeviceProp prop;
} IbvGetDevicePropertiesReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvRuntimeGetVersionInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
int version;
} IbvRuntimeGetVersionReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvThreadSynchronizeInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvThreadSynchronizeReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvThreadExitInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvThreadExitReturnHdr;
typedef struct {
RCMethod method;
char dummy[8];
} IbvDeviceSynchronizeInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDeviceSynchronizeReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
void *src;
} IbvDscudaMemcpyToSymbolH2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyToSymbolH2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCadr srcadr;
} IbvDscudaMemcpyToSymbolD2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyToSymbolD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
} IbvDscudaMemcpyFromSymbolD2HInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
void *dst;
} IbvDscudaMemcpyFromSymbolD2HReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCadr dstadr;
} IbvDscudaMemcpyFromSymbolD2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyFromSymbolD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
void *src;
} IbvDscudaMemcpyToSymbolAsyncH2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyToSymbolAsyncH2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
RCadr srcadr;
} IbvDscudaMemcpyToSymbolAsyncD2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyToSymbolAsyncD2DReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
} IbvDscudaMemcpyFromSymbolAsyncD2HInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
void *dst;
} IbvDscudaMemcpyFromSymbolAsyncD2HReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
char symbol[RC_SNAMELEN];
size_t count;
size_t offset;
RCstream stream;
RCadr dstadr;
} IbvDscudaMemcpyFromSymbolAsyncD2DInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaMemcpyFromSymbolAsyncD2DReturnHdr;
typedef struct {
RCMethod method;
uint64_t ipaddr;
unsigned long int pid;
char modulename[RC_KMODULENAMELEN];
void *moduleimage;
} IbvDscudaLoadModuleInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
int moduleid;
} IbvDscudaLoadModuleReturnHdr;
typedef struct {
RCMethod method;
int moduleid;
int kernelid;
char kernelname[RC_KNAMELEN];
unsigned int gdim[3];
unsigned int bdim[3];
unsigned int smemsize;
RCstream stream;
int narg;
void *args;
} IbvDscudaLaunchKernelInvokeHdr;
typedef struct {
RCMethod method;
cudaError_t err;
} IbvDscudaLaunchKernelReturnHdr;
typedef struct {
int type;
union {
uint64_t pointerval;
unsigned int intval;
float floatval;
char customval[RC_KARGMAX];
} val;
unsigned int offset;
unsigned int size;
} IbvArg;
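/* IbvArg carries one marshalled kernel argument for the IBV transport:
   'type' selects which member of 'val' is meaningful, while 'offset' and
   'size' record where the argument sits in the packed parameter buffer
   (see the ALIGN_UP bookkeeping in the generated stub below). */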
void rdmaBuildConnection(struct rdma_cm_id *id, bool is_server);
void rdmaBuildParams(struct rdma_conn_param *params);
void rdmaDestroyConnection(IbvConnection *conn);
void rdmaSetOnCompletionHandler(void (*handler)(struct ibv_wc *));
void rdmaOnCompletionClient(struct ibv_wc *);
void rdmaOnCompletionServer(struct ibv_wc *);
void rdmaWaitEvent(struct rdma_event_channel *ec, rdma_cm_event_type et, int (*handler)(struct rdma_cm_id *id));
void rdmaWaitReadyToKickoff(IbvConnection *conn);
void rdmaWaitReadyToDisconnect(IbvConnection *conn);
void rdmaKickoff(IbvConnection *conn, int length);
void rdmaPipelinedKickoff(IbvConnection *conn, int length, char *payload_buf, char *payload_src, int payload_size);
void rdmaSendMr(IbvConnection *conn);
#endif
#endif
#pragma end ibv_rdma.h
enum {
RC_REMOTECALL_TYPE_RPC,
RC_REMOTECALL_TYPE_IBV,
};
int dscudaWarnLevel(void);
void dscudaSetWarnLevel(int level);
char *dscudaMemcpyKindName(cudaMemcpyKind kind);
const char *dscudaGetIpaddrString(unsigned int addr);
double RCgetCputime(double *t0);
void *dscudaUvaOfAdr(void *adr, int devid);
int dscudaDevidOfUva(void *adr);
void *dscudaAdrOfUva(void *adr);
int dscudaNredundancy(void);
void dscudaSetAutoVerb(int verb);
int dscudaRemoteCallType(void);
void dscudaSetErrorHandler(void (*handler)(void *), void *handler_arg);
void dscudaGetMangledFunctionName(char *name, const char *funcif, const char *ptxdata);
int *dscudaLoadModule(char *srcname, char *strdata);
void rpcDscudaLaunchKernelWrapper(int *moduleid, int kid, char *kname,
RCdim3 gdim, RCdim3 bdim, RCsize smemsize, RCstream stream,
RCargs args);
void ibvDscudaLaunchKernelWrapper(int *moduleid, int kid, char *kname,
int *gdim, int *bdim, RCsize smemsize, RCstream stream,
int narg, IbvArg *arg);
cudaError_t dscudaFuncGetAttributesWrapper(int *moduleid, struct cudaFuncAttributes *attr, const char *func);
cudaError_t dscudaMemcpyToSymbolWrapper(int *moduleid, const char *symbol, const void *src,
size_t count, size_t offset = 0,
enum cudaMemcpyKind kind = cudaMemcpyHostToDevice);
cudaError_t dscudaMemcpyToSymbolAsyncWrapper(int *moduleid, const char *symbol, const void *src,
size_t count, size_t offset = 0,
enum cudaMemcpyKind kind = cudaMemcpyHostToDevice, cudaStream_t stream = 0);
cudaError_t dscudaMemcpyFromSymbolWrapper(int *moduleid, void *dst, const char *symbol,
size_t count, size_t offset = 0,
enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost);
cudaError_t dscudaMemcpyFromSymbolAsyncWrapper(int *moduleid, void *dst, const char *symbol,
size_t count, size_t offset = 0,
enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost, cudaStream_t stream = 0);
cudaError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct textureReference *tex,
const void *devPtr,
const struct cudaChannelFormatDesc *desc,
size_t size = UINT_MAX);
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
const struct cudaChannelFormatDesc &desc,
size_t size = UINT_MAX)
{
return dscudaBindTextureWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), "tex", offset, &tex, devPtr, &desc, size);
}
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTextureWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
size_t size = UINT_MAX)
{
return dscudaBindTextureWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), "tex", offset, tex, devPtr, tex.channelDesc, size);
}
cudaError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct textureReference *tex,
const void *devPtr,
const struct cudaChannelFormatDesc *desc,
size_t width, size_t height, size_t pitch);
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
const struct cudaChannelFormatDesc &desc,
size_t width, size_t height, size_t pitch)
{
return dscudaBindTexture2DWrapper(moduleid, texname,
offset, &tex, devPtr, &desc, width, height, pitch);
}
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTexture2DWrapper(int *moduleid, char *texname,
size_t *offset,
const struct texture<T, dim, readMode> &tex,
const void *devPtr,
size_t width, size_t height, size_t pitch)
{
return dscudaBindTexture2DWrapper(moduleid, texname,
offset, &tex, devPtr, &tex.channelDesc, width, height, pitch);
}
cudaError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct textureReference *tex,
const struct cudaArray * array,
const struct cudaChannelFormatDesc *desc);
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct texture<T, dim, readMode> &tex,
const struct cudaArray * array,
const struct cudaChannelFormatDesc & desc)
{
return dscudaBindTextureToArrayWrapper(moduleid, texname, &tex, array, &desc);
}
template<class T, int dim, enum cudaTextureReadMode readMode>
cudaError_t dscudaBindTextureToArrayWrapper(int *moduleid, char *texname,
const struct texture<T, dim, readMode> &tex,
const struct cudaArray * array)
{
struct cudaChannelFormatDesc desc;
cudaError_t err = cudaGetChannelDesc(&desc, array);
return err == cudaSuccess ? dscudaBindTextureToArrayWrapper(moduleid, texname, &tex, array, &desc) : err;
}
#endif
#pragma end dscuda.h
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#ifdef MIN
#undef MIN
#endif
#ifdef MAX
#undef MAX
#endif
#include <cutil_inline.h>
#pragma begin direct.h
#ifndef DIRECT_H
#define DIRECT_H
#define NMAX (1024*1024*8)
#define NDEVMAX 1024
void readnbody(int *nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname);
void writenbody(int nj, double *mj, double (*xj)[3], double (*vj)[3], char *fname);
void push_velocity(double (*vj)[3], double (*a)[3], double dt, int nj);
void push_position(double (*xj)[3], double (*vj)[3], double (*a)[3], double dt, int nj);
void energy(double *mj, double (*vj)[3], double *p, int nj, double *ke, double *pe);
void get_cputime(double *lap, double *split);
void plot_star(double x[NMAX][3], int n, double time, double ratio, double m[NMAX], double initm);
void create_cold_homosphere(int n, double *mj, double (*xj)[3], double (*vj)[3]);
void create_plummer(int n, double *mj, double (*xj)[3], double (*vj)[3]);
#endif
#pragma end direct.h
/*
* stub for remote call to gravity_kernel.
*/
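/*
 * A note on the stub below (an inference from the surrounding code, not from
 * DS-CUDA documentation): it appears to be generated in place of the original
 * gravity_kernel<<<gdim, bdim, smem, stream>>>(...) launch. It packs each
 * argument (aligned with ALIGN_UP) and forwards the launch either over
 * InfiniBand verbs (ibvDscudaLaunchKernelWrapper) or over RPC
 * (rpcDscudaLaunchKernelWrapper), depending on dscudaRemoteCallType().
 */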
void
dscudagravity_kernel(dim3 _gdim, dim3 _bdim, size_t _smemsize, cudaStream_t _stream , float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n)
{
int _narg = 6;
int _ibvgdim[3], _ibvbdim[3];
IbvArg _ibvarg[6], *_ibvargp;
RCargs _rcargs;
RCarg _rcarg[6], *_rcargp;
RCdim3 _gdimrc, _bdimrc;
int _off = 0;
int _rcargc = 0;
void *_devptr;
_rcargs.RCargs_val = _rcarg;
_rcargs.RCargs_len = _narg;
static char mangledname_[512] = {0,};
if (!mangledname_[0]) {
if (1) {
dscudaGetMangledFunctionName(mangledname_, __PRETTY_FUNCTION__, Ptxdata);
}
else {
char buf_[256];
sprintf(buf_, "%s", __FUNCTION__);
strcpy(mangledname_, buf_ + strlen("dscuda")); // obtain original function name.
}
WARN(3, "mangled name : %s\n", mangledname_);
}
if (dscudaRemoteCallType() == RC_REMOTECALL_TYPE_IBV) {
// a pointer to a device-address 'dscudaAdrOfUva(m)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(m);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(x)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(x);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a float 'eps'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
ALIGN_UP(_off, __alignof(float));
_ibvargp->type = dscudaArgTypeF;
_ibvargp->offset = _off;
_ibvargp->val.floatval = eps;
_ibvargp->size = sizeof(float);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(a)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(a);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(p)'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
_devptr = (void*)(size_t)dscudaAdrOfUva(p);
ALIGN_UP(_off, __alignof(_devptr));
_ibvargp->type = dscudaArgTypeP;
_ibvargp->offset = _off;
_ibvargp->val.pointerval = (RCadr)_devptr;
_ibvargp->size = sizeof(_devptr);
_off += _ibvargp->size;
// an integer 'n'.
_ibvargp = _ibvarg + _rcargc;
_rcargc++;
ALIGN_UP(_off, __alignof(int));
_ibvargp->type = dscudaArgTypeI;
_ibvargp->offset = _off;
_ibvargp->val.intval = n;
_ibvargp->size = sizeof(int);
_off += _ibvargp->size;
_ibvgdim[0] = _gdim.x; _ibvgdim[1] = _gdim.y; _ibvgdim[2] = _gdim.z;
_ibvbdim[0] = _bdim.x; _ibvbdim[1] = _bdim.y; _ibvbdim[2] = _bdim.z;
#if !RPC_ONLY
ibvDscudaLaunchKernelWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), 0, mangledname_,
_ibvgdim, _ibvbdim, _smemsize, (RCstream)_stream,
_narg, _ibvarg);
#endif
}
else {
// a pointer to a device-address 'dscudaAdrOfUva(m)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(m);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(x)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(x);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a float 'eps'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
ALIGN_UP(_off, __alignof(float));
_rcargp->val.type = dscudaArgTypeF;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.valuef = eps;
_rcargp->size = sizeof(float);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(a)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(a);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// a pointer to a device-address 'dscudaAdrOfUva(p)'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
_devptr = (void*)(size_t)dscudaAdrOfUva(p);
ALIGN_UP(_off, __alignof(_devptr));
_rcargp->val.type = dscudaArgTypeP;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.address = (RCadr)_devptr;
_rcargp->size = sizeof(_devptr);
_off += _rcargp->size;
// an integer 'n'.
_rcargp = &(_rcargs.RCargs_val[_rcargc++]);
ALIGN_UP(_off, __alignof(int));
_rcargp->val.type = dscudaArgTypeI;
_rcargp->offset = _off;
_rcargp->val.RCargVal_u.valuei = n;
_rcargp->size = sizeof(int);
_off += _rcargp->size;
_gdimrc.x = _gdim.x; _gdimrc.y = _gdim.y; _gdimrc.z = _gdim.z;
_bdimrc.x = _bdim.x; _bdimrc.y = _bdim.y; _bdimrc.z = _bdim.z;
rpcDscudaLaunchKernelWrapper(dscudaLoadModule("./dscudatmp/direct.cu.ptx", Ptxdata), 0, mangledname_,
_gdimrc, _bdimrc, _smemsize, (RCstream)_stream,
_rcargs);
}
}
void gravity_kernel(float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n);
static void calc_gravity_gpu(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n);
static void calc_gravity(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n);
void
gravity_kernel(float *m, float (*x)[3], float eps, float (*a)[3], float *p, int n)
{
/* nop */
}
static void
calc_gravity_gpu(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n)
{
static int firstcall = 1;
static float *d_m, (*d_x)[3], (*d_a)[3], *d_p;
static float floatbuf[NMAX*3];
int i, k;
int nth = 64;
dim3 threads(nth, 1, 1);
dim3 grids((n+nth-1)/nth, 1, 1);
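/* (n + nth - 1) / nth = ceil(n / nth): enough blocks of nth threads to cover all n particles. */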
if (firstcall) {
firstcall = 0;
cutilSafeCall(cudaMalloc((void**)&d_m, sizeof(float) * n));
cutilSafeCall(cudaMalloc((void**)&d_x, sizeof(float) * 3 * n));
cutilSafeCall(cudaMalloc((void**)&d_a, sizeof(float) * 3 * n));
cutilSafeCall(cudaMalloc((void**)&d_p, sizeof(float) * n));
}
for (i = 0 ; i < n; i++) {
floatbuf[i] = (float)m[i];
}
cutilSafeCall(cudaMemcpy(d_m, floatbuf, sizeof(float) * n, cudaMemcpyHostToDevice));
for (i = 0 ; i < n; i++) {
for (k = 0; k < 3; k++) {
floatbuf[3 * i + k] = (float)x[i][k];
}
}
cutilSafeCall(cudaMemcpy(d_x, floatbuf, sizeof(float) * 3 * n, cudaMemcpyHostToDevice));
dscudagravity_kernel(grids, threads, 0, NULL, d_m, d_x, (float)eps, d_a, d_p, n);
cutilSafeCall(cudaMemcpy(floatbuf, d_a, sizeof(float) * 3 * n, cudaMemcpyDeviceToHost));
for (i = 0 ; i < n; i++) {
for (k = 0; k < 3; k++) {
a[i][k] = (double)floatbuf[3 * i + k];
}
}
cutilSafeCall(cudaMemcpy(floatbuf, d_p, sizeof(float) * n, cudaMemcpyDeviceToHost));
for (i = 0 ; i < n; i++) {
p[i]= (double)floatbuf[i];
}
}
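/*
 * CPU reference: direct O(n^2) summation with Plummer softening,
 * r^2 = |dx|^2 + eps^2. The inner loop includes the j == i self term,
 * which contributes -m[i]/eps to the potential; the eps != 0 branch at
 * the end adds m[i]/eps back to cancel it.
 */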
static void
calc_gravity(double *m, double (*x)[3], double eps, double (*a)[3], double *p, int n)
{
double r, r2, mf, dx[3];
int i, j, k;
for (i = 0; i < n; i++) {
for (k = 0; k < 3; k++) {
a[i][k] = 0.0;
}
p[i] = 0.0;
for (j = 0; j < n; j++) {
for (k = 0; k < 3; k++) {
dx[k] = x[j][k] - x[i][k];
}
r2 = eps * eps;
for (k = 0; k < 3; k++) {
r2 += dx[k] * dx[k];
}
r = sqrt(r2);
mf = m[j] / (r * r2);
for (k = 0; k < 3; k++) {
a[i][k] += mf * dx[k];
}
p[i] -= m[j] / r;
}
}
if (eps != 0.0) {
double epsinv;
epsinv = 1.0 / eps;
for (i = 0; i < n; i++) {
p[i] += m[i] * epsinv;
}
}
}
#ifdef __DSCUDA__
static void
errhandler(void *arg)
{
fprintf(stderr, "calculation error on some GPU at timestep: %d\n",
*(int *)arg);
exit(1);
}
#endif
int
main(int argc, char **argv)
{
static double mj[NMAX], xj[NMAX][3], vj[NMAX][3];
static double a[NMAX][3], p[NMAX];
double time, dt, endt;
double eps;
double e, e0, ke, pe;
double lt=0.0, st=0.0, sustained;
int n, nstep, interval;
static int step;
#ifdef __DSCUDA__
dscudaSetErrorHandler(errhandler, (void *)&step);
#endif
eps = 0.02;
dt = 0.01;
endt = 1.1;
time = 0.0;
nstep = endt/dt;
if (argc < 3) {
fprintf(stderr, "performs gravitational N-body simulation with naive direct summation algorithm.\n"
"usage: %s <infile> <outfile>\n", argv[0]);
exit(1);
}
readnbody(&n, mj, xj, vj, argv[1]);
interval = 500 * (10000.0/n) * (10000.0/n);
if (interval * 10 > nstep) {
interval = nstep / 10;
}
interval = 1;
fprintf(stderr, "interval: %d\n", interval);
get_cputime(&lt,&st);
#if 1
calc_gravity_gpu(mj, xj, eps, a, p, n);
#else
calc_gravity(mj, xj, eps, a, p, n);
#endif
energy(mj, vj, p, n, &ke, &pe);
e0 = ke+pe;
printf("ke: %f pe: %f e0: %f\n", ke, pe, e0);
for (step = 1; step < nstep; step++) {
push_velocity(vj, a, 0.5*dt, n);
push_position(xj, vj, a, dt, n);
time = time + dt;
#if 1
calc_gravity_gpu(mj, xj, eps, a, p, n);
#else
calc_gravity(mj, xj, eps, a, p, n);
#endif
push_velocity(vj, a, 0.5*dt, n);
if (step % interval == 0) {
energy(mj, vj, p, n, &ke, &pe);
e = ke+pe;
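/* assumes roughly 38 floating-point operations per pairwise interaction */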
sustained = 38.0*((double)n)*((double)n)*interval/lt/1e9;
printf("speed: %g Gflops\n", sustained);
printf("step: %d time: %e\n", step, time);
printf("e: %e de: %e\n", e, e-e0);
printf("ke: %e pe: %e\n", ke, pe);
printf("ke/pe: %e\n\n", ke/pe);
get_cputime(&lt,&st);
}
}
writenbody(n, mj, xj, vj, argv[2]);
}
|
e27d07bf4b976eb45f43cf8dc6ab5f28817ed199.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipcub/hipcub.hpp>
int main() {
hipcub::CachingDeviceAllocator allocator;
void *d;
allocator.DeviceAllocate(&d, 1024);
allocator.DeviceFree(d);
}
| e27d07bf4b976eb45f43cf8dc6ab5f28817ed199.cu | #include <cub/cub.cuh>
int main() {
cub::CachingDeviceAllocator allocator;
void *d;
allocator.DeviceAllocate(&d, 1024);
allocator.DeviceFree(d);
}
|
43654a60cddb86925036c591a4f4125819b284fc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "lena_bw.pgm";
const char *refFilename = "ref_rotated.pgm";
const char *sampleName = "simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Constants
const float angle = 0.5f; // angle to rotate image by (in radians)
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
#ifdef __EMSCRIPTEN__
#include <cuda/emcuda.h>
// Texture reference for 2D float texture
texture tex;
#define STRINGIGY(a) #a
const char * transformKernel = STRINGIGY(
#endif
__global__ void transformKernel(float *outputData,
int width,
int height,
float theta)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float) width;
float v = y / (float) height;
// transform coordinates
u -= 0.5f;
v -= 0.5f;
float tu = u*cosf(theta) - v*sinf(theta) + 0.5f;
float tv = v*cosf(theta) + u*sinf(theta) + 0.5f;
// read from texture and write to global memory
outputData[y*width + x] = tex2D(tex, tu, tv);
}
#ifdef __EMSCRIPTEN__
);
#endif
#ifndef __EMSCRIPTEN__
// Texture reference for 2D float texture
texture<float, 2, hipReadModeElementType> tex;
#endif
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
// Process command-line arguments
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **) argv, "input"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"input",
(char **) &imageFilename);
if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"reference",
(char **) &refFilename);
}
else
{
printf("-input flag should be used with -reference flag");
exit(EXIT_FAILURE);
}
}
else if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
printf("-reference flag should be used with -input flag");
exit(EXIT_FAILURE);
}
}
runTest(argc, argv);
hipDeviceReset();
printf("%s completed, returned %s\n",
sampleName,
testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// Load reference image from image (output)
float *hDataRef = (float *) malloc(size);
char *refPath = sdkFindFilePath(refFilename, argv[0]);
if (refPath == NULL)
{
printf("Unable to find reference image file: %s\n", refFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(refPath, &hDataRef, &width, &height);
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(hipMalloc((void **) &dData, size));
// Allocate array and copy image data
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
checkCudaErrors(hipMallocArray(&cuArray,
&channelDesc,
width,
height));
checkCudaErrors(hipMemcpyToArray(cuArray,
0,
0,
hData,
size,
hipMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
#ifdef __EMSCRIPTEN__
//cudaRunKernelDim4<float*,unsigned int,unsigned int, float>("transformKernel", transformKernel, "", dimGrid, dimBlock , 4, dData, width, height, angle);
#else
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, angle);
#endif
checkCudaErrors(hipDeviceSynchronize());
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Execute the kernel
#ifdef __EMSCRIPTEN__
//cudaRunKernelDim4("transformKernel", transformKernel, "", dimGrid, dimBlock , 4, dData, width, height, angle);
#else
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dData, width, height, angle);
#endif
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&timer);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(hipMemcpy(hOutputData,
dData,
size,
hipMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_out.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
printf("\treference: <%s>\n", refPath);
testResult = compareData(hOutputData,
hDataRef,
width*height,
MAX_EPSILON_ERROR,
0.15f);
}
checkCudaErrors(hipFree(dData));
checkCudaErrors(hipFreeArray(cuArray));
free(imagePath);
free(refPath);
}
| 43654a60cddb86925036c591a4f4125819b284fc.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how use texture fetches in CUDA
*
* This sample takes an input PGM image (image_filename) and generates
* an output PGM image (image_filename_out). This CUDA kernel performs
* a simple 2D transform (rotation) on the texture coordinates (u,v).
*/
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
// Define the files that are to be saved and the reference images for validation
const char *imageFilename = "lena_bw.pgm";
const char *refFilename = "ref_rotated.pgm";
const char *sampleName = "simpleTexture";
////////////////////////////////////////////////////////////////////////////////
// Constants
const float angle = 0.5f; // angle to rotate image by (in radians)
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param outputData output data in global memory
////////////////////////////////////////////////////////////////////////////////
#ifdef __EMSCRIPTEN__
#include <cuda/emcuda.h>
// Texture reference for 2D float texture
texture tex;
#define STRINGIGY(a) #a
const char * transformKernel = STRINGIGY(
#endif
__global__ void transformKernel(float *outputData,
int width,
int height,
float theta)
{
// calculate normalized texture coordinates
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float) width;
float v = y / (float) height;
// transform coordinates
u -= 0.5f;
v -= 0.5f;
float tu = u*cosf(theta) - v*sinf(theta) + 0.5f;
float tv = v*cosf(theta) + u*sinf(theta) + 0.5f;
// read from texture and write to global memory
outputData[y*width + x] = tex2D(tex, tu, tv);
}
#ifdef __EMSCRIPTEN__
);
#endif
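// The kernel above uses inverse mapping: each output pixel's normalized
// coordinate is shifted to the image centre, rotated by theta, shifted back,
// and then used to sample the input; the texture unit supplies the wrap
// addressing and bilinear filtering configured on the host side.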
#ifndef __EMSCRIPTEN__
// Texture reference for 2D float texture
texture<float, 2, cudaReadModeElementType> tex;
#endif
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", sampleName);
// Process command-line arguments
if (argc > 1)
{
if (checkCmdLineFlag(argc, (const char **) argv, "input"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"input",
(char **) &imageFilename);
if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
getCmdLineArgumentString(argc,
(const char **) argv,
"reference",
(char **) &refFilename);
}
else
{
printf("-input flag should be used with -reference flag");
exit(EXIT_FAILURE);
}
}
else if (checkCmdLineFlag(argc, (const char **) argv, "reference"))
{
printf("-reference flag should be used with -input flag");
exit(EXIT_FAILURE);
}
}
runTest(argc, argv);
cudaDeviceReset();
printf("%s completed, returned %s\n",
sampleName,
testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height);
// Load reference image from image (output)
float *hDataRef = (float *) malloc(size);
char *refPath = sdkFindFilePath(refFilename, argv[0]);
if (refPath == NULL)
{
printf("Unable to find reference image file: %s\n", refFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(refPath, &hDataRef, &width, &height);
// Allocate device memory for result
float *dData = NULL;
checkCudaErrors(cudaMalloc((void **) &dData, size));
// Allocate array and copy image data
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
checkCudaErrors(cudaMallocArray(&cuArray,
&channelDesc,
width,
height));
checkCudaErrors(cudaMemcpyToArray(cuArray,
0,
0,
hData,
size,
cudaMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = true; // access with normalized texture coordinates
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(tex, cuArray, channelDesc));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// Warmup
#ifdef __EMSCRIPTEN__
//cudaRunKernelDim4<float*,unsigned int,unsigned int, float>("transformKernel", transformKernel, "", dimGrid, dimBlock , 4, dData, width, height, angle);
#else
transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, angle);
#endif
checkCudaErrors(cudaDeviceSynchronize());
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// Execute the kernel
#ifdef __EMSCRIPTEN__
//cudaRunKernelDim4("transformKernel", transformKernel, "", dimGrid, dimBlock , 4, dData, width, height, angle);
#else
transformKernel<<<dimGrid, dimBlock, 0>>>(dData, width, height, angle);
#endif
// Check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
printf("%.2f Mpixels/sec\n",
(width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&timer);
// Allocate mem for the result on host side
float *hOutputData = (float *) malloc(size);
// copy result from device to host
checkCudaErrors(cudaMemcpy(hOutputData,
dData,
size,
cudaMemcpyDeviceToHost));
// Write result to file
char outputFilename[1024];
strcpy(outputFilename, imagePath);
strcpy(outputFilename + strlen(imagePath) - 4, "_out.pgm");
sdkSavePGM(outputFilename, hOutputData, width, height);
printf("Wrote '%s'\n", outputFilename);
// Write regression file if necessary
if (checkCmdLineFlag(argc, (const char **) argv, "regression"))
{
// Write file for regression test
sdkWriteFile<float>("./data/regression.dat",
hOutputData,
width*height,
0.0f,
false);
}
else
{
// We need to reload the data from disk,
// because it is inverted upon output
sdkLoadPGM(outputFilename, &hOutputData, &width, &height);
printf("Comparing files\n");
printf("\toutput: <%s>\n", outputFilename);
printf("\treference: <%s>\n", refPath);
testResult = compareData(hOutputData,
hDataRef,
width*height,
MAX_EPSILON_ERROR,
0.15f);
}
checkCudaErrors(cudaFree(dData));
checkCudaErrors(cudaFreeArray(cuArray));
free(imagePath);
free(refPath);
}
|
3bdedf6c38c4c8c6e7b56756f04e2282da5155fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "histogram_atomics.cuh"
#include <stdio.h>
#include <float.h>
//#include "hip/device_functions.h"
#define %(c_precision_def)s
#define fType %(c_ftype)s
#define iType %(c_itype)s
#define uiType %(c_uitype)s
#define changeType %(c_changetype)s
// See ieee floating point specification
#define CUDART_INF_F __ull_as_fType(0x7ff0000000000000ULL)
#define CUDART_NEG_INF_F __ull_as_fType(0xfff0000000000000ULL)
__device__ fType __ull_as_fType(unsigned long long int a)
{
union {unsigned long long int a; fType b;} u;
u.a = a;
return u.b;
}
__device__ fType __change_as_fType(changeType a)
{
union {changeType a; fType b;} u;
u.a = a;
return u.b;
}
__device__ unsigned long long int __fType_as_change(fType a)
{
union {fType a; changeType b;} u;
u.a = a;
return u.b;
}
// You can use atomicCAS() to create an atomicMax for any type.
// See https://docs.nvidia.com/cuda/cuda-c-programming-guide/ for further
// information.
__device__ fType atomicMaxfType(fType *address, fType val)
{
changeType* address_as_ull = (changeType*) address;
changeType old = *address_as_ull, assumed;
while(val > __change_as_fType(old))
{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __fType_as_change(val));
}
return __change_as_fType(old);
}
__device__ fType atomicMinfType(fType *address, fType val)
{
changeType* address_as_ull = (changeType*) address;
changeType old = *address_as_ull, assumed;
while(val < __change_as_fType(old))
{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __fType_as_change(val));
}
return __change_as_fType(old);
}
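// Both helpers follow the usual compare-and-swap pattern: the loop only spins
// while the proposed value would actually change the stored one, and the old
// value is returned, mirroring the built-in atomics. A minimal usage sketch
// (hypothetical kernel, not part of this file):
// __global__ void track_max(fType *global_max, const fType *vals, iType n)
// {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// if(i < n) atomicMaxfType(global_max, vals[i]);
// }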
__global__ void max_min_reduce(const fType *d_array, const iType n_elements,
const iType no_of_dimensions, fType *d_max, fType *d_min)
{
// The first blockDim.x entries of the shared array are used for the max
// reduction; the last blockDim.x entries are used for the min reduction.
extern __shared__ fType shared[];
fType *shared_max = (fType*)shared;
fType *shared_min = (fType*)&shared[blockDim.x];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + tid;
// Initialize the global max and min values. This is a separate loop to
// avoid race conditions.
for(int d = 0; d < no_of_dimensions; d++)
{
if(gid == 0)
{
d_max[d] = CUDART_NEG_INF_F;
d_min[d] = CUDART_INF_F;
}
}
// Max- and Min-Reduce for each dimension
for(int d = 0; d < no_of_dimensions; d++)
{
// Initialize shared memory with input memory
if(gid < n_elements)
{
shared_max[tid] = d_array[gid*no_of_dimensions+d];
shared_min[tid] = d_array[gid*no_of_dimensions+d];
gid += gridDim.x * blockDim.x;
}
// Continue the reduction in each block with the leftover elements of the
// input array: if there are more elements than threads, fold each further
// element into this thread's running max/min (grid-stride loop).
while(gid < n_elements)
{
shared_max[tid] = max(shared_max[tid],
d_array[gid*no_of_dimensions+d]);
shared_min[tid] = min(shared_min[tid],
d_array[gid*no_of_dimensions+d]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = blockIdx.x * blockDim.x + threadIdx.x;
// Blockwise reduction
for(int i=blockDim.x/2; i > 0; i >>= 1)
{
// First check: For reduce algorithm
// Second check: Do not access memory outside of our input elements
// Third check: If there are less elements than threads in one block
// do not access out of bounds. Only for "last block" and
// n_elements/blockDim.x != 0
if(tid < i && gid < n_elements && gid + i < n_elements)
{
shared_max[tid] = max(shared_max[tid], shared_max[tid + i]);
shared_min[tid] = min(shared_min[tid], shared_min[tid + i]);
}
__syncthreads();
}
// Now return max value of all blocks in global memory
if(tid == 0 && gid < n_elements)
{
atomicMaxfType(&d_max[d], shared_max[0]);
atomicMinfType(&d_min[d], shared_min[0]);
}
}
}
// Takes max and min value for each dimension and the number of bins and
// returns a histogram with equally sized bins.
__global__ void histogram_gmem_atomics(const fType *in, const iType length,
const iType no_of_dimensions, const iType no_of_bins,
const iType no_of_flat_bins, uiType *out, fType *max_in, fType *min_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// initialize temporary histogram for each block in global memory
uiType *gmem = out + no_of_flat_bins * blockIdx.x;
// Each thread writes zeros to global memory
for(unsigned int i = tid; i < no_of_flat_bins; i += blockDim.x)
{
gmem[i] = 0;
}
// Make sure the whole per-block histogram is zeroed before any thread of
// this block starts accumulating into it.
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType bin_width = (max_in[d]-min_in[d])/no_of_bins;
fType val = in[i + d];
// Get the bin in the current dimension
int tmp_bin = (val-min_in[d])/bin_width;
if(tmp_bin >= no_of_bins) tmp_bin--;
// Get the right place in the histogram
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&gmem[current_bin], 1);
}
}
}
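// A minimal host-side sketch of how this kernel is combined with
// histogram_final_accum below (buffer names, num_blocks and block_size are
// assumptions, not part of this file):
// uiType *d_partial; // one flat histogram per block
// hipMalloc(&d_partial, num_blocks * no_of_flat_bins * sizeof(uiType));
// hipLaunchKernelGGL(histogram_gmem_atomics, dim3(num_blocks), dim3(block_size), 0, 0,
// d_in, length, no_of_dimensions, no_of_bins, no_of_flat_bins,
// d_partial, d_max, d_min);
// hipLaunchKernelGGL(histogram_final_accum, dim3(num_blocks), dim3(block_size), 0, 0,
// d_partial, num_blocks, d_out, no_of_bins, no_of_flat_bins, no_of_dimensions);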
// Takes edges for each dimension and the number of bins and
// returns a histogram with equally sized bins.
__global__ void histogram_gmem_atomics_with_edges(const fType *in,
const iType length, const iType no_of_dimensions,
const iType no_of_bins, const iType no_of_flat_bins,
uiType *out, const fType *edges_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// initialize temporary histogram for each block in global memory
uiType *gmem = out + no_of_flat_bins * blockIdx.x;
// Each thread writes zeros to global memory
for(unsigned int i = tid; i < no_of_flat_bins; i += blockDim.x)
{
gmem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory.
for(unsigned int i = gid * no_of_dimensions; i < length;
i += no_of_dimensions * total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType val = in[i + d];
int tmp_bin = 0;
while(val > edges_in[(no_of_bins+1)*d+tmp_bin+1] && tmp_bin < no_of_bins)
{
tmp_bin++;
}
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
__syncthreads();
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&gmem[current_bin], 1);
}
}
}
__global__ void histogram_smem_atomics(const fType *in, const iType length,
const iType no_of_dimensions, const iType no_of_bins,
const iType no_of_flat_bins, uiType *out, fType *max_in, fType *min_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
unsigned int threads_per_block = blockDim.x;
// initialize temporary accumulation array in shared memory
extern __shared__ uiType smem[];
// __shared__ uiType smem[no_of_bins * no_of_dimensions]; <- this is the idea
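// The size of this dynamically allocated shared array is fixed at launch
// time: the host must pass no_of_flat_bins * sizeof(uiType) bytes as the
// dynamic shared-memory argument of the kernel launch.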
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
smem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType bin_width = (max_in[d]-min_in[d])/no_of_bins;
fType val = in[i + d];
// Get the bin in the current dimension
int tmp_bin = (val-min_in[d])/bin_width;
if(tmp_bin >= no_of_bins) tmp_bin--;
// Get the right place in the histogram
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&smem[current_bin], 1);
}
}
__syncthreads();
// Write partial histograms in global memory
out = &out[blockIdx.x * no_of_flat_bins];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
out[i] = smem[i];
}
}
__global__ void histogram_smem_atomics_with_edges(const fType *in,
const iType length, const iType no_of_dimensions,
const iType no_of_bins, const iType no_of_flat_bins,
uiType *out, const fType *edges_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
unsigned int threads_per_block = blockDim.x;
// initialize temporary accumulation array in shared memory
extern __shared__ uiType smem[];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
smem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType val = in[i + d];
int tmp_bin = 0;
while(val > edges_in[(no_of_bins+1)*d+tmp_bin+1] && tmp_bin < no_of_bins)
{
tmp_bin++;
}
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
__syncthreads();
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&smem[current_bin], 1);
}
}
__syncthreads();
// Write partial histograms in global memory
uiType *overall_out = &out[blockIdx.x * no_of_flat_bins];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
overall_out[i] = smem[i];
}
}
__global__ void histogram_final_accum(const uiType *in,
iType no_of_histograms, uiType *out,
iType no_of_bins, iType histo_length, iType no_of_dimensions)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// Each thread merges values for another bin
for(unsigned int current_bin = gid; current_bin < histo_length;
current_bin += total_threads)
{
if(current_bin < histo_length)
{
uiType total = 0;
for(unsigned int j = 0; j < no_of_histograms; j++)
{
total += in[histo_length * j + current_bin];
}
out[current_bin] = total;
}
}
}
| 3bdedf6c38c4c8c6e7b56756f04e2282da5155fb.cu |
//#include "histogram_atomics.cuh"
#include <stdio.h>
#include <float.h>
//#include "device_functions.h"
#define %(c_precision_def)s
#define fType %(c_ftype)s
#define iType %(c_itype)s
#define uiType %(c_uitype)s
#define changeType %(c_changetype)s
// See ieee floating point specification
#define CUDART_INF_F __ull_as_fType(0x7ff0000000000000ULL)
#define CUDART_NEG_INF_F __ull_as_fType(0xfff0000000000000ULL)
__device__ fType __ull_as_fType(unsigned long long int a)
{
union {unsigned long long int a; fType b;} u;
u.a = a;
return u.b;
}
__device__ fType __change_as_fType(changeType a)
{
union {changeType a; fType b;} u;
u.a = a;
return u.b;
}
__device__ unsigned long long int __fType_as_change(fType a)
{
union {fType a; changeType b;} u;
u.a = a;
return u.b;
}
// You can use atomicCAS() to create an atomicMax for any type.
// See https://docs.nvidia.com/cuda/cuda-c-programming-guide/ for further
// information.
__device__ fType atomicMaxfType(fType *address, fType val)
{
changeType* address_as_ull = (changeType*) address;
changeType old = *address_as_ull, assumed;
while(val > __change_as_fType(old))
{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __fType_as_change(val));
}
return __change_as_fType(old);
}
__device__ fType atomicMinfType(fType *address, fType val)
{
changeType* address_as_ull = (changeType*) address;
changeType old = *address_as_ull, assumed;
while(val < __change_as_fType(old))
{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __fType_as_change(val));
}
return __change_as_fType(old);
}
__global__ void max_min_reduce(const fType *d_array, const iType n_elements,
const iType no_of_dimensions, fType *d_max, fType *d_min)
{
// The first blockDim.x entries of the shared array are used for the max
// reduction; the last blockDim.x entries are used for the min reduction.
extern __shared__ fType shared[];
fType *shared_max = (fType*)shared;
fType *shared_min = (fType*)&shared[blockDim.x];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + tid;
// Initialize the global max and min values. This is a separate loop to
// avoid race conditions.
for(int d = 0; d < no_of_dimensions; d++)
{
if(gid == 0)
{
d_max[d] = CUDART_NEG_INF_F;
d_min[d] = CUDART_INF_F;
}
}
// Max- and Min-Reduce for each dimension
for(int d = 0; d < no_of_dimensions; d++)
{
// Initialize shared memory with input memory
if(gid < n_elements)
{
shared_max[tid] = d_array[gid*no_of_dimensions+d];
shared_min[tid] = d_array[gid*no_of_dimensions+d];
gid += gridDim.x * blockDim.x;
}
// Continue the reduction in each block with the leftover elements of the
// input array: if there are more elements than threads, fold each further
// element into this thread's running max/min (grid-stride loop).
while(gid < n_elements)
{
shared_max[tid] = max(shared_max[tid],
d_array[gid*no_of_dimensions+d]);
shared_min[tid] = min(shared_min[tid],
d_array[gid*no_of_dimensions+d]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = blockIdx.x * blockDim.x + threadIdx.x;
// Blockwise reduction
for(int i=blockDim.x/2; i > 0; i >>= 1)
{
// First check: For reduce algorithm
// Second check: Do not access memory outside of our input elements
// Third check: If there are less elements than threads in one block
// do not access out of bounds. Only for "last block" and
// n_elements/blockDim.x != 0
if(tid < i && gid < n_elements && gid + i < n_elements)
{
shared_max[tid] = max(shared_max[tid], shared_max[tid + i]);
shared_min[tid] = min(shared_min[tid], shared_min[tid + i]);
}
__syncthreads();
}
// Now return max value of all blocks in global memory
if(tid == 0 && gid < n_elements)
{
atomicMaxfType(&d_max[d], shared_max[0]);
atomicMinfType(&d_min[d], shared_min[0]);
}
}
}
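// In short: each block reduces its slice of the input with a shared-memory
// tree reduction, and the per-block results are merged into d_max/d_min with
// the CAS-based atomics above, once per dimension.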
// Takes max and min value for each dimension and the number of bins and
// returns a histogram with equally sized bins.
__global__ void histogram_gmem_atomics(const fType *in, const iType length,
const iType no_of_dimensions, const iType no_of_bins,
const iType no_of_flat_bins, uiType *out, fType *max_in, fType *min_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// initialize temporary histogram for each block in global memory
uiType *gmem = out + no_of_flat_bins * blockIdx.x;
// Each thread writes zeros to global memory
for(unsigned int i = tid; i < no_of_flat_bins; i += blockDim.x)
{
gmem[i] = 0;
}
// Make sure the whole per-block histogram is zeroed before any thread of
// this block starts accumulating into it.
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType bin_width = (max_in[d]-min_in[d])/no_of_bins;
fType val = in[i + d];
// Get the bin in the current dimension
int tmp_bin = (val-min_in[d])/bin_width;
if(tmp_bin >= no_of_bins) tmp_bin--;
// Get the right place in the histogram
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&gmem[current_bin], 1);
}
}
}
// Takes edges for each dimension and the number of bins and
// returns a histogram with equally sized bins.
__global__ void histogram_gmem_atomics_with_edges(const fType *in,
const iType length, const iType no_of_dimensions,
const iType no_of_bins, const iType no_of_flat_bins,
uiType *out, const fType *edges_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// initialize temporary histogram for each block in global memory
uiType *gmem = out + no_of_flat_bins * blockIdx.x;
// Each thread writes zeros to global memory
for(unsigned int i = tid; i < no_of_flat_bins; i += blockDim.x)
{
gmem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory.
for(unsigned int i = gid * no_of_dimensions; i < length;
i += no_of_dimensions * total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType val = in[i + d];
int tmp_bin = 0;
while(val > edges_in[(no_of_bins+1)*d+tmp_bin+1] && tmp_bin < no_of_bins)
{
tmp_bin++;
}
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
__syncthreads();
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&gmem[current_bin], 1);
}
}
}
__global__ void histogram_smem_atomics(const fType *in, const iType length,
const iType no_of_dimensions, const iType no_of_bins,
const iType no_of_flat_bins, uiType *out, fType *max_in, fType *min_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
unsigned int threads_per_block = blockDim.x;
// initialize temporary accumulation array in shared memory
extern __shared__ uiType smem[];
// __shared__ uiType smem[no_of_bins * no_of_dimensions]; <- this is the idea
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
smem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType bin_width = (max_in[d]-min_in[d])/no_of_bins;
fType val = in[i + d];
// Get the bin in the current dimension
int tmp_bin = (val-min_in[d])/bin_width;
if(tmp_bin >= no_of_bins) tmp_bin--;
// Get the right place in the histogram
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&smem[current_bin], 1);
}
}
__syncthreads();
// Write partial histograms in global memory
out = &out[blockIdx.x * no_of_flat_bins];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
out[i] = smem[i];
}
}
__global__ void histogram_smem_atomics_with_edges(const fType *in,
const iType length, const iType no_of_dimensions,
const iType no_of_bins, const iType no_of_flat_bins,
uiType *out, const fType *edges_in)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
unsigned int threads_per_block = blockDim.x;
// initialize temporary accumulation array in shared memory
extern __shared__ uiType smem[];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
smem[i] = 0;
}
__syncthreads();
// Process input data by updating the histogram of each block in global
// memory. Each thread processes one element with all its dimensions at a
// time.
for(unsigned int i = gid*no_of_dimensions; i < length;
i+=no_of_dimensions*total_threads)
{
int current_bin = 0;
for(unsigned int d = 0; d < no_of_dimensions; d++)
{
fType val = in[i + d];
int tmp_bin = 0;
while(val > edges_in[(no_of_bins+1)*d+tmp_bin+1] && tmp_bin < no_of_bins)
{
tmp_bin++;
}
int power_bins = 1;
for(unsigned int k=no_of_dimensions-1; k > d; k--)
{
power_bins = no_of_bins * power_bins;
}
current_bin += tmp_bin * power_bins;
}
__syncthreads();
// Avoid illegal memory access
if(current_bin < no_of_flat_bins)
{
atomicAdd(&smem[current_bin], 1);
}
}
__syncthreads();
// Write partial histograms in global memory
uiType *overall_out = &out[blockIdx.x * no_of_flat_bins];
for(unsigned int i = tid; i < no_of_flat_bins; i+= threads_per_block)
{
overall_out[i] = smem[i];
}
}
__global__ void histogram_final_accum(const uiType *in,
iType no_of_histograms, uiType *out,
iType no_of_bins, iType histo_length, iType no_of_dimensions)
{
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int total_threads = blockDim.x * gridDim.x;
// Each thread merges values for another bin
for(unsigned int current_bin = gid; current_bin < histo_length;
current_bin += total_threads)
{
if(current_bin < histo_length)
{
uiType total = 0;
for(unsigned int j = 0; j < no_of_histograms; j++)
{
total += in[histo_length * j + current_bin];
}
out[current_bin] = total;
}
}
}
|
f9920ad043b07950c6dd936d6fa58a248b27cfeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T>
class BatchNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
const bool is_test = ctx.Attr<bool>("is_test");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool trainable_stats = ctx.Attr<bool>("trainable_statistics");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_stats);
// Get the size for each dimension.
// NCHW [batch_size, in_channels, in_height, in_width]
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
"The Input dim size should be between 2 and 5");
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
auto dtype = platform::CudnnDataType<T>::type;
const bool fast_nhwc_batch_norm =
test_mode ||
(dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent);
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
Tensor transformed_x(x->type());
Tensor transformed_y(y->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y,
&transformed_y);
} else {
transformed_x.ShareDataWith(*x);
transformed_y.ShareDataWith(*y);
}
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION_MIN(7, 0, 0)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
VLOG(3) << "Setting descriptors.";
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * D * C, 1, W * D * C, D * C, C};
}
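// Note: the cuDNN tensor descriptor below always receives the dimensions in
// NCHW order; the NHWC case is expressed purely through the stride array, so
// no additional transpose is needed for the descriptor itself.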
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
// Note: PERSISTENT not implemented for inference
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_,
test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Now, depending on whether we are running test or not, we have two paths.
if (test_mode || use_global_stats) {
// Only in test mode (or with use_global_stats) do we use the estimated
// mean/variance inputs for the computation.
const auto *est_mean = ctx.Input<Tensor>("Mean");
const auto *est_var = ctx.Input<Tensor>("Variance");
// Run inference mode.
PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(est_mean->dims()[0], C);
PADDLE_ENFORCE_EQ(est_var->dims()[0], C);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardInference(
handle,
// Note: PERSISTENT not implemented for inference
CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(), epsilon));
} else {
// if MomentumTensor is set, use MomentumTensor value, momentum
// is only used in this training branch
if (ctx.HasInput("MomentumTensor")) {
const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
Tensor mom_cpu;
TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu);
momentum = mom_cpu.data<float>()[0];
}
// Run training mode.
// obtain running mean and running inv var, and see if we need to
// initialize them.
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0));
if ((N * H * W * D) == 1) {
// Only 1 element in normalization dimension,
// skip the batch norm calculation, let y = x.
framework::TensorCopy(*x, ctx.GetPlace(), y);
} else {
double this_factor = 1. - momentum;
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
if (compute_format == DataLayout::kNHWC) {
called = true;
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
// Create the reserve space and workspace for batch norm.
// The reserve-space tensor is created for each batch_norm op and is
// consumed by the backward pass, so it must not be a temporary tensor.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*zDesc=*/nullptr,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*activationDesc=*/nullptr,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(
ctx.GetPlace(), transformed_x.type(), reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, CUDNN_BATCHNORM_OPS_BN,
CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(),
data_desc_, transformed_x.template data<T>(), nullptr,
nullptr, data_desc_, transformed_y.template data<T>(),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
nullptr, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
}
#endif
if (!called) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTraining(
handle, mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace())));
}
}
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_y, y);
}
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ void KeBNBackwardScaleBias(
const T *dy, const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const double epsilon, const int N,
const int C, const int HxW, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
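// For reference, the per-channel parameter gradients produced above are
// dscale[c] = sum over N*HxW of dy * (x - mean[c]) / sqrt(var[c] + eps)
// dbias[c] = sum over N*HxW of dy
// which matches the standard batch-norm backward formulas.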
template <typename T, framework::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon, const int C,
const int HxW, const int num, T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
template <typename T>
static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon, int C, int M,
const int num, const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, double epsilon, int C,
int M, const int num, const T *y, int grid2, const int block,
const hipStream_t &stream) {
PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream,
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ void BNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C, const int N, const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
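// The expression above is the standard batch-norm input gradient,
// dx = scale * inv_std * (dy - mean(dy) - (x - mean) * inv_std^2 * mean(dy * (x - mean))),
// where the means run over the N*HxW elements of a channel and inv_std is the
// saved inverse standard deviation passed in through "variance".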
template <typename T>
class BatchNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
// batch_norm with inplace == false takes X as the grad input, which is the
// same as the cuDNN batch_norm backward calculation; batch_norm with
// inplace == true only takes Y as input, and X has to be reconstructed by
// applying the inverse of the batch_norm transform to Y (sketched below).
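// Concretely, since y = scale * (x - mean) * inv_std + bias, the input is
// recovered as x = (y - bias) / scale / inv_std + mean, which is what the
// KeBNRestoreData / InplaceHelper path above implements.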
const Tensor *x;
bool is_inplace;
if (ctx.HasInput("Y")) {
x = ctx.Input<Tensor>("Y");
is_inplace = true;
PADDLE_ENFORCE_EQ(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD not inplace in inplace mode"));
} else {
x = ctx.Input<Tensor>("X");
is_inplace = false;
PADDLE_ENFORCE_NE(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD inplaced in non-inplace mode"));
}
const bool is_test = ctx.Attr<bool>("is_test");
PADDLE_ENFORCE_EQ(
is_test, false,
platform::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
"The Input dim size should be between 2 and 5");
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
d_x->mutable_data<T>(ctx.GetPlace());
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(scale->dims()[0], C);
auto dtype = platform::CudnnDataType<T>::type;
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
const bool fast_nhwc_batch_norm =
dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent &&
reserve_space != nullptr;
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
Tensor transformed_x(x->type());
Tensor transformed_d_y(d_y->type());
Tensor transformed_d_x(d_x->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x,
&transformed_d_x);
} else {
transformed_x.ShareDataWith(*x);
transformed_d_y.ShareDataWith(*d_y);
transformed_d_x.ShareDataWith(*d_x);
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const int num = transformed_x.numel();
const int block = 512;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = ::min(C, max_blocks);
auto stream = dev_ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION_MIN(7, 0, 0)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format, transformed_x.data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
saved_mean_data, saved_var_data, epsilon, C, H * W * D,
num, transformed_x.data<T>(), grid2, block, stream);
}
if (d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
if (compute_format == DataLayout::kNHWC) {
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/transformed_d_x.template mutable_data<T>(
ctx.GetPlace()),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
}
#endif
if (!called) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackward(
dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_d_y.template data<T>(), data_desc_,
transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean_data, saved_var_data));
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_d_x, d_x);
}
} else {
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNCHW>),
dim3(grid2), dim3(block), 0, dev_ctx.stream(),
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNHWC>),
dim3(grid2), dim3(block), 0, dev_ctx.stream(),
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
}
}
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
} else {
const auto *running_mean = ctx.Input<Tensor>("Mean");
const auto *running_var = ctx.Input<Tensor>("Variance");
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = *x;
inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
running_mean_data, running_var_data, epsilon, C,
H * W * D, num, x->data<T>(), grid2, block, stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<
T, framework::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<
T, framework::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, double>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, double>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
| f9920ad043b07950c6dd936d6fa58a248b27cfeb.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#include "cub/cub.cuh"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T>
class BatchNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
const bool is_test = ctx.Attr<bool>("is_test");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool trainable_stats = ctx.Attr<bool>("trainable_statistics");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_stats);
// Get the size for each dimension.
// NCHW [batch_size, in_channels, in_height, in_width]
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
"The Input dim size should be between 2 and 5");
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
auto dtype = platform::CudnnDataType<T>::type;
const bool fast_nhwc_batch_norm =
test_mode ||
(dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent);
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
Tensor transformed_x(x->type());
Tensor transformed_y(y->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y,
&transformed_y);
} else {
transformed_x.ShareDataWith(*x);
transformed_y.ShareDataWith(*y);
}
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION_MIN(7, 0, 0)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
VLOG(3) << "Setting descriptors.";
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * D * C, 1, W * D * C, D * C, C};
}
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
// Note: PERSISTENT not implemented for inference
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_,
test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Now, depending on whether we are running test or not, we have two paths.
if (test_mode || use_global_stats) {
// only when test we use input to do computation.
const auto *est_mean = ctx.Input<Tensor>("Mean");
const auto *est_var = ctx.Input<Tensor>("Variance");
// Run inference mode.
PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(est_mean->dims()[0], C);
PADDLE_ENFORCE_EQ(est_var->dims()[0], C);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardInference(
handle,
// Note: PERSISTENT not implemented for inference
CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(), epsilon));
} else {
      // If MomentumTensor is set, use its value; momentum is only used in
      // this training branch.
if (ctx.HasInput("MomentumTensor")) {
const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
Tensor mom_cpu;
TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu);
momentum = mom_cpu.data<float>()[0];
}
// Run training mode.
// obtain running mean and running inv var, and see if we need to
// initialize them.
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0));
if ((N * H * W * D) == 1) {
// Only 1 element in normalization dimension,
// skip the batch norm calculation, let y = x.
framework::TensorCopy(*x, ctx.GetPlace(), y);
} else {
double this_factor = 1. - momentum;
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
if (compute_format == DataLayout::kNHWC) {
called = true;
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
          // Create reserve space and workspace for batch norm.
          // A tensor is created for each batchnorm op because it is needed
          // again in the backward pass, so it must not be a temporary.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
                      /*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*zDesc=*/nullptr,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*activationDesc=*/nullptr,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(
ctx.GetPlace(), transformed_x.type(), reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, CUDNN_BATCHNORM_OPS_BN,
CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(),
data_desc_, transformed_x.template data<T>(), nullptr,
nullptr, data_desc_, transformed_y.template data<T>(),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
nullptr, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
}
#endif
if (!called) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTraining(
handle, mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace())));
}
}
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_y, y);
}
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
}
};
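// Backward kernel for d_scale and d_bias: one block per channel reduces
// dy * (x - mean[c]) and dy over the N*HxW elements of that channel with a
// cub::BlockReduce, then writes d_scale[c] = sum(dy*(x-mean)) / sqrt(var[c] + eps)
// and d_bias[c] = sum(dy).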
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ void KeBNBackwardScaleBias(
const T *dy, const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const double epsilon, const int N,
const int C, const int HxW, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
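// Backward kernel for d_x in the frozen-statistics (use_global_stats) path:
// each element is dx = dy * scale[c] / sqrt(var[c] + eps), where the channel
// index c is derived from the flat offset according to the layout.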
template <typename T, framework::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon, const int C,
const int HxW, const int num, T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
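// Inverts an in-place batch norm so the backward pass can still read x:
// recomputes x = (y - bias[c]) / scale[c] / variance[c] + mean[c] elementwise,
// with c the channel index for the given layout.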
template <typename T>
static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon, int C, int M,
const int num, const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
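// Functor wrapping KeBNRestoreData: checks that x and y really alias each
// other and launches the restore kernel on the given stream.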
template <typename T>
class InplaceHelper {
public:
void operator()(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, double epsilon, int C,
int M, const int num, const T *y, int grid2, const int block,
const cudaStream_t &stream) {
PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
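// Full backward d_x kernel for the training path (saved statistics): one block
// per channel first reduces sum(dy) and sum(dy*(x - mean)) over N*HxW, then
// applies dx = (dy - mean(dy) - (x - mean) * inv_var^2 * mean(dy*(x - mean)))
// * scale[c] * inv_var, where inv_var is the saved inverse standard deviation.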
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ void BNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C, const int N, const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, cub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T>
class BatchNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
    // When inplace is false, batch_norm takes X as the gradient input, which
    // matches the cuDNN batch_norm backward calculation. When inplace is true,
    // only Y is available, so X has to be reconstructed by applying the
    // inverse of the batch_norm transformation to Y.
const Tensor *x;
bool is_inplace;
if (ctx.HasInput("Y")) {
x = ctx.Input<Tensor>("Y");
is_inplace = true;
PADDLE_ENFORCE_EQ(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD not inplace in inplace mode"));
} else {
x = ctx.Input<Tensor>("X");
is_inplace = false;
PADDLE_ENFORCE_NE(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD inplaced in non-inplace mode"));
}
const bool is_test = ctx.Attr<bool>("is_test");
PADDLE_ENFORCE_EQ(
is_test, false,
platform::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const auto &x_dims = x->dims();
PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
"The Input dim size should be between 2 and 5");
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
d_x->mutable_data<T>(ctx.GetPlace());
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL);
PADDLE_ENFORCE_EQ(scale->dims()[0], C);
auto dtype = platform::CudnnDataType<T>::type;
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
const bool fast_nhwc_batch_norm =
dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent &&
reserve_space != nullptr;
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
Tensor transformed_x(x->type());
Tensor transformed_d_y(d_y->type());
Tensor transformed_d_x(d_x->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x,
&transformed_d_x);
} else {
transformed_x.ShareDataWith(*x);
transformed_d_y.ShareDataWith(*d_y);
transformed_d_x.ShareDataWith(*d_x);
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const int num = transformed_x.numel();
const int block = 512;
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
auto stream = dev_ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION_MIN(7, 0, 0)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format, transformed_x.data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
saved_mean_data, saved_var_data, epsilon, C, H * W * D,
num, transformed_x.data<T>(), grid2, block, stream);
}
if (d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
if (compute_format == DataLayout::kNHWC) {
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
                    /*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/transformed_d_x.template mutable_data<T>(
ctx.GetPlace()),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
}
#endif
if (!called) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackward(
dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_d_y.template data<T>(), data_desc_,
transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean_data, saved_var_data));
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_d_x, d_x);
}
} else {
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
BNBackwardData<T, block, framework::DataLayout::kNCHW><<<
grid2, block, 0, dev_ctx.stream()>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
} else {
if (d_x) {
BNBackwardData<T, block, framework::DataLayout::kNHWC><<<
grid2, block, 0, dev_ctx.stream()>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
}
}
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
} else {
const auto *running_mean = ctx.Input<Tensor>("Mean");
const auto *running_var = ctx.Input<Tensor>("Variance");
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = *x;
inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
running_mean_data, running_var_data, epsilon, C,
H * W * D, num, x->data<T>(), grid2, block, stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
KeBNBackwardData<
T, framework::DataLayout::kNCHW><<<grid1, block, 0, stream>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
KeBNBackwardData<
T, framework::DataLayout::kNHWC><<<grid1, block, 0, stream>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, double>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, double>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
|
8d0a1d5a56e902836a08a4a3c40b6d536e1768d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "struct.h"
#include "settings.h"
#include "jobcore.h"
#include <stdio.h>
// maybe it should be dynamic...
__constant__ Ampl_mod_coeff amod_d[MAX_DETECTORS];
void copy_amod_coeff(int nifo) {
int i;
Ampl_mod_coeff amod_coeff_tmp[nifo];
for(i=0; i<nifo; ++i){
amod_coeff_tmp[i] = ifo[i].amod;
}
hipMemcpyToSymbol(amod_d, amod_coeff_tmp, sizeof(Ampl_mod_coeff)*nifo,
0, hipMemcpyHostToDevice);
}
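// Computes the detector amplitude modulation functions a(t) (aa_d) and b(t)
// (bb_d) for every time sample from the per-detector coefficients held in
// constant memory and the sky-position terms passed in (cosalfr, sinalfr,
// c2d, c2sd, sindel, cosdel).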
__global__ void modvir_kern(double *aa_d, double *bb_d, double cosalfr, double sinalfr,
double c2d, double c2sd,
double *sinmodf_d, double *cosmodf_d,
double sindel, double cosdel, int Np, int idet) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<Np) {
double c = cosalfr * cosmodf_d[idx] + sinalfr * sinmodf_d[idx];
double s = sinalfr * cosmodf_d[idx] - cosalfr * sinmodf_d[idx];
double c2s = 2.*c*c;
double cs = c*s;
aa_d[idx] = amod_d[idet].c1*(2.-c2d)*c2s
+ amod_d[idet].c2*(2.-c2d)*2.*cs
+ amod_d[idet].c3*c2sd*c
+ amod_d[idet].c4*c2sd*s
- amod_d[idet].c1*(2.-c2d)
+ amod_d[idet].c5*c2d;
bb_d[idx] = amod_d[idet].c6*sindel*c2s
+ amod_d[idet].c7*sindel*2.*cs
+ amod_d[idet].c8*cosdel*c
+ amod_d[idet].c9*cosdel*s
- amod_d[idet].c6*sindel;
}
}
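// Per sample: projects the detector ephemeris DetSSB onto the sky position to
// get the barycentric time shift (shft/shftf), applies the heterodyne phase
// -het0*i - oms*shft to build the amplitude-weighted complex series
// xa = x*a*exp(i*phase) and xb = x*b*exp(i*phase), and precomputes the
// resampling positions tshift for spline interpolation; samples between N and
// nfft are zero-padded for the FFT.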
__global__ void tshift_pmod_kern(double shft1, double het0,
double ns0, double ns1, double ns2,
double *xDat_d,
hipfftDoubleComplex *xa_d, hipfftDoubleComplex *xb_d,
FLOAT_TYPE *shft_d, double *shftf_d,
double *tshift_d,
double *aa_d, double *bb_d,
double *DetSSB_d,
double oms, int N, int nfft, int interpftpad) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
double S = ns0 * DetSSB_d[i*3]
+ ns1 * DetSSB_d[i*3+1]
+ ns2 * DetSSB_d[i*3+2];
shft_d[i] = S;
shftf_d[i]= S - shft1;
/* phase mod */
    // why the minus sign?
double phase = -het0*i - oms * S;
double c = cos(phase), s = sin(phase);
xa_d[i].x = xDat_d[i] * aa_d[i] * c;
xa_d[i].y = xDat_d[i] * aa_d[i] * s;
xb_d[i].x = xDat_d[i] * bb_d[i] * c;
xb_d[i].y = xDat_d[i] * bb_d[i] * s;
//calculate time positions for spline interpolation
tshift_d[i] = interpftpad * ( i - shftf_d[i] );
// no need for this on gpu
//_tmp1[n][i] = aux->t2[i] + (double)(2*i)*ifo[n].sig.shft[i];
} else if (i < nfft) {
xa_d[i].x = xa_d[i].y = xb_d[i].x = xb_d[i].y = 0.;
}
}
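// After the forward FFT of length nfft, moves the upper half of the spectrum
// to the end of the longer Ninterp-length buffer and zeroes the bins in
// between, i.e. zero-pads the spectrum before the inverse FFT used for
// resampling.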
__global__ void resample_postfft(hipfftDoubleComplex *xa_d, hipfftDoubleComplex *xb_d,
int nfft, int Ninterp, int nyqst) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//move frequencies from second half of spectrum; loop length: nfft - nyqst =
// = nfft - nfft/2 - 1 = nfft/2 - 1
if (idx < nfft/2 - 1) {
int i = nyqst + Ninterp - nfft + idx;
int j = nyqst + idx;
xa_d[i].x=xa_d[j].x;
xa_d[i].y=xa_d[j].y;
xb_d[i].x=xb_d[j].x;
xb_d[i].y=xb_d[j].y;
}
//zero frequencies higher than nyquist, length: Ninterp - nfft
//loop length: Ninterp - nfft ~ nfft
if (idx < Ninterp - nfft) {
xa_d[nyqst+idx].x = xa_d[nyqst+idx].y = 0.;
xb_d[nyqst+idx].x = xb_d[nyqst+idx].y = 0.;
}
}
__global__ void compute_sincosmodf(double *s, double *c, double omr, int N) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) {
s[idx] = sin(omr*idx);
c[idx] = cos(omr*idx);
}
}
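// Applies the demodulation phase -idx*(het1 + sgnlt1*(idx + 2*shft[idx]))
// (frequency het1 and spindown sgnlt1) to the resampled series xar/xbr and
// writes the result to xa/xb; phase_mod_2 below is identical except that it
// accumulates (+=) so contributions from further detectors are added on top.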
__global__ void phase_mod_1(hipfftDoubleComplex *xa, hipfftDoubleComplex *xb,
hipfftDoubleComplex *xar, hipfftDoubleComplex *xbr,
double het1, double sgnlt1, double *shft,
int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// FLOAT_TYPE phase = - ( het1*idx + sgnlt1 * ( (double)idx*idx + 2 * idx * shft[idx] ) );
double phase = - idx * ( het1 + sgnlt1 * ( idx + 2 * shft[idx] ) );
double s, c;
sincos(phase, &s, &c);
xa[idx].x = xar[idx].x*c - xar[idx].y*s;
xa[idx].y = xar[idx].x*s + xar[idx].y*c;
xb[idx].x = xbr[idx].x*c - xbr[idx].y*s;
xb[idx].y = xbr[idx].x*s + xbr[idx].y*c;
// if (idx==1) printf("xa=(%f, %f)\n", xa[idx].x, xa[idx].y);
}
}
__global__ void phase_mod_2(hipfftDoubleComplex *xa, hipfftDoubleComplex *xb,
hipfftDoubleComplex *xar, hipfftDoubleComplex *xbr,
double het1, double sgnlt1, double *shft,
int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// FLOAT_TYPE phase = - ( het1*idx + sgnlt1 * ( (double)idx*idx + 2 * idx * shft[idx] ) );
double phase = - idx * ( het1 + sgnlt1 * ( idx + 2 * shft[idx] ) );
double s, c;
sincos(phase, &s, &c);
xa[idx].x += xar[idx].x*c - xar[idx].y*s;
xa[idx].y += xar[idx].x*s + xar[idx].y*c;
xb[idx].x += xbr[idx].x*c - xbr[idx].y*s;
xb[idx].y += xbr[idx].x*s + xbr[idx].y*c;
// if (idx==1) printf("xa=(%f, %f)\n", xa[idx].x, xa[idx].y);
}
}
extern __constant__ double maa_d, mbb_d;
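// F-statistic: F[i] = |Fa_i|^2 / maa + |Fb_i|^2 / mbb, with the normalisation
// constants maa_d and mbb_d held in constant memory.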
__global__ void compute_Fstat(hipfftDoubleComplex *xa, hipfftDoubleComplex *xb,
double *F, int N) { // N = nmax - nmin
int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i==0) printf("maa_d=%f\n", maa_d);
if (i < N) {
F[i] = ( xa[i].x*xa[i].x + xa[i].y*xa[i].y)/maa_d + (xb[i].x*xb[i].x + xb[i].y*xb[i].y)/mbb_d;
}
}
/*
__global__ void reduction_sum(double *in, double *out, int N) {
extern __shared__ double sd_data[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sd_data[tid] = (i<N) ? in[i] : 0;
__syncthreads();
for (int s = blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sd_data[tid] += sd_data[tid + s];
}
__syncthreads();
}
if (tid==0) out[blockIdx.x] = sd_data[0];
}
*/
__global__ void fstat_norm_simple(FLOAT_TYPE *F_d, int nav) {
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i;
FLOAT_TYPE *fr = F_d + blockIdx.x*nav;
FLOAT_TYPE mu = 0.;
for (i=0; i<nav; ++i)
mu += *fr++;
mu /= 2.*nav;
fr = F_d + blockIdx.x*nav;
for (i=0; i<nav; i++)
*fr++ /= mu;
}
// parameters are:
// [frequency, spindown, position1, position2, snr]
#define ADD_PARAMS_MACRO \
int p = atomicAdd(found, 1); \
params[p*NPAR + 0] = 2.0*M_PI*(idx)*fftpad*nfft+sgnl0; \
params[p*NPAR + 1] = sgnl1; \
params[p*NPAR + 2] = sgnl2; \
params[p*NPAR + 3] = sgnl3; \
params[p*NPAR + 4] = sqrt(2*(F[idx]-ndf));
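// Scans the F-statistic array for local maxima above the threshold val inside
// [nmin, nmax) and appends candidate parameters (frequency, spindown, sky
// positions, SNR) to the params buffer, using an atomic counter in found.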
__global__ void find_candidates(FLOAT_TYPE *F, FLOAT_TYPE *params, int *found, FLOAT_TYPE val,
int nmin, int nmax, double fftpad, double nfft, FLOAT_TYPE sgnl0, int ndf,
FLOAT_TYPE sgnl1, FLOAT_TYPE sgnl2, FLOAT_TYPE sgnl3) {
int idx = blockIdx.x * blockDim.x + threadIdx.x + nmin;
if (idx > nmin && idx < nmax && F[idx] >= val && F[idx] > F[idx+1] && F[idx] > F[idx-1]) {
ADD_PARAMS_MACRO
} else if (idx == nmin && F[idx] >= val && F[idx] > F[idx+1]) {
ADD_PARAMS_MACRO
} else if (idx == nmax-1 && F[idx] >= val && F[idx] > F[idx-1]) {
ADD_PARAMS_MACRO
}
}
//---------------------------------------------------------------
//second reduction used in fstat
__global__ void reduction_sum(FLOAT_TYPE *in, FLOAT_TYPE *out, int N) {
extern __shared__ FLOAT_TYPE sf_data[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sf_data[tid] = (i<N) ? in[i] : 0;
__syncthreads();
for (int s = blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sf_data[tid] += sf_data[tid + s];
}
__syncthreads();
}
if (tid==0) out[blockIdx.x] = 1.0f/sf_data[0];
}
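// Normalises the F-statistic: each bin is rescaled by 2*nav times the
// reciprocal block sum produced by reduction_sum over its nav-wide
// neighbourhood (mu holds 1/sum).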
__global__ void fstat_norm(FLOAT_TYPE *F, FLOAT_TYPE *mu, int N, int nav) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int block = i/nav; //block index
F[i] *= 2*nav * mu[block];
}
}
| 8d0a1d5a56e902836a08a4a3c40b6d536e1768d0.cu | #include "struct.h"
#include "settings.h"
#include "jobcore.h"
#include <stdio.h>
// maybe it should be dynamic...
__constant__ Ampl_mod_coeff amod_d[MAX_DETECTORS];
void copy_amod_coeff(int nifo) {
int i;
Ampl_mod_coeff amod_coeff_tmp[nifo];
for(i=0; i<nifo; ++i){
amod_coeff_tmp[i] = ifo[i].amod;
}
cudaMemcpyToSymbol(amod_d, amod_coeff_tmp, sizeof(Ampl_mod_coeff)*nifo,
0, cudaMemcpyHostToDevice);
}
__global__ void modvir_kern(double *aa_d, double *bb_d, double cosalfr, double sinalfr,
double c2d, double c2sd,
double *sinmodf_d, double *cosmodf_d,
double sindel, double cosdel, int Np, int idet) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<Np) {
double c = cosalfr * cosmodf_d[idx] + sinalfr * sinmodf_d[idx];
double s = sinalfr * cosmodf_d[idx] - cosalfr * sinmodf_d[idx];
double c2s = 2.*c*c;
double cs = c*s;
aa_d[idx] = amod_d[idet].c1*(2.-c2d)*c2s
+ amod_d[idet].c2*(2.-c2d)*2.*cs
+ amod_d[idet].c3*c2sd*c
+ amod_d[idet].c4*c2sd*s
- amod_d[idet].c1*(2.-c2d)
+ amod_d[idet].c5*c2d;
bb_d[idx] = amod_d[idet].c6*sindel*c2s
+ amod_d[idet].c7*sindel*2.*cs
+ amod_d[idet].c8*cosdel*c
+ amod_d[idet].c9*cosdel*s
- amod_d[idet].c6*sindel;
}
}
__global__ void tshift_pmod_kern(double shft1, double het0,
double ns0, double ns1, double ns2,
double *xDat_d,
cufftDoubleComplex *xa_d, cufftDoubleComplex *xb_d,
FLOAT_TYPE *shft_d, double *shftf_d,
double *tshift_d,
double *aa_d, double *bb_d,
double *DetSSB_d,
double oms, int N, int nfft, int interpftpad) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
double S = ns0 * DetSSB_d[i*3]
+ ns1 * DetSSB_d[i*3+1]
+ ns2 * DetSSB_d[i*3+2];
shft_d[i] = S;
shftf_d[i]= S - shft1;
/* phase mod */
    // why the minus sign?
double phase = -het0*i - oms * S;
double c = cos(phase), s = sin(phase);
xa_d[i].x = xDat_d[i] * aa_d[i] * c;
xa_d[i].y = xDat_d[i] * aa_d[i] * s;
xb_d[i].x = xDat_d[i] * bb_d[i] * c;
xb_d[i].y = xDat_d[i] * bb_d[i] * s;
//calculate time positions for spline interpolation
tshift_d[i] = interpftpad * ( i - shftf_d[i] );
// no need for this on gpu
//_tmp1[n][i] = aux->t2[i] + (double)(2*i)*ifo[n].sig.shft[i];
} else if (i < nfft) {
xa_d[i].x = xa_d[i].y = xb_d[i].x = xb_d[i].y = 0.;
}
}
__global__ void resample_postfft(cufftDoubleComplex *xa_d, cufftDoubleComplex *xb_d,
int nfft, int Ninterp, int nyqst) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//move frequencies from second half of spectrum; loop length: nfft - nyqst =
// = nfft - nfft/2 - 1 = nfft/2 - 1
if (idx < nfft/2 - 1) {
int i = nyqst + Ninterp - nfft + idx;
int j = nyqst + idx;
xa_d[i].x=xa_d[j].x;
xa_d[i].y=xa_d[j].y;
xb_d[i].x=xb_d[j].x;
xb_d[i].y=xb_d[j].y;
}
//zero frequencies higher than nyquist, length: Ninterp - nfft
//loop length: Ninterp - nfft ~ nfft
if (idx < Ninterp - nfft) {
xa_d[nyqst+idx].x = xa_d[nyqst+idx].y = 0.;
xb_d[nyqst+idx].x = xb_d[nyqst+idx].y = 0.;
}
}
__global__ void compute_sincosmodf(double *s, double *c, double omr, int N) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) {
s[idx] = sin(omr*idx);
c[idx] = cos(omr*idx);
}
}
__global__ void phase_mod_1(cufftDoubleComplex *xa, cufftDoubleComplex *xb,
cufftDoubleComplex *xar, cufftDoubleComplex *xbr,
double het1, double sgnlt1, double *shft,
int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// FLOAT_TYPE phase = - ( het1*idx + sgnlt1 * ( (double)idx*idx + 2 * idx * shft[idx] ) );
double phase = - idx * ( het1 + sgnlt1 * ( idx + 2 * shft[idx] ) );
double s, c;
sincos(phase, &s, &c);
xa[idx].x = xar[idx].x*c - xar[idx].y*s;
xa[idx].y = xar[idx].x*s + xar[idx].y*c;
xb[idx].x = xbr[idx].x*c - xbr[idx].y*s;
xb[idx].y = xbr[idx].x*s + xbr[idx].y*c;
// if (idx==1) printf("xa=(%f, %f)\n", xa[idx].x, xa[idx].y);
}
}
__global__ void phase_mod_2(cufftDoubleComplex *xa, cufftDoubleComplex *xb,
cufftDoubleComplex *xar, cufftDoubleComplex *xbr,
double het1, double sgnlt1, double *shft,
int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// FLOAT_TYPE phase = - ( het1*idx + sgnlt1 * ( (double)idx*idx + 2 * idx * shft[idx] ) );
double phase = - idx * ( het1 + sgnlt1 * ( idx + 2 * shft[idx] ) );
double s, c;
sincos(phase, &s, &c);
xa[idx].x += xar[idx].x*c - xar[idx].y*s;
xa[idx].y += xar[idx].x*s + xar[idx].y*c;
xb[idx].x += xbr[idx].x*c - xbr[idx].y*s;
xb[idx].y += xbr[idx].x*s + xbr[idx].y*c;
// if (idx==1) printf("xa=(%f, %f)\n", xa[idx].x, xa[idx].y);
}
}
extern __constant__ double maa_d, mbb_d;
__global__ void compute_Fstat(cufftDoubleComplex *xa, cufftDoubleComplex *xb,
double *F, int N) { // N = nmax - nmin
int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i==0) printf("maa_d=%f\n", maa_d);
if (i < N) {
F[i] = ( xa[i].x*xa[i].x + xa[i].y*xa[i].y)/maa_d + (xb[i].x*xb[i].x + xb[i].y*xb[i].y)/mbb_d;
}
}
/*
__global__ void reduction_sum(double *in, double *out, int N) {
extern __shared__ double sd_data[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sd_data[tid] = (i<N) ? in[i] : 0;
__syncthreads();
for (int s = blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sd_data[tid] += sd_data[tid + s];
}
__syncthreads();
}
if (tid==0) out[blockIdx.x] = sd_data[0];
}
*/
__global__ void fstat_norm_simple(FLOAT_TYPE *F_d, int nav) {
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i;
FLOAT_TYPE *fr = F_d + blockIdx.x*nav;
FLOAT_TYPE mu = 0.;
for (i=0; i<nav; ++i)
mu += *fr++;
mu /= 2.*nav;
fr = F_d + blockIdx.x*nav;
for (i=0; i<nav; i++)
*fr++ /= mu;
}
// parameters are:
// [frequency, spindown, position1, position2, snr]
#define ADD_PARAMS_MACRO \
int p = atomicAdd(found, 1); \
params[p*NPAR + 0] = 2.0*M_PI*(idx)*fftpad*nfft+sgnl0; \
params[p*NPAR + 1] = sgnl1; \
params[p*NPAR + 2] = sgnl2; \
params[p*NPAR + 3] = sgnl3; \
params[p*NPAR + 4] = sqrt(2*(F[idx]-ndf));
__global__ void find_candidates(FLOAT_TYPE *F, FLOAT_TYPE *params, int *found, FLOAT_TYPE val,
int nmin, int nmax, double fftpad, double nfft, FLOAT_TYPE sgnl0, int ndf,
FLOAT_TYPE sgnl1, FLOAT_TYPE sgnl2, FLOAT_TYPE sgnl3) {
int idx = blockIdx.x * blockDim.x + threadIdx.x + nmin;
if (idx > nmin && idx < nmax && F[idx] >= val && F[idx] > F[idx+1] && F[idx] > F[idx-1]) {
ADD_PARAMS_MACRO
} else if (idx == nmin && F[idx] >= val && F[idx] > F[idx+1]) {
ADD_PARAMS_MACRO
} else if (idx == nmax-1 && F[idx] >= val && F[idx] > F[idx-1]) {
ADD_PARAMS_MACRO
}
}
//---------------------------------------------------------------
//second reduction used in fstat
__global__ void reduction_sum(FLOAT_TYPE *in, FLOAT_TYPE *out, int N) {
extern __shared__ FLOAT_TYPE sf_data[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sf_data[tid] = (i<N) ? in[i] : 0;
__syncthreads();
for (int s = blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sf_data[tid] += sf_data[tid + s];
}
__syncthreads();
}
if (tid==0) out[blockIdx.x] = 1.0f/sf_data[0];
}
__global__ void fstat_norm(FLOAT_TYPE *F, FLOAT_TYPE *mu, int N, int nav) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
int block = i/nav; //block index
F[i] *= 2*nav * mu[block];
}
}
|
a0ccd6289e87cef54749abd42f8263f50f0926f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
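  // Streaming time-tiled pipeline: the shared-memory rows __tilevar_2..5 each
  // hold the current row of one stencil stage, while the t*/b* registers cache
  // the next and previous rows of that stage, so each thread loads only one
  // new input element per iteration of the row loop.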
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
t2 = input[__iter_3__+M*(__iter_y__+1)];
}
// Initial computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
}
// Rest of the computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
double __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void gradient (double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, 128);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| a0ccd6289e87cef54749abd42f8263f50f0926f9.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
t2 = input[__iter_3__+M*(__iter_y__+1)];
}
// Initial computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
}
// Rest of the computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
double __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void gradient (double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, 128);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, 128, input);
}
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
943b03a210775fb3c21083990451ea8213df1736.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/util/Exception.h>
namespace at {
namespace native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor<T, 4> input,
PackedTensorAccessor<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(
Tensor& output,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor");
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor",
self_.sizes());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Shape of input must match shape of indices");
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_forward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.numel(),
self.data<scalar_t>(),
indices.data<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data<scalar_t>());
}));
TORCH_CHECK(
hipGetLastError() == hipSuccess,
"max_unpooling2d_forward_kernel failed with error code ",
hipGetLastError());
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
max_unpooling2d_forward_out_cuda(output, self, indices, output_size);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor",
input.sizes());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Shape of indices should match shape of input");
TORCH_CHECK(input.numel() > 0, "Input must be non-empty");
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(
Tensor& output,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding);
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_forward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
output.data<scalar_t>(),
oT,
oH,
oW,
offsetZ);
TORCH_CHECK(
hipGetLastError() == hipSuccess,
"max_unpooling3d_forward_kernel failed with error code ",
hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
max_unpooling3d_forward_out_cuda(
output, self, indices, output_size, stride, padding);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Input should have same shape as indices");
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements");
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
batchSize = 1;
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
batchSize = self.size(0);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int count = self.numel();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_backward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data<scalar_t>(),
indices.data<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data<scalar_t>());
}));
TORCH_CHECK(
hipGetLastError() == hipSuccess,
"max_unpooling2d_backward_kernel failed with error code ",
hipGetLastError());
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self);
max_unpooling2d_backward_out_cuda(
grad_input, grad_output, self, indices, output_size);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding);
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_backward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output.data<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor<int64_t, 4>(),
grad_input_reshaped.packed_accessor<scalar_t, 4>(),
offsetZ);
TORCH_CHECK(
hipGetLastError() == hipSuccess,
"max_unpooling3d_backward_kernel failed with error code ",
hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self);
max_unpooling3d_backward_out_cuda(
grad_input, grad_output, self, indices, output_size, stride, padding);
return grad_input;
}
} // namespace native
} // namespace at
| 943b03a210775fb3c21083990451ea8213df1736.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/util/Exception.h>
namespace at {
namespace native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor<T, 4> input,
PackedTensorAccessor<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(
Tensor& output,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
TORCH_CHECK(self_.numel() > 0, "Input must be non-empty tensor");
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor",
self_.sizes());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Shape of input must match shape of indices");
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
max_unpooling2d_forward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.numel(),
self.data<scalar_t>(),
indices.data<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data<scalar_t>());
}));
TORCH_CHECK(
cudaGetLastError() == cudaSuccess,
"max_unpooling2d_forward_kernel failed with error code ",
cudaGetLastError());
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
max_unpooling2d_forward_out_cuda(output, self, indices, output_size);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor",
input.sizes());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Shape of indices should match shape of input");
TORCH_CHECK(input.numel() > 0, "Input must be non-empty");
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(
Tensor& output,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding);
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_forward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
output.data<scalar_t>(),
oT,
oH,
oW,
offsetZ);
TORCH_CHECK(
cudaGetLastError() == cudaSuccess,
"max_unpooling3d_forward_kernel failed with error code ",
cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
max_unpooling3d_forward_out_cuda(
output, self, indices, output_size, stride, padding);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64");
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Input should have same shape as indices");
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements");
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
batchSize = 1;
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
batchSize = self.size(0);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int count = self.numel();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
max_unpooling2d_backward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.data<scalar_t>(),
indices.data<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data<scalar_t>());
}));
TORCH_CHECK(
cudaGetLastError() == cudaSuccess,
"max_unpooling2d_backward_kernel failed with error code ",
cudaGetLastError());
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self);
max_unpooling2d_backward_out_cuda(
grad_input, grad_output, self, indices, output_size);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding);
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_backward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output.data<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor<int64_t, 4>(),
grad_input_reshaped.packed_accessor<scalar_t, 4>(),
offsetZ);
TORCH_CHECK(
cudaGetLastError() == cudaSuccess,
"max_unpooling3d_backward_kernel failed with error code ",
cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self);
max_unpooling3d_backward_out_cuda(
grad_input, grad_output, self, indices, output_size, stride, padding);
return grad_input;
}
} // namespace native
} // namespace at
|
736efcbd95b8f0c7d61ff97119404ce4669f04d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2, int sz )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
__shared__ float mychange[18*18];
float mnewa, molda;
mychange[tj*18+ti] = a[(j-1)*m+i-1];
if( ti < 2 ) mychange[tj*18+ti+16] = a[(j-1)*m+i+15];
if( tj < 2 ) mychange[(tj+16)*18+ti] = a[(j+15)*m+i-1];
if( tj < 2 && ti < 2 ) mychange[(tj+16)*18+ti+16] = a[(j+15)*m+i+15];
__syncthreads();
molda = mychange[(tj+1)*18+(ti+1)];
mnewa = w0*molda +
w1 * (mychange[(tj+1)*18+(ti )] + mychange[(tj )*18+(ti+1)] +
mychange[(tj+1)*18+(ti+2)] + mychange[(tj+2)*18+(ti+1)]) +
w2 * (mychange[(tj )*18+(ti )] + mychange[(tj+2)*18+(ti )] +
mychange[(tj )*18+(ti+2)] + mychange[(tj+2)*18+(ti+2)]);
newa[j*m+i] = mnewa;
__syncthreads();
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( mnewa - molda );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
//***************** 1k optimization changes start here ******************//
__syncthreads();
int xi = blockIdx.x + gridDim.x*blockIdx.y;
if(xi == 0) {
float mych = 0.0f;
int ni = ti+blockDim.x*tj;
if( ni < sz ) mych = lchange[ni];
int mm = 256;
while( mm <= sz ){
mych = fmaxf( mych, lchange[ni+mm] );
mm += 256;
}
mychange[ni] = mych;
__syncthreads();
nn = blockDim.x*blockDim.x;
while( (nn>>=1) > 0 ){
if( ni < nn )
mychange[ni] = fmaxf(mychange[ni], mychange[ni+nn]);
__syncthreads();
}
if( ni == 0 )
lchange[0] = mychange[0];
}
//***************** 1k optimization changes end here ******************//
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
float changeCheck = 0, oldchange = 0;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2, gx*gy );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
//printf("iters = %d, change = %f\n", iters, change);
if(change == oldchange)
{
changeCheck++;
}
oldchange = change;
if(changeCheck > sqrt(m))
{
change = (tol - .01);
}
printf("iters = %d, change = %f, changeCheck = %f, oldchange = %f\n", iters, change, changeCheck, oldchange);
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
| 736efcbd95b8f0c7d61ff97119404ce4669f04d6.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2, int sz )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
__shared__ float mychange[18*18];
float mnewa, molda;
mychange[tj*18+ti] = a[(j-1)*m+i-1];
if( ti < 2 ) mychange[tj*18+ti+16] = a[(j-1)*m+i+15];
if( tj < 2 ) mychange[(tj+16)*18+ti] = a[(j+15)*m+i-1];
if( tj < 2 && ti < 2 ) mychange[(tj+16)*18+ti+16] = a[(j+15)*m+i+15];
__syncthreads();
molda = mychange[(tj+1)*18+(ti+1)];
mnewa = w0*molda +
w1 * (mychange[(tj+1)*18+(ti )] + mychange[(tj )*18+(ti+1)] +
mychange[(tj+1)*18+(ti+2)] + mychange[(tj+2)*18+(ti+1)]) +
w2 * (mychange[(tj )*18+(ti )] + mychange[(tj+2)*18+(ti )] +
mychange[(tj )*18+(ti+2)] + mychange[(tj+2)*18+(ti+2)]);
newa[j*m+i] = mnewa;
__syncthreads();
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( mnewa - molda );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
//***************** 1k optimization changes start here ******************//
__syncthreads();
int xi = blockIdx.x + gridDim.x*blockIdx.y;
if(xi == 0) {
float mych = 0.0f;
int ni = ti+blockDim.x*tj;
if( ni < sz ) mych = lchange[ni];
int mm = 256;
while( mm <= sz ){
mych = fmaxf( mych, lchange[ni+mm] );
mm += 256;
}
mychange[ni] = mych;
__syncthreads();
nn = blockDim.x*blockDim.x;
while( (nn>>=1) > 0 ){
if( ni < nn )
mychange[ni] = fmaxf(mychange[ni], mychange[ni+nn]);
__syncthreads();
}
if( ni == 0 )
lchange[0] = mychange[0];
}
//***************** 1k optimization changes end here ******************//
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
float changeCheck = 0, oldchange = 0;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2, gx*gy );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
//printf("iters = %d, change = %f\n", iters, change);
if(change == oldchange)
{
changeCheck++;
}
oldchange = change;
if(changeCheck > sqrt(m))
{
change = (tol - .01);
}
printf("iters = %d, change = %f, changeCheck = %f, oldchange = %f\n", iters, change, changeCheck, oldchange);
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
497e22cd79712fc9593135dcc7e4e6e3608d69a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel.h"
#include "kernels.h"
#include "hip/hip_runtime.h"
#include "corecrt_math.h"
#include "utils.h"
// So that __syncthreads() passes the syntax check
#ifndef __HIPCC__
#define __HIPCC__
#endif // !__HIPCC__
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <cstdio>
namespace cudaKernel{
__global__ void getSubFireworkPositions(
float* startPoses, float* directions, const float* subDirs,
size_t nDirs, size_t stride, const float* relativePos,
size_t kShift, const float* shiftX_, const float* shiftY_) {
size_t bid = blockIdx.x;
size_t tid = threadIdx.x;
size_t idx = bid * blockDim.x + tid;
const float* dir = directions + bid * stride * 3;
float* targetDir = directions + (nDirs + bid * blockDim.x + tid) * 3;
startPoses[3 * idx] = dir[0] * *relativePos + shiftX_[kShift];
startPoses[3 * idx + 1] = dir[1] * *relativePos + shiftY_[kShift];
startPoses[3 * idx + 2] = dir[2] * *relativePos;
targetDir[0] = subDirs[tid * 3];
targetDir[1] = subDirs[tid * 3 + 1];
targetDir[2] = subDirs[tid * 3 + 2];
}
void getSubFireworkPositions(float* dStartPoses, float* dDirections,
const float* dSubDirs, size_t nDirs, size_t nSubDirs,
size_t nSubGroups, const float* dCentrifugalPos_, size_t startFrame,
size_t kShift, const float* dShiftX_, const float* dShiftY_) {
size_t stride = nDirs / nSubGroups;
const float* relativePos = dCentrifugalPos_ + startFrame;
getSubFireworkPositions << <nSubGroups, nSubDirs >> > (
dStartPoses, dDirections, dSubDirs, nDirs,
stride, relativePos, kShift, dShiftX_, dShiftY_);
CUDACHECK(hipGetLastError());
CUDACHECK(hipDeviceSynchronize());
}
} | 497e22cd79712fc9593135dcc7e4e6e3608d69a9.cu | #include "kernel.h"
#include "kernels.h"
#include "cuda_runtime.h"
#include "corecrt_math.h"
#include "utils.h"
// To let __syncthreads() pass the syntax check
#ifndef __CUDACC__
#define __CUDACC__
#endif // !__CUDACC__
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cstdio>
namespace cudaKernel{
__global__ void getSubFireworkPositions(
float* startPoses, float* directions, const float* subDirs,
size_t nDirs, size_t stride, const float* relativePos,
size_t kShift, const float* shiftX_, const float* shiftY_) {
size_t bid = blockIdx.x;
size_t tid = threadIdx.x;
size_t idx = bid * blockDim.x + tid;
const float* dir = directions + bid * stride * 3;
float* targetDir = directions + (nDirs + bid * blockDim.x + tid) * 3;
startPoses[3 * idx] = dir[0] * *relativePos + shiftX_[kShift];
startPoses[3 * idx + 1] = dir[1] * *relativePos + shiftY_[kShift];
startPoses[3 * idx + 2] = dir[2] * *relativePos;
targetDir[0] = subDirs[tid * 3];
targetDir[1] = subDirs[tid * 3 + 1];
targetDir[2] = subDirs[tid * 3 + 2];
}
void getSubFireworkPositions(float* dStartPoses, float* dDirections,
const float* dSubDirs, size_t nDirs, size_t nSubDirs,
size_t nSubGroups, const float* dCentrifugalPos_, size_t startFrame,
size_t kShift, const float* dShiftX_, const float* dShiftY_) {
size_t stride = nDirs / nSubGroups;
const float* relativePos = dCentrifugalPos_ + startFrame;
getSubFireworkPositions<<<nSubGroups, nSubDirs>>>(
dStartPoses, dDirections, dSubDirs, nDirs,
stride, relativePos, kShift, dShiftX_, dShiftY_);
CUDACHECK(cudaGetLastError());
CUDACHECK(cudaDeviceSynchronize());
}
} |
849cb0ece0c63ce08fd10299318df65883903332.hip | // !!! This is a file automatically generated by hipify!!!
//##############################################################################
//# #
//# Virus Model #
//# #
//##############################################################################
// nvcc SARS-CoV-2.cu -o program.out && ./program.out
/*
Using camel case:
C Functions start with lower case
Variables start with upper case
*/
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <ctime>
#include <math.h>
#include <random>
#include <iostream>
using namespace std;
#include <chrono>
#define PI 3.1415926535897932f
#define CODETESTINGCONDITIONS 0
#define RUNCPU 0
//Globals to set up the kernels
dim3 BlockConfig, GridConfig;
//Simulation Parameters
int CELL2CELL = 0;
int FREECELL = 1;
float timestep = 0.005; //Time step for model (No larger than 0.01 hour) 0.005 hr = 18 sec, (1/3600) hr = 1 sec
float endtime = (2*365)*24; //Days in hours
int Save = (1/timestep); //number of time steps between saves to file; (1/timestep) results in 1 save every simulated hour
int NumberOfLayers = 607; //607 layers gives roughly a million hexagons in a circle
int StartRuns = 0;
int NumberOfRuns = 100;
//Physical Parameters
//float MOI = pow(10,0); //pow(10,-5) to 1
float beta = 84.0; //2.3*pow(10,-7); //Infection rate, units: per hour
float rho = 19919.2; //1920
float D = 4.8*pow(10,-12); //Diffusion rate at 37 degrees celsius unit: m^2/s //pow(6*10,-12) //3.96e-8
float c = 0.0049; //Clearance rate, units: per hour
float deltx = 25.0*pow(10,-6);
float deltxprime = deltx*2;
float Dtsx2 = D*timestep*pow(deltxprime,-2);
//Probability Constants
float TauI = 0.624; //Avg time for infection
float TauE = 5.88; //Avg time for eclipse
float ne = 30.0; //Number of eclipse compartments?
float ni = 100.0; //Number of infected compartments?
//float probi = 0.2; //Probability per unit time of cell to cell infection (/hour)
//Global Variables
char Path_to_Folder[100] = "";
char Directroy[100] = "";
char** LocationData;
char* cells;
char* cells_GPU;
float* ecl;
float* ecl_GPU;
float* inf;
float* inf_GPU;
float* vtemp;
float* vtemp_GPU;
float* th;
float* th_GPU;
float* ut;
float* ut_GPU;
float* EclipsePhaseLength;
float* EclipsePhaseLength_GPU;
float* InfectionPhaseLength;
float* InfectionPhaseLength_GPU;
int NumberOfCells;
int NumberDead;
int NumberDead1;
int NumberInfected1;
int NumberEclipse1;
int NumberHealthy1;
float AmountOfVirus;
hiprandState_t *state;
//Functions
float Te(float TauE, float ne){
// Picks a random number from the gamma distribution
// The number is to be used as a time step in the Eclipse Time Matrix
random_device rd;
default_random_engine generator(rd());
gamma_distribution<double> distribution(TauE, TauE/sqrt(ne));
return distribution(generator);
}
float Ti(float TauI, float ni){
// Picks a random number from the gamma distribution
// The number is to be used as a time step in the Infected Time Matrix
random_device rd;
default_random_engine generator(rd());
gamma_distribution<double> distribution(TauI, TauI/sqrt(ni));
return distribution(generator);
}
float PU1(){
// Picks a random number from a uniform distribution
// The draw is compared against per-time-step infection probabilities
random_device rd;
default_random_engine generator(rd());
uniform_real_distribution<double> distribution(0.0,1.0);
return distribution(generator);
}
void creatingPathToFolderAndDirectory(int BigIndex, int NumberOfLayers, float MOI, float probi){
char TransmissionType[10] = "";
if (CELL2CELL == 1){
if (FREECELL == 1){
strcat(TransmissionType,"Both");
}
else {
strcat(TransmissionType,"CELL2CELL");
}
}
else if(CELL2CELL == 0){
if (FREECELL == 0){
strcat(TransmissionType,"Neither");
}
else{
strcat(TransmissionType,"FREECELL");
}
}
char Buffer[5]; //Buffer String For Conversion To Char
char TheCurrentTime[50];
time_t RawTime = time(NULL);
tm* SpecificMoment = localtime(&RawTime);
strcpy(Path_to_Folder, "");
strcpy(Directroy, "");
if(RUNCPU == 1){
strcat(Path_to_Folder,"ViralModel/");
}
else{
strcat(Path_to_Folder,"/media/baylorfain/HDD/SARS-CoV-2/");
}
// strftime(TheCurrentTime, 50, "%m-%d/%I:%M", SpecificMoment);
strftime(TheCurrentTime, 50, "%m-%d/", SpecificMoment);
strcat(Path_to_Folder,TheCurrentTime);
// strcat(Path_to_Folder,"_");
sprintf(Buffer,"%d",NumberOfLayers);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%d",BigIndex);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,TransmissionType);
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%.1f",log10(MOI));
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,"MOI");
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%.1f",probi);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,"probi");
strcat(Directroy,"mkdir -p ");
strcat(Directroy,Path_to_Folder);
int check = system(strdup(Directroy));
if(check != 0){
exit(0);
}
}
void creatingCellLocations(){
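// Builds a hexagonal lattice in cube coordinates, discards hexagons outside the
// bounding circle, marks the surviving sites 'h' (healthy) in LocationData, and
// writes the layout and grid parameters to disk.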
float SideLenght = (2.0/3.0);
int RadiusScale = 0;
for(int i=0; i<NumberOfLayers; i++){
if(i == 0){
RadiusScale = RadiusScale + 1;
}
else{
if((i)%2 == 1){
RadiusScale = RadiusScale + 1;
}
else{
RadiusScale = RadiusScale + 2;
}
}
}
float RadiusOfCircle = SideLenght*RadiusScale;
int count = 0;
for(int i=0; i<NumberOfLayers; i++){
count = count + i;
}
int NumberOfHexagons=(count)*6+1;
float** coord;
int n = NumberOfHexagons;
int m = 3;
coord = (float**) calloc(n,sizeof(float*));
for (int i = 0; i < n; i++){
coord[i] = (float*) calloc(m,sizeof(float));
}
float** percyclecoord;
n = NumberOfHexagons;
m = 3;
percyclecoord = (float**) calloc(n,sizeof(float*));
for (int i = 0; i < n; i++){
percyclecoord[i] = (float*) calloc(m,sizeof(float));
}
int temp;
for(int j=0; j<NumberOfLayers; j++){
for(int i=0; i<(2*j); i++){
if(i < j){
temp = i;
}
percyclecoord[i+(j-1)*j+1][0] = -temp-1;
percyclecoord[i+(j-1)*j+1][1] = temp+j-i;
percyclecoord[i+(j-1)*j+1][2] = -j+1+i;
}
}
float c0[3] = {percyclecoord[0][0], percyclecoord[0][1], percyclecoord[0][2]};
coord[0][2] = c0[2];
coord[0][1] = c0[1];
coord[0][0] = c0[0];
count = 0;
for(int j=0; j<(NumberOfHexagons/3); j++){
for(int i=0; i<3; i++){
coord[(i+0)%3+3*j+1][2] = percyclecoord[j+1][i]+c0[i];
coord[(i+1)%3+3*j+1][1] = percyclecoord[j+1][i]+c0[i];
coord[(i+2)%3+3*j+1][0] = percyclecoord[j+1][i]+c0[i];
}
}
float hi = coord[0][0];
float vi = coord[0][2];
float xmin = INFINITY;
float xcoord;
float ycoord;
double dist;
for(int i=0; i<NumberOfHexagons; i++){
xcoord = coord[i][0];
if(coord[i][0] < xmin){
xmin = coord[i][0];
}
ycoord = (2.0*sin(PI*(60.0/180.0))*(coord[i][1]-coord[i][2])/3.0)+vi;
dist = sqrtf(pow(double(xcoord-hi),2.0)+pow(double(ycoord-vi),2.0));
if(dist >= RadiusOfCircle){
coord[i][0] = 5000.0;
coord[i][1] = 0.0;
coord[i][2] = 0.0;
}
}
n = ((2*NumberOfLayers)-1);
m = ((2*NumberOfLayers)-1);
LocationData = (char**) malloc(n*sizeof(char*));
for(int j=0; j<n; j++){
LocationData[j] = (char*) malloc(m*sizeof(char));
for(int i=0; i<m; i++){
LocationData[j][i] = 'o';
}
}
NumberOfCells = 0;
for(int i=0; i<NumberOfHexagons; i++){
if(coord[i][0] != 5000.0){
LocationData[int(coord[i][2])-int(xmin)][int(coord[i][0])-int(xmin)] = 'h';
NumberOfCells = NumberOfCells + 1;
}
}
char File1[100] = "";
strcat(File1,Path_to_Folder);
strcat(File1,"/InitialCellLocations.txt");
FILE *outfile1 = fopen(File1,"a");
if (outfile1 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile1,"%c,",LocationData[i][j]);
}
fprintf(outfile1,"\n");
}
fclose(outfile1);
char File2[100] = "";
strcat(File2,Path_to_Folder);
strcat(File2,"/Parameters.txt");
FILE *outfile2 = fopen(File2,"w");
if (outfile2 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile2, "Hexagon Side Length = %f\n", SideLenght);
fprintf(outfile2, "Number of Layers = %d\n", NumberOfLayers);
fprintf(outfile2, "Radius of Circle = %f\n", RadiusOfCircle);
fprintf(outfile2, "Number of Cells = %d\n", NumberOfCells);
fclose(outfile2);
for (int i = 0; i < NumberOfHexagons; i++){
free(coord[i]);
}
free(coord);
for (int i = 0; i < NumberOfHexagons; i++){
free(percyclecoord[i]);
}
free(percyclecoord);
}
void allocateMemory(int Nx, int Ny){
//Produces a matrix for the cells
cells = (char*) malloc(Nx*Ny*2*sizeof(char));
//Produces a matrix that will track the amount of virus above each cell
vtemp = (float*) malloc(Nx*Ny*2*sizeof(float));
//Produces a universal time matrix (ut)
ut = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for after eclipse phase (e)
ecl = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for after infection phase (i)
inf = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for healthy cells (th)
th = (float*) malloc(Nx*Ny*sizeof(float));
//Produces an array of eclipse phase durations for cells
EclipsePhaseLength = (float*) malloc(Nx*Ny*sizeof(float));
//Produces an array of infection phase durations for cells
InfectionPhaseLength = (float*) malloc(Nx*Ny*sizeof(float));
}
void initailConditions(int Nx, int Ny){
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
for(int k=0;k<2;k++){
cells[i+Nx*j+Nx*Ny*k] = LocationData[i][j];
vtemp[i+Nx*j+Nx*Ny*k] = 0.0;
}
ut[i+Nx*j] = 0.0;
ecl[i+Nx*j] = 0.0;
inf[i+Nx*j] = 0.0;
th[i+Nx*j] = 0.0;
EclipsePhaseLength[i+Nx*j] = Te(TauE,ne);
InfectionPhaseLength[i+Nx*j] = Ti(TauI,ni);
}
}
}
void infectANumberOfCellsRandomly(int Nx, int Ny, int Ni){
if(CODETESTINGCONDITIONS == 1){
cells[(NumberOfLayers-1)+Nx*(NumberOfLayers-1)+Nx*Ny*0] = 'i';
cells[(NumberOfLayers-1)+Nx*(NumberOfLayers-1)+Nx*Ny*1] = 'i'; //Only the center cell
}
else {
srand(time(NULL));
int randx;
int randy;
int NumberOfInfectedCellsCount = 0;
while(NumberOfInfectedCellsCount < Ni){
randx = (rand()%Nx);
randy = (rand()%Ny);
if((cells[randx+Nx*randy+Nx*Ny*0] != 'o') && (cells[randx+Nx*randy+Nx*Ny*0] == 'h')){
cells[randx+Nx*randy+Nx*Ny*0] = 'e';
cells[randx+Nx*randy+Nx*Ny*1] = 'e';
NumberOfInfectedCellsCount = NumberOfInfectedCellsCount + 1;
}
}
}
}
void printToFileCellAndVirusInitial(int Nx, int Ny, int NumberOfLayers){
char File3[100] = "";
strcat(File3,Path_to_Folder);
strcat(File3,"/cells_over_time.txt");
FILE *outfile3 = fopen(File3,"w");
if (outfile3 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile3,"%c,",LocationData[i][j]);
}
fprintf(outfile3,"\n");
}
fclose(outfile3);
char File4[100] = "";
strcat(File4,Path_to_Folder);
strcat(File4,"/virus_over_time.txt");
FILE *outfile4 = fopen(File4,"w");
if (outfile4 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile4,"%f,",0.0);
}
fprintf(outfile4,"\n");
}
fclose(outfile4);
}
void printToFileCellAndVirusAnalysisInitial(int Nx, int Ny){
NumberDead1 = 0;
NumberInfected1 = 0;
NumberEclipse1 = 0;
NumberHealthy1 = 0;
AmountOfVirus = 0.0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
AmountOfVirus = AmountOfVirus + vtemp[i+Nx*j+Nx*Ny*0];
if(cells[i+Nx*j+Nx*Ny*0] == 'd'){
NumberDead1 = NumberDead1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected1 = NumberInfected1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse1 = NumberEclipse1 +1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy1 = NumberHealthy1 + 1;
}
}
}
char File9[100] = "";
strcat(File9,Path_to_Folder);
strcat(File9,"/PerTimeStep.txt");
FILE *outfile9 = fopen(File9,"w");
if (outfile9 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile9,"%0.0f, %d, %d, %d, %d, %f,", 0.0, NumberHealthy1, NumberEclipse1, NumberInfected1, NumberDead1, AmountOfVirus);
fprintf(outfile9,"\n");
fclose(outfile9);
}
void cerialViralTransmission(int Nx, int Ny, int cell2cell, int freecell, float probi){
//The Healthy Cells' time
int NumberHealthy = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy = NumberHealthy + 1;
}
}
}
int** LocationHealthy;
LocationHealthy = (int**) malloc(NumberHealthy*sizeof(int*));
for (int i=0; i<NumberHealthy; i++){
LocationHealthy[i] = (int*) malloc(2*sizeof(int));
}
int Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
LocationHealthy[Indexer][0] = i;
LocationHealthy[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberHealthy != 0){
int Row;
int Column;
for(int j=0; j<NumberHealthy; j++){
Row = LocationHealthy[j][0];
Column = LocationHealthy[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
th[Row+Nx*Column] = th[Row+Nx*Column] + timestep;
// "th" is the time matrix for healthy cells
// "timestep" is the time step for the model
}
}
for (int i = 0; i < NumberHealthy; i++){
free(LocationHealthy[i]);
}
free(LocationHealthy);
//Eclipse phase -> Infection
int NumberEclipse = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse = NumberEclipse + 1;
}
}
}
int** LocationEclipse;
LocationEclipse = (int**) malloc(NumberEclipse*sizeof(int*));
for (int i=0; i<NumberEclipse; i++){
LocationEclipse[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
LocationEclipse[Indexer][0] = i;
LocationEclipse[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberEclipse != 0){
int Row;
int Column;
for(int j=0; j<NumberEclipse; j++){
Row = LocationEclipse[j][0];
Column = LocationEclipse[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
if((ecl[Row+Nx*Column] + th[Row+Nx*Column]) < ut[Row+Nx*Column]){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
inf[Row+Nx*Column] = inf[Row+Nx*Column] + Ti(TauI, ni);
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "ut" is the universal time matrix
// "cells" is the matrix of cells
// "inf" is the time matrix for after infection phase
}
}
}
for (int i = 0; i < NumberEclipse; i++){
free(LocationEclipse[i]);
}
free(LocationEclipse);
//Infection spreads
if(cell2cell == 1){
int NumberInfected = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
}
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
int Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[Indexer][0] = i;
LocationInfected[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location of a cell
// #Column is the column location for a cell
int AboveRowExists = 1;
int LeftColumnExists = 1;
int BelowRowExists = 1;
int RightColumnExists = 1;
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRowExists = 0;
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumnExists = 0;
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > Ny-1){
BelowRowExists = 0;
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > Nx-1){
RightColumnExists = 0;
RightColumn = 0;
}
if(PU1()<probi*timestep){
if((LeftColumnExists == 1) && (cells[Row+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*LeftColumn] = Te(TauE,ne);
}
}
if((RightColumnExists == 1) && (cells[Row+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*RightColumn] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (cells[AboveRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*Column] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (cells[BelowRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*Column] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (RightColumnExists == 1) && (cells[AboveRow+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*RightColumn] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (LeftColumnExists == 1) && (cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*LeftColumn] = Te(TauE,ne);
}
}
}
}
}
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
}
//Virus Spreads
int NumberVirus = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
NumberVirus = NumberVirus + 1;
}
}
}
int** LocationVirus;
LocationVirus = (int**) malloc(NumberVirus*sizeof(int*));
for (int i=0; i<NumberVirus; i++){
LocationVirus[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
LocationVirus[Indexer][0] = i;
LocationVirus[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
int Row;
int Column;
for(int j=0; j<NumberVirus; j++){
Row = LocationVirus[j][0];
Column = LocationVirus[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+Nx*Column+Nx*Ny*0] == 'i'){
rho2 = rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (Ny-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (Nx-1)){
RightColumn = Column;
}
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
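// Explicit Euler update of the virus field on the hexagonal grid:
// NNN sums the virus over the six neighbours; the new value adds
// production (rho2), hexagonal diffusion in/out (Dtsx2), and clearance (c).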
float NNN = (vtemp[AboveRow+Nx*Column+Nx*Ny*0] + vtemp[AboveRow+Nx*RightColumn+Nx*Ny*0] + vtemp[Row+Nx*RightColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*Column+Nx*Ny*0] + vtemp[Row+Nx*LeftColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*LeftColumn+Nx*Ny*0]);
float VirusProduced = rho2*timestep;
float VirusDecay = c*vtemp[Row+Nx*Column+Nx*Ny*0]*timestep;
float VirusOut = 4.0*Dtsx2*vtemp[Row+Nx*Column+Nx*Ny*0];
float VirusIn = 2.0*Dtsx2*NNN/3.0;
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+Nx*Column+Nx*Ny*1] < pow(10.0,-10.0)){
vtemp[Row+Nx*Column+Nx*Ny*1] = 0.0;
}
//probability of infection by free virus, using an adaptive time step
if(freecell == 1){
float probaility = PU1();
float adaptedtimestep = timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
}
}
}
}
for (int i = 0; i < NumberVirus; i++){
free(LocationVirus[i]);
}
free(LocationVirus);
//kills cells
int NumberInfected = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
}
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[Indexer][0] = i;
LocationInfected[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
if(ut[Row+Nx*Column] > (inf[Row+Nx*Column] + ecl[Row+Nx*Column] + th[Row+Nx*Column])){
cells[Row+Nx*Column+Nx*Ny*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
}
// "ut" is the universal time matrix
// "inf" is the time matrix for after infection phase
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "cells" is the matrix of cells
}
}
}
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
vtemp[i+Nx*j+Nx*Ny*0] = vtemp[i+Nx*j+Nx*Ny*1];
cells[i+Nx*j+Nx*Ny*0] = cells[i+Nx*j+Nx*Ny*1];
}
}
//The Universal Time for the cells is kept here (ut)
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
ut[i+Nx*j] = ut[i+Nx*j] + timestep;
}
}
}
void modifiedCerialViralTransmission(int Nx, int Ny, int cell2cell, int freecell, float probi){
int NumberHealthy = 0;
int NumberEclipse = 0;
int NumberInfected = 0;
int NumberVirus = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy = NumberHealthy + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse = NumberEclipse + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
NumberVirus = NumberVirus + 1;
}
}
}
int** LocationHealthy;
LocationHealthy = (int**) malloc(NumberHealthy*sizeof(int*));
for (int i=0; i<NumberHealthy; i++){
LocationHealthy[i] = (int*) malloc(2*sizeof(int));
}
int** LocationEclipse;
LocationEclipse = (int**) malloc(NumberEclipse*sizeof(int*));
for (int i=0; i<NumberEclipse; i++){
LocationEclipse[i] = (int*) malloc(2*sizeof(int));
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
int** LocationVirus;
LocationVirus = (int**) malloc(NumberVirus*sizeof(int*));
for (int i=0; i<NumberVirus; i++){
LocationVirus[i] = (int*) malloc(2*sizeof(int));
}
int IndexerH = 0;
int IndexerE = 0;
int IndexerI = 0;
int IndexerO = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
LocationHealthy[IndexerH][0] = i;
LocationHealthy[IndexerH][1] = j;
IndexerH = IndexerH + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
LocationEclipse[IndexerE][0] = i;
LocationEclipse[IndexerE][1] = j;
IndexerE = IndexerE + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[IndexerI][0] = i;
LocationInfected[IndexerI][1] = j;
IndexerI = IndexerI + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
LocationVirus[IndexerO][0] = i;
LocationVirus[IndexerO][1] = j;
IndexerO = IndexerO + 1;
}
}
}
//The Healthy Cells' time
if(NumberHealthy != 0){
int Row;
int Column;
for(int j=0; j<NumberHealthy; j++){
Row = LocationHealthy[j][0];
Column = LocationHealthy[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
th[Row+Nx*Column] = th[Row+Nx*Column] + timestep;
// "th" is the time matrix for healthy cells
// "timestep" is the time step for the model
}
}
//Eclipse phase -> Infection
if(NumberEclipse != 0){
int Row;
int Column;
for(int j=0; j<NumberEclipse; j++){
Row = LocationEclipse[j][0];
Column = LocationEclipse[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
if((ecl[Row+Nx*Column] + th[Row+Nx*Column]) < ut[Row+Nx*Column]){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
inf[Row+Nx*Column] = inf[Row+Nx*Column] + Ti(TauI, ni);
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "ut" is the universal time matrix
// "cells" is the matrix of cells
// "inf" is the time matrix for after infection phase
}
}
}
//Infection spreads
if(cell2cell == 1){
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location of a cell
// #Column is the column location for a cell
int AboveRowExists = 1;
int LeftColumnExists = 1;
int BelowRowExists = 1;
int RightColumnExists = 1;
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRowExists = 0;
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumnExists = 0;
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > Ny-1){
BelowRowExists = 0;
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > Nx-1){
RightColumnExists = 0;
RightColumn = 0;
}
if(PU1()<probi*timestep){
if((LeftColumnExists == 1) && (cells[Row+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*LeftColumn] = Te(TauE,ne);
}
}
if((RightColumnExists == 1) && (cells[Row+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*RightColumn] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (cells[AboveRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*Column] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (cells[BelowRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*Column] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (RightColumnExists == 1) && (cells[AboveRow+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*RightColumn] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (LeftColumnExists == 1) && (cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*LeftColumn] = Te(TauE,ne);
}
}
}
}
}
}
//Virus Spreads
int Row;
int Column;
for(int j=0; j<NumberVirus; j++){
Row = LocationVirus[j][0];
Column = LocationVirus[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+Nx*Column+Nx*Ny*0] == 'i'){
rho2 = rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (Ny-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (Nx-1)){
RightColumn = Column;
}
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
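// Explicit Euler update of the virus field on the hexagonal grid:
// NNN sums the virus over the six neighbours; the new value adds
// production (rho2), hexagonal diffusion in/out (Dtsx2), and clearance (c).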
float NNN = (vtemp[AboveRow+Nx*Column+Nx*Ny*0] + vtemp[AboveRow+Nx*RightColumn+Nx*Ny*0] + vtemp[Row+Nx*RightColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*Column+Nx*Ny*0] + vtemp[Row+Nx*LeftColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*LeftColumn+Nx*Ny*0]);
float VirusProduced = rho2*timestep;
float VirusDecay = c*vtemp[Row+Nx*Column+Nx*Ny*0]*timestep;
float VirusOut = 4.0*Dtsx2*vtemp[Row+Nx*Column+Nx*Ny*0];
float VirusIn = 2.0*Dtsx2*NNN/3.0;
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+Nx*Column+Nx*Ny*1] < pow(10.0,-10.0)){
vtemp[Row+Nx*Column+Nx*Ny*1] = 0.0;
}
//probability of infection by free virus, using an adaptive time step
if(freecell == 1){
float probaility = PU1();
float adaptedtimestep = timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
}
}
}
}
//kills cells
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location of a cell
// Column is the column location for a cell
if(ut[Row+Nx*Column] > (inf[Row+Nx*Column] + ecl[Row+Nx*Column] + th[Row+Nx*Column])){
cells[Row+Nx*Column+Nx*Ny*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
}
// "ut" is the universal time matrix
// "inf" is the time matrix for after infection phase
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "cells" is the matrix of cells
}
}
}
for (int i = 0; i < NumberHealthy; i++){
free(LocationHealthy[i]);
}
free(LocationHealthy);
for (int i = 0; i < NumberEclipse; i++){
free(LocationEclipse[i]);
}
free(LocationEclipse);
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
for (int i = 0; i < NumberVirus; i++){
free(LocationVirus[i]);
}
free(LocationVirus);
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
vtemp[i+Nx*j+Nx*Ny*0] = vtemp[i+Nx*j+Nx*Ny*1];
cells[i+Nx*j+Nx*Ny*0] = cells[i+Nx*j+Nx*Ny*1];
}
}
//The Universal Time for the cells is kept here (ut)
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
ut[i+Nx*j] = ut[i+Nx*j] + timestep;
}
}
}
void printToFileCellAndVirus(int Nx, int Ny, int NumberOfLayers){
char File5[100] = "";
strcat(File5,Path_to_Folder);
strcat(File5,"/cells_over_time.txt");
FILE *outfile5 = fopen(File5,"a");
if (outfile5 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile5,"%c,",cells[i+Nx*j+Nx*Ny*0]);
}
fprintf(outfile5,"\n");
}
fclose(outfile5);
char File6[100] = "";
strcat(File6,Path_to_Folder);
strcat(File6,"/virus_over_time.txt");
FILE *outfile6 = fopen(File6,"a");
if (outfile6 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile6,"%f,",vtemp[i+Nx*j+Nx*Ny*1]);
}
fprintf(outfile6,"\n");
}
fclose(outfile6);
}
void printToFileCellAndVirusAnalysis(float timestep){
char File8[100] = "";
strcat(File8,Path_to_Folder);
strcat(File8,"/PerTimeStep.txt");
FILE *outfile8 = fopen(File8,"a");
if (outfile8 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile8,"%0.0f, %d, %d, %d, %d, %f,", timestep+1, NumberHealthy1, NumberEclipse1, NumberInfected1, NumberDead1, AmountOfVirus);
fprintf(outfile8,"\n");
fclose(outfile8);
}
void createParameterFile(float timestep, int NumberofSavedTimeSteps, float endtime, float timestepcount, float AmountOfVirus, float rho, float D, float deltxprime, float c, float probi){
char File7[100] = "";
strcat(File7,Path_to_Folder);
strcat(File7,"/Parameters.txt");
FILE *outfile7 = fopen(File7,"a");
if (outfile7 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile7, "Time Step = %f\n", timestep);
fprintf(outfile7, "Number of Saved Time Steps = %d\n", NumberofSavedTimeSteps);
fprintf(outfile7, "Initial End Time = %f\n", endtime);
fprintf(outfile7, "Actual Hours Simulated = %f\n", timestepcount*timestep);
fprintf(outfile7, "Final Amount of Virus = %f\n", AmountOfVirus);
fprintf(outfile7, "rho = %f\n", rho);
fprintf(outfile7, "D = %f\n", D);
fprintf(outfile7, "delta x = %f\n", deltxprime);
fprintf(outfile7, "c = %f\n", c);
fprintf(outfile7, "Probability of cell to cell infection: %f\n", probi);
fclose(outfile7);
}
void freeMemory(){
for(int i=0; i<((2*NumberOfLayers)-1); i++){
free(LocationData[i]);
}
free(LocationData);
free(cells);
free(ecl);
free(inf);
free(vtemp);
free(th);
free(ut);
free(EclipsePhaseLength);
free(InfectionPhaseLength);
if(RUNCPU == 0){
hipFree(cells_GPU);
hipFree(ecl_GPU);
hipFree(inf_GPU);
hipFree(vtemp_GPU);
hipFree(th_GPU);
hipFree(ut_GPU);
hipFree(EclipsePhaseLength_GPU);
hipFree(InfectionPhaseLength_GPU);
hipFree(state);
}
}
void errorCheck(const char *message){
hipError_t error;
error = hipGetLastError();
if(error != hipSuccess)
{
printf("\n CUDA ERROR: %s = %s\n", message, hipGetErrorString(error));
exit(0);
}
}
struct systemConstantsStruct
{
float MOI;
float beta;
float rho;
float D;
float c;
float deltx;
float deltxprime;
float Dtsx2;
float TauI;
float TauE;
float ne;
float ni;
float probi;
float timestep;
};
systemConstantsStruct SystemConstants;
void loadConstants(float MOI, float probi){
SystemConstants.MOI = MOI;
SystemConstants.beta = beta;
SystemConstants.rho = rho;
SystemConstants.D = D;
SystemConstants.c = c;
SystemConstants.deltx = deltx;
SystemConstants.deltxprime = deltxprime;
SystemConstants.Dtsx2 = Dtsx2;
SystemConstants.TauI = TauI;
SystemConstants.TauE = TauE;
SystemConstants.ne = ne;
SystemConstants.ni = ni;
SystemConstants.probi = probi;
SystemConstants.timestep = timestep;
}
void deviceSetupAndMemoryAllocation(int Nx, int Ny){
BlockConfig.x = 16;
BlockConfig.y = 16;
BlockConfig.z = 1;
GridConfig.x = (Nx-1)/BlockConfig.x + 1;
GridConfig.y = (Ny-1)/BlockConfig.y + 1;
GridConfig.z = 1;
hipMalloc((void**)&cells_GPU, Nx*Ny*2*sizeof(char));
errorCheck("hipMalloc cells Mem");
hipMalloc((void**)&vtemp_GPU, Nx*Ny*2*sizeof(float));
errorCheck("hipMalloc vtemp Mem");
hipMalloc((void**)&ut_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc ut Mem");
hipMalloc((void**)&ecl_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc ecl Mem");
hipMalloc((void**)&inf_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc inf Mem");
hipMalloc((void**)&th_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc th Mem");
hipMalloc((void**)&EclipsePhaseLength_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc EclipsePhaseLength Mem");
hipMalloc((void**)&InfectionPhaseLength_GPU, Nx*Ny*sizeof(float));
errorCheck("hipMalloc InfectionPhaseLength Mem");
}
__global__ void cuRand_Setup(hiprandState_t *state){
int Row = threadIdx.x + blockIdx.x * blockDim.x;
int Column = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int id = Row+offsetx*Column;
hiprand_init (clock64(), id, 0, state);
}
__device__ float PU_GPU(hiprandState_t *state){
// Picks a random number from a uniform distribution
float Random = hiprand_uniform(state);
return Random;
}
__global__ void kernel(char *cells, float *vtemp, float *ut, float *ecl, float *inf, float *th, float *epl, float *ipl, systemConstantsStruct constant, int cell2cell, int freecell, hiprandState_t *state, int NumberOfLayers, float probi){
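// One thread per lattice site: update the virus concentration above the site,
// then advance the cell state machine (healthy -> eclipse -> infected -> dead).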
int Row = threadIdx.x + blockIdx.x * blockDim.x;
int Column = threadIdx.y + blockIdx.y * blockDim.y;
int NX = (2*NumberOfLayers-1);
int NY = (2*NumberOfLayers-1);
int NXNY = NX*NY;
// Guard: make sure this thread maps to a real lattice site before reading cells,
// then skip empty ('o') sites.
if((Row < NX) && (Column < NY) && (cells[Row+NX*Column+NXNY*0] != 'o')){
//Virus Spreads
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+NX*Column+NXNY*0] == 'i'){
rho2 = constant.rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (NY-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (NX-1)){
RightColumn = Column;
}
if(cells[AboveRow+NX*Column+NXNY*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+NX*RightColumn+NXNY*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+NX*RightColumn+NXNY*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+NX*Column+NXNY*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+NX*LeftColumn+NXNY*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+NX*LeftColumn+NXNY*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
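// Explicit Euler update of the virus field on the hexagonal grid:
// NNN sums the virus over the six neighbours; the new value adds production
// (rho2), hexagonal diffusion in/out (constant.Dtsx2), and clearance (constant.c).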
float NNN = (vtemp[AboveRow+NX*Column+NXNY*0] + vtemp[AboveRow+NX*RightColumn+NXNY*0] + vtemp[Row+NX*RightColumn+NXNY*0] + vtemp[BelowRow+NX*Column+NXNY*0] + vtemp[Row+NX*LeftColumn+NXNY*0] + vtemp[BelowRow+NX*LeftColumn+NXNY*0]);
float VirusProduced = rho2*constant.timestep;
float VirusDecay = constant.c*vtemp[Row+NX*Column+NXNY*0]*constant.timestep;
float VirusOut = 4.0*constant.Dtsx2*vtemp[Row+NX*Column+NXNY*0];
float VirusIn = 2.0*constant.Dtsx2*NNN/3.0;
__syncthreads();
vtemp[Row+NX*Column+NXNY*1] = vtemp[Row+NX*Column+NXNY*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+NX*Column+NXNY*1] < pow(10.0,-10.0)){
vtemp[Row+NX*Column+NXNY*1] = 0.0;
}
//The Cell behavior
if(cells[Row+NX*Column+NXNY*0] == 'i'){
// Infected
if(ut[Row+NX*Column] > (inf[Row+NX*Column] + ecl[Row+NX*Column] + th[Row+NX*Column])){
cells[Row+NX*Column+NXNY*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+NX*Column+NXNY*1] = 'i';
}
}
}
else if(cells[Row+NX*Column+NXNY*0] == 'e'){
// Eclipse
if(ut[Row+NX*Column] > (ecl[Row+NX*Column] + th[Row+NX*Column])){
cells[Row+NX*Column+NXNY*1] = 'i';
inf[Row+NX*Column] = inf[Row+NX*Column] + ipl[Row+NX*Column];
}
}
else if(cells[Row+NX*Column+NXNY*0] == 'h'){
// Healthy
th[Row+NX*Column] = th[Row+NX*Column] + constant.timestep;
if(cell2cell == 1){
// Cell to cell transmission
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > NY-1){
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > NX-1){
RightColumn = 0;
}
if(PU_GPU(state) < constant.probi*constant.timestep){
if(cells[Row+NX*LeftColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[Row+NX*RightColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[AboveRow+NX*Column+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[BelowRow+NX*Column+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[AboveRow+NX*RightColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[BelowRow+NX*LeftColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
}
if(freecell == 1){
// Cell free transmission
float probablity = PU_GPU(state);
float adaptedtimestep = constant.timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probablity < pinfect){
cells[Row+NX*Column+NXNY*1] = 'e';
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+NX*Column+NXNY*1] = vtemp[Row+NX*Column+NXNY*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probablity < pinfect){
cells[Row+NX*Column+NXNY*1] = 'e';
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
}
}
}
}
//The Universal Time for the cells is kept here (ut)
ut[Row+NX*Column] = ut[Row+NX*Column] + constant.timestep;
vtemp[Row+NX*Column+NXNY*0] = vtemp[Row+NX*Column+NXNY*1];
cells[Row+NX*Column+NXNY*0] = cells[Row+NX*Column+NXNY*1];
}
}
int main(void){
//Checks the stability criterion for the explicit viral diffusion update (D*timestep/deltxprime^2 must stay below 0.5)
if(D*timestep/pow(deltxprime,2.0) > 0.5){
printf("%.1f",D*timestep/pow(deltxprime,2.0));
printf("CHANGE PARAMETERS TO FIT DIFFUSION LIMITS. VALUE MUST BE UNDER 0.5. VALUE SHOWN ABOVE");
exit(0);
}
//Clear Terminal
system("clear");
float MOI[10] = {5*powf(10,-1), powf(10,-1), 5*powf(10,-2), powf(10,-2), 5*powf(10,-3), powf(10,-3), 5*powf(10,-4), powf(10,-4), 5*powf(10,-5), powf(10,-5)};
//float MOI[6] = {powf(10,0), powf(10,-1), powf(10,-2), powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[5] = {powf(10,-1), powf(10,-2), powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[3] = {powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[1] = {powf(10,0)};
float probi[1] = {0.2};
for(int q=0;q<(sizeof(MOI)/sizeof(MOI[0]));q++){
for(int k=0;k<(sizeof(probi)/sizeof(probi[0]));k++){
//Loop For The number Of Simulations To Run Per Setting
for(int BigIndex=0;BigIndex<NumberOfRuns;BigIndex++){
// auto start = chrono::high_resolution_clock::now();
// printf("\nStarting run %d\n", (BigIndex+1));
//Creating Save Path
creatingPathToFolderAndDirectory(StartRuns+BigIndex, NumberOfLayers, MOI[q], probi[k]);
//Creating placeholder variables for multiple runs
int cell2cell = CELL2CELL;
int freecell = FREECELL;
//Building Cells
creatingCellLocations();
//Number of Cells
//Number of initial infected cells
int Ni = NumberOfCells*MOI[q]; if(Ni < 1){ printf("Use larger MOI"); exit(0);}
int Nx = (2*NumberOfLayers-1); //Range of cells on x axis
int Ny = (2*NumberOfLayers-1); //Range of cells on y axis
//Making empty matrices
allocateMemory(Nx, Ny);
//Initializing
initailConditions(Nx, Ny);
//Creates the output files (overwriting old ones) and writes the initial values
if(BigIndex == 0){
printToFileCellAndVirusInitial(Nx, Ny, NumberOfLayers);
}
printToFileCellAndVirusAnalysisInitial(Nx, Ny);
//Infects a random cell, now seen as (e)
infectANumberOfCellsRandomly(Nx, Ny, Ni);
if(RUNCPU == 0){
hipMalloc((void**)&state, Nx*Ny*sizeof(int));
errorCheck("hipMalloc Random Setup");
hipLaunchKernelGGL(( cuRand_Setup), dim3(GridConfig),dim3(BlockConfig), 0, 0, state);
errorCheck("Random Setup");
loadConstants(MOI[q], probi[k]);
deviceSetupAndMemoryAllocation(Nx, Ny);
hipMemcpy( cells_GPU, cells, Nx*Ny*2*sizeof(char), hipMemcpyHostToDevice );
errorCheck("hipMemcpy cells HtoD");
hipMemcpy( vtemp_GPU, vtemp, Nx*Ny*2*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy vtemp HtoD");
hipMemcpy( ut_GPU, ut, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy ut HtoD");
hipMemcpy( ecl_GPU, ecl, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy ecl HtoD");
hipMemcpy( inf_GPU, inf, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy inf HtoD");
hipMemcpy( th_GPU, th, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy th HtoD");
hipMemcpy( EclipsePhaseLength_GPU, EclipsePhaseLength, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy EclipsePhaseLength HtoD");
hipMemcpy( InfectionPhaseLength_GPU, InfectionPhaseLength, Nx*Ny*sizeof(float), hipMemcpyHostToDevice );
errorCheck("hipMemcpy InfectionPhaseLength HtoD");
}
//Runs simulation
int NumberofTimeSteps = endtime/timestep;
int NumberofSavedTimeSteps = NumberofTimeSteps/Save;
int timestepcount = 0; //equal to the number of ts elapsed
while(timestepcount < (NumberofTimeSteps-1)){
if(RUNCPU == 0){
hipLaunchKernelGGL(( kernel), dim3(GridConfig),dim3(BlockConfig), 0, 0, cells_GPU, vtemp_GPU, ut_GPU, ecl_GPU, inf_GPU, th_GPU, EclipsePhaseLength_GPU, InfectionPhaseLength_GPU, SystemConstants, cell2cell, freecell, state, NumberOfLayers, probi[k]);
}
else{
//Cerial Viral Transmission
cerialViralTransmission(Nx, Ny, cell2cell, freecell, probi[k]);
// modifiedCerialViralTransmission(Nx, Ny, cell2cell, freecell, probi[k]);
}
if((timestepcount%Save) == 0){
if(RUNCPU == 0){
hipMemcpy( cells, cells_GPU, Nx*Ny*2*sizeof(char), hipMemcpyDeviceToHost );
errorCheck("hipMemcpy cells DtoH");
hipMemcpy( vtemp, vtemp_GPU, Nx*Ny*2*sizeof(float), hipMemcpyDeviceToHost );
errorCheck("hipMemcpy vtemp DtoH");
}
//Analysis of the dish
NumberDead1 = 0;
NumberInfected1 = 0;
NumberEclipse1 = 0;
NumberHealthy1 = 0;
AmountOfVirus = 0.0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
AmountOfVirus = AmountOfVirus + vtemp[i+Nx*j+Nx*Ny*0];
if(cells[i+Nx*j+Nx*Ny*0] == 'd'){
NumberDead1 = NumberDead1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected1 = NumberInfected1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse1 = NumberEclipse1 +1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy1 = NumberHealthy1 + 1;
}
}
}
//Prints status of cells and virus
if(BigIndex == 0){
printToFileCellAndVirus(Nx, Ny, NumberOfLayers);
}
printToFileCellAndVirusAnalysis(timestepcount*timestep);
}
//Number of days completed
// if((timestepcount%(24*int(1/timestep))) == 0){
// printf("%.0f Day\n",(timestepcount*timestep)/24);
// }
// if((NumberHealthy1 == 0)){
// cell2cell = 0;
// freecell = 0;
// }
// else{
// cell2cell = CELL2CELL;
// freecell = FREECELL;
// }
// //End Code if Virus has below 10
// if((AmountOfVirus < pow(10,1.0)) && (NumberDead1 == NumberOfCells)){
// break;
// }
if((NumberInfected1 == 0) && (NumberEclipse1 == 0)){
cell2cell = 0;
freecell = 0;
}
else{
cell2cell = CELL2CELL;
freecell = FREECELL;
}
//End the run once the amount of virus is below 10 and no infected or eclipse cells remain
if((AmountOfVirus < pow(10,1.0)) && (NumberInfected1 == 0) && (NumberEclipse1 == 0)){
break;
}
timestepcount = timestepcount+1;
}
//Writes a file with all of our parameters/variables
createParameterFile(timestep, NumberofSavedTimeSteps, endtime, timestepcount, AmountOfVirus, rho, D, deltxprime, c, probi[k]);
printf("\nMOI(%.1f) probi(%.1f): %d of %d Runs Done\n", log10(MOI[q]), probi[k], (BigIndex+1), NumberOfRuns);
freeMemory();
// auto finish = std::chrono::high_resolution_clock::now();
// chrono::duration<double> elapsed = finish - start;
// cout << "Elapsed time: " << elapsed.count() << " s";
}
}
}
printf("\nPROGRAM DONE\n");
}
| 849cb0ece0c63ce08fd10299318df65883903332.cu | //##############################################################################
//# #
//# Virus Model #
//# #
//##############################################################################
// nvcc SARS-CoV-2.cu -o program.out && ./program.out
/*
Using camel case:
C Functions start with lower case
Variables start with upper case
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include <curand_kernel.h>
#include <time.h>
#include <ctime>
#include <math.h>
#include <random>
#include <iostream>
using namespace std;
#include <chrono>
#define PI 3.1415926535897932f
#define CODETESTINGCONDITIONS 0
#define RUNCPU 0
//Globals to set up the kernels
dim3 BlockConfig, GridConfig;
//Simulation Parameters
int CELL2CELL = 0;
int FREECELL = 1;
float timestep = 0.005; //Time step for model (No larger than 0.01 hour) 0.005 hr = 18 sec, (1/3600) hr = 1 sec
float endtime = (2*365)*24; //Days in hours
int Save = (1/timestep); //number of time steps between saves to file; (1/timestep) results in 1 save every simulated hour
int NumberOfLayers = 607; //607 layers gives roughly a million hexagons in a circle
int StartRuns = 0;
int NumberOfRuns = 100;
//Physical Parameters
//float MOI = pow(10,0); //pow(10,-5) to 1
float beta = 84.0; //2.3*pow(10,-7); //Infection rate, units: per hour
float rho = 19919.2; //1920
float D = 4.8*pow(10,-12); //Diffusion rate at 37 degrees celsius unit: m^2/s //pow(6*10,-12) //3.96e-8
float c = 0.0049; //Clearance rate, units: per hour
float deltx = 25.0*pow(10,-6);
float deltxprime = deltx*2;
float Dtsx2 = D*timestep*pow(deltxprime,-2);
//Probability Constants
float TauI = 0.624; //Avg time for infection
float TauE = 5.88; //Avg time for eclipse
float ne = 30.0; //Number of eclipse compartments?
float ni = 100.0; //Number of infected compartments?
//float probi = 0.2; //Probability per unit time of cell to cell infection (/hour)
//Global Variables
char Path_to_Folder[100] = "";
char Directroy[100] = "";
char** LocationData;
char* cells;
char* cells_GPU;
float* ecl;
float* ecl_GPU;
float* inf;
float* inf_GPU;
float* vtemp;
float* vtemp_GPU;
float* th;
float* th_GPU;
float* ut;
float* ut_GPU;
float* EclipsePhaseLength;
float* EclipsePhaseLength_GPU;
float* InfectionPhaseLength;
float* InfectionPhaseLength_GPU;
int NumberOfCells;
int NumberDead;
int NumberDead1;
int NumberInfected1;
int NumberEclipse1;
int NumberHealthy1;
float AmountOfVirus;
curandState *state;
//Functions
float Te(float TauE, float ne){
// Picks a random number from the gamma distribution
// The number is to be used as a time step in the Eclipse Time Matrix
random_device rd;
default_random_engine generator(rd());
gamma_distribution<double> distribution(TauE, TauE/sqrt(ne));
return distribution(generator);
}
float Ti(float TauI, float ni){
// Picks a random number from the gamma distribution
// The number is to be used as a time step in the Infected Time Matrix
random_device rd;
default_random_engine generator(rd());
gamma_distribution<double> distribution(TauI, TauI/sqrt(ni));
return distribution(generator);
}
float PU1(){
// Picks a random number from a uniform distribution
// The draw is compared against per-time-step infection probabilities
random_device rd;
default_random_engine generator(rd());
uniform_real_distribution<double> distribution(0.0,1.0);
return distribution(generator);
}
void creatingPathToFolderAndDirectory(int BigIndex, int NumberOfLayers, float MOI, float probi){
char TransmissionType[10] = "";
if (CELL2CELL == 1){
if (FREECELL == 1){
strcat(TransmissionType,"Both");
}
else {
strcat(TransmissionType,"CELL2CELL");
}
}
else if(CELL2CELL == 0){
if (FREECELL == 0){
strcat(TransmissionType,"Neither");
}
else{
strcat(TransmissionType,"FREECELL");
}
}
char Buffer[5]; //Buffer String For Conversion To Char
char TheCurrentTime[50];
time_t RawTime = time(NULL);
tm* SpecificMoment = localtime(&RawTime);
strcpy(Path_to_Folder, "");
strcpy(Directroy, "");
if(RUNCPU == 1){
strcat(Path_to_Folder,"ViralModel/");
}
else{
strcat(Path_to_Folder,"/media/baylorfain/HDD/SARS-CoV-2/");
}
// strftime(TheCurrentTime, 50, "%m-%d/%I:%M", SpecificMoment);
strftime(TheCurrentTime, 50, "%m-%d/", SpecificMoment);
strcat(Path_to_Folder,TheCurrentTime);
// strcat(Path_to_Folder,"_");
sprintf(Buffer,"%d",NumberOfLayers);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%d",BigIndex);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,TransmissionType);
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%.1f",log10(MOI));
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,"MOI");
strcat(Path_to_Folder,"_");
sprintf(Buffer,"%.1f",probi);
strcat(Path_to_Folder,Buffer);
strcat(Path_to_Folder,"-");
strcat(Path_to_Folder,"probi");
strcat(Directroy,"mkdir -p ");
strcat(Directroy,Path_to_Folder);
int check = system(strdup(Directroy));
if(check != 0){
exit(0);
}
}
void creatingCellLocations(){
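// Builds a hexagonal lattice in cube coordinates, discards hexagons outside the
// bounding circle, marks the surviving sites 'h' (healthy) in LocationData, and
// writes the layout and grid parameters to disk.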
float SideLenght = (2.0/3.0);
int RadiusScale = 0;
for(int i=0; i<NumberOfLayers; i++){
if(i == 0){
RadiusScale = RadiusScale + 1;
}
else{
if((i)%2 == 1){
RadiusScale = RadiusScale + 1;
}
else{
RadiusScale = RadiusScale + 2;
}
}
}
float RadiusOfCircle = SideLenght*RadiusScale;
int count = 0;
for(int i=0; i<NumberOfLayers; i++){
count = count + i;
}
int NumberOfHexagons=(count)*6+1;
float** coord;
int n = NumberOfHexagons;
int m = 3;
coord = (float**) calloc(n,sizeof(float*));
for (int i = 0; i < n; i++){
coord[i] = (float*) calloc(m,sizeof(float));
}
float** percyclecoord;
n = NumberOfHexagons;
m = 3;
percyclecoord = (float**) calloc(n,sizeof(float*));
for (int i = 0; i < n; i++){
percyclecoord[i] = (float*) calloc(m,sizeof(float));
}
int temp;
for(int j=0; j<NumberOfLayers; j++){
for(int i=0; i<(2*j); i++){
if(i < j){
temp = i;
}
percyclecoord[i+(j-1)*j+1][0] = -temp-1;
percyclecoord[i+(j-1)*j+1][1] = temp+j-i;
percyclecoord[i+(j-1)*j+1][2] = -j+1+i;
}
}
float c0[3] = {percyclecoord[0][0], percyclecoord[0][1], percyclecoord[0][2]};
coord[0][2] = c0[2];
coord[0][1] = c0[1];
coord[0][0] = c0[0];
count = 0;
for(int j=0; j<(NumberOfHexagons/3); j++){
for(int i=0; i<3; i++){
coord[(i+0)%3+3*j+1][2] = percyclecoord[j+1][i]+c0[i];
coord[(i+1)%3+3*j+1][1] = percyclecoord[j+1][i]+c0[i];
coord[(i+2)%3+3*j+1][0] = percyclecoord[j+1][i]+c0[i];
}
}
float hi = coord[0][0];
float vi = coord[0][2];
float xmin = INFINITY;
float xcoord;
float ycoord;
double dist;
for(int i=0; i<NumberOfHexagons; i++){
xcoord = coord[i][0];
if(coord[i][0] < xmin){
xmin = coord[i][0];
}
ycoord = (2.0*sin(PI*(60.0/180.0))*(coord[i][1]-coord[i][2])/3.0)+vi;
dist = sqrtf(pow(double(xcoord-hi),2.0)+pow(double(ycoord-vi),2.0));
if(dist >= RadiusOfCircle){
coord[i][0] = 5000.0;
coord[i][1] = 0.0;
coord[i][2] = 0.0;
}
}
n = ((2*NumberOfLayers)-1);
m = ((2*NumberOfLayers)-1);
LocationData = (char**) malloc(n*sizeof(char*));
for(int j=0; j<n; j++){
LocationData[j] = (char*) malloc(m*sizeof(char));
for(int i=0; i<m; i++){
LocationData[j][i] = 'o';
}
}
NumberOfCells = 0;
for(int i=0; i<NumberOfHexagons; i++){
if(coord[i][0] != 5000.0){
LocationData[int(coord[i][2])-int(xmin)][int(coord[i][0])-int(xmin)] = 'h';
NumberOfCells = NumberOfCells + 1;
}
}
char File1[100] = "";
strcat(File1,Path_to_Folder);
strcat(File1,"/InitialCellLocations.txt");
FILE *outfile1 = fopen(File1,"a");
if (outfile1 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile1,"%c,",LocationData[i][j]);
}
fprintf(outfile1,"\n");
}
fclose(outfile1);
char File2[100] = "";
strcat(File2,Path_to_Folder);
strcat(File2,"/Parameters.txt");
FILE *outfile2 = fopen(File2,"w");
if (outfile2 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile2, "Hexagon Side Lenght = %f\n", SideLenght);
fprintf(outfile2, "Number of Layers = %d\n", NumberOfLayers);
fprintf(outfile2, "Radius of Circle = %f\n", RadiusOfCircle);
fprintf(outfile2, "Number of Cells = %d\n", NumberOfCells);
fclose(outfile2);
for (int i = 0; i < NumberOfHexagons; i++){
free(coord[i]);
}
free(coord);
for (int i = 0; i < NumberOfHexagons; i++){
free(percyclecoord[i]);
}
free(percyclecoord);
}
void allocateMemory(int Nx, int Ny){
//Produces a matrix for the cells
cells = (char*) malloc(Nx*Ny*2*sizeof(char));
//Produces a matrix that will track the amount virus above each cell
vtemp = (float*) malloc(Nx*Ny*2*sizeof(float));
//Produces a universal time matrix (ut)
ut = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for after eclipse phase (e)
ecl = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for after infection phase (i)
inf = (float*) malloc(Nx*Ny*sizeof(float));
//Produces a time matrix for healthy cells (t)
th = (float*) malloc(Nx*Ny*sizeof(float));
//Produces an array of eclipse phase durations for cells
EclipsePhaseLength = (float*) malloc(Nx*Ny*sizeof(float));
//Produces an array of infection phase durations for cells
InfectionPhaseLength = (float*) malloc(Nx*Ny*sizeof(float));
}
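//Copies the hexagonal layout into the working cell matrix, zeroes every per-cell timer and
//virus value, and draws an eclipse- and infection-phase duration for every site.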
void initailConditions(int Nx, int Ny){
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
for(int k=0;k<2;k++){
cells[i+Nx*j+Nx*Ny*k] = LocationData[i][j];
vtemp[i+Nx*j+Nx*Ny*k] = 0.0;
}
ut[i+Nx*j] = 0.0;
ecl[i+Nx*j] = 0.0;
inf[i+Nx*j] = 0.0;
th[i+Nx*j] = 0.0;
EclipsePhaseLength[i+Nx*j] = Te(TauE,ne);
InfectionPhaseLength[i+Nx*j] = Ti(TauI,ni);
}
}
}
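//Seeds the infection: under CODETESTINGCONDITIONS only the center cell is infected,
//otherwise Ni randomly chosen healthy cells are switched to the eclipse state 'e'.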
void infectANumberOfCellsRandomly(int Nx, int Ny, int Ni){
if(CODETESTINGCONDITIONS == 1){
cells[(NumberOfLayers-1)+Nx*(NumberOfLayers-1)+Nx*Ny*0] = 'i';
cells[(NumberOfLayers-1)+Nx*(NumberOfLayers-1)+Nx*Ny*1] = 'i'; //Only the center cell
}
else {
srand(time(NULL));
int randx;
int randy;
int NumberOfInfectedCellsCount = 0;
while(NumberOfInfectedCellsCount < Ni){
randx = (rand()%Nx);
randy = (rand()%Ny);
if((cells[randx+Nx*randy+Nx*Ny*0] != 'o') && (cells[randx+Nx*randy+Nx*Ny*0] == 'h')){
cells[randx+Nx*randy+Nx*Ny*0] = 'e';
cells[randx+Nx*randy+Nx*Ny*1] = 'e';
NumberOfInfectedCellsCount = NumberOfInfectedCellsCount + 1;
}
}
}
}
void printToFileCellAndVirusInitial(int Nx, int Ny, int NumberOfLayers){
char File3[100] = "";
strcat(File3,Path_to_Folder);
strcat(File3,"/cells_over_time.txt");
FILE *outfile3 = fopen(File3,"w");
if (outfile3 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile3,"%c,",LocationData[i][j]);
}
fprintf(outfile3,"\n");
}
fclose(outfile3);
char File4[100] = "";
strcat(File4,Path_to_Folder);
strcat(File4,"/virus_over_time.txt");
FILE *outfile4 = fopen(File4,"w");
if (outfile4 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile4,"%f,",0.0);
}
fprintf(outfile4,"\n");
}
fclose(outfile4);
}
void printToFileCellAndVirusAnalysisInitial(int Nx, int Ny){
NumberDead1 = 0;
NumberInfected1 = 0;
NumberEclipse1 = 0;
NumberHealthy1 = 0;
AmountOfVirus = 0.0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
AmountOfVirus = AmountOfVirus + vtemp[i+Nx*j+Nx*Ny*0];
if(cells[i+Nx*j+Nx*Ny*0] == 'd'){
NumberDead1 = NumberDead1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected1 = NumberInfected1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse1 = NumberEclipse1 +1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy1 = NumberHealthy1 + 1;
}
}
}
char File9[100] = "";
strcat(File9,Path_to_Folder);
strcat(File9,"/PerTimeStep.txt");
FILE *outfile9 = fopen(File9,"w");
if (outfile9 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile9,"%0.0f, %d, %d, %d, %d, %f,", 0.0, NumberHealthy1, NumberEclipse1, NumberInfected1, NumberDead1, AmountOfVirus);
fprintf(outfile9,"\n");
fclose(outfile9);
}
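//One serial (CPU) time step: advance healthy-cell timers, promote eclipse cells to infected
//once their eclipse time has elapsed, spread infection to neighboring cells (cell-to-cell
//route), update the virus field (production, hexagonal diffusion, decay) with an adaptive
//sub-step for the cell-free infection probability, kill infected cells whose infection time
//is over, then commit the updated layer and advance the universal time.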
void cerialViralTransmission(int Nx, int Ny, int cell2cell, int freecell, float probi){
//The Healthy Cells' time
int NumberHealthy = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy = NumberHealthy + 1;
}
}
}
int** LocationHealthy;
LocationHealthy = (int**) malloc(NumberHealthy*sizeof(int*));
for (int i=0; i<NumberHealthy; i++){
LocationHealthy[i] = (int*) malloc(2*sizeof(int));
}
int Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
LocationHealthy[Indexer][0] = i;
LocationHealthy[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberHealthy != 0){
int Row;
int Column;
for(int j=0; j<NumberHealthy; j++){
Row = LocationHealthy[j][0];
Column = LocationHealthy[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
th[Row+Nx*Column] = th[Row+Nx*Column] + timestep;
// "th" is the time matrix for healthy cells
// "ts" is the time step for the model
}
}
for (int i = 0; i < NumberHealthy; i++){
free(LocationHealthy[i]);
}
free(LocationHealthy);
//Eclipse phase -> Infection
int NumberEclipse = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse = NumberEclipse + 1;
}
}
}
int** LocationEclipse;
LocationEclipse = (int**) malloc(NumberEclipse*sizeof(int*));
for (int i=0; i<NumberEclipse; i++){
LocationEclipse[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
LocationEclipse[Indexer][0] = i;
LocationEclipse[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberEclipse != 0){
int Row;
int Column;
for(int j=0; j<NumberEclipse; j++){
Row = LocationEclipse[j][0];
Column = LocationEclipse[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
if((ecl[Row+Nx*Column] + th[Row+Nx*Column]) < ut[Row+Nx*Column]){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
inf[Row+Nx*Column] = inf[Row+Nx*Column] + Ti(TauI, ni);
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "ut" is the univeral time matrix
// "cells" is the matrix of cells
// "inf" is the time matrix for after infection phase
}
}
}
for (int i = 0; i < NumberEclipse; i++){
free(LocationEclipse[i]);
}
free(LocationEclipse);
//Infection spreads
if(cell2cell == 1){
int NumberInfected = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
}
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
int Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[Indexer][0] = i;
LocationInfected[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// #Row is the row location for a cell
// #Column is the column location for a cell
int AboveRowExists = 1;
int LeftColumnExists = 1;
int BelowRowExists = 1;
int RightColumnExists = 1;
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRowExists = 0;
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumnExists = 0;
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > Ny-1){
BelowRowExists = 0;
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > Nx-1){
RightColumnExists = 0;
RightColumn = 0;
}
if(PU1()<probi*timestep){
if((LeftColumnExists == 1) && (cells[Row+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*LeftColumn] = Te(TauE,ne);
}
}
if((RightColumnExists == 1) && (cells[Row+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*RightColumn] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (cells[AboveRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*Column] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (cells[BelowRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*Column] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (RightColumnExists == 1) && (cells[AboveRow+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*RightColumn] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (LeftColumnExists == 1) && (cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*LeftColumn] = Te(TauE,ne);
}
}
}
}
}
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
}
//Virus Spreads
int NumberVirus = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
NumberVirus = NumberVirus + 1;
}
}
}
int** LocationVirus;
LocationVirus = (int**) malloc(NumberVirus*sizeof(int*));
for (int i=0; i<NumberVirus; i++){
LocationVirus[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
LocationVirus[Indexer][0] = i;
LocationVirus[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
int Row;
int Column;
for(int j=0; j<NumberVirus; j++){
Row = LocationVirus[j][0];
Column = LocationVirus[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+Nx*Column+Nx*Ny*0] == 'i'){
rho2 = rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (Ny-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (Nx-1)){
RightColumn = Column;
}
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
float NNN = (vtemp[AboveRow+Nx*Column+Nx*Ny*0] + vtemp[AboveRow+Nx*RightColumn+Nx*Ny*0] + vtemp[Row+Nx*RightColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*Column+Nx*Ny*0] + vtemp[Row+Nx*LeftColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*LeftColumn+Nx*Ny*0]);
float VirusProduced = rho2*timestep;
float VirusDecay = c*vtemp[Row+Nx*Column+Nx*Ny*0]*timestep;
float VirusOut = 4.0*Dtsx2*vtemp[Row+Nx*Column+Nx*Ny*0];
float VirusIn = 2.0*Dtsx2*NNN/3.0;
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+Nx*Column+Nx*Ny*1] < pow(10.0,-10.0)){
vtemp[Row+Nx*Column+Nx*Ny*1] = 0.0;
}
//probability of infect adaptive time step
if(freecell == 1){
float probaility = PU1();
float adaptedtimestep = timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
}
}
}
}
for (int i = 0; i < NumberVirus; i++){
free(LocationVirus[i]);
}
free(LocationVirus);
//kills cells
int NumberInfected = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
}
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
Indexer = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[Indexer][0] = i;
LocationInfected[Indexer][1] = j;
Indexer = Indexer + 1;
}
}
}
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
if(ut[Row+Nx*Column] > (inf[Row+Nx*Column] + ecl[Row+Nx*Column] + th[Row+Nx*Column])){
cells[Row+Nx*Column+Nx*Ny*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
}
// "ut" is the univeral time matrix
// "inf" is the time matrix for after infection phase
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "cells" is the matrix of cells
}
}
}
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
vtemp[i+Nx*j+Nx*Ny*0] = vtemp[i+Nx*j+Nx*Ny*1];
cells[i+Nx*j+Nx*Ny*0] = cells[i+Nx*j+Nx*Ny*1];
}
}
//The Universal Time for the cells is kept here (ut)
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
ut[i+Nx*j] = ut[i+Nx*j] + timestep;
}
}
}
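//Same time step as cerialViralTransmission, but the healthy, eclipse, infected, and occupied
//cell lists are gathered in a single pass over the grid before being processed.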
void modifiedCerialViralTransmission(int Nx, int Ny, int cell2cell, int freecell, float probi){
int NumberHealthy = 0;
int NumberEclipse = 0;
int NumberInfected = 0;
int NumberVirus = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy = NumberHealthy + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse = NumberEclipse + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected = NumberInfected + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
NumberVirus = NumberVirus + 1;
}
}
}
int** LocationHealthy;
LocationHealthy = (int**) malloc(NumberHealthy*sizeof(int*));
for (int i=0; i<NumberHealthy; i++){
LocationHealthy[i] = (int*) malloc(2*sizeof(int));
}
int** LocationEclipse;
LocationEclipse = (int**) malloc(NumberEclipse*sizeof(int*));
for (int i=0; i<NumberEclipse; i++){
LocationEclipse[i] = (int*) malloc(2*sizeof(int));
}
int** LocationInfected;
LocationInfected = (int**) malloc(NumberInfected*sizeof(int*));
for (int i=0; i<NumberInfected; i++){
LocationInfected[i] = (int*) malloc(2*sizeof(int));
}
int** LocationVirus;
LocationVirus = (int**) malloc(NumberVirus*sizeof(int*));
for (int i=0; i<NumberVirus; i++){
LocationVirus[i] = (int*) malloc(2*sizeof(int));
}
int IndexerH = 0;
int IndexerE = 0;
int IndexerI = 0;
int IndexerO = 0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
LocationHealthy[IndexerH][0] = i;
LocationHealthy[IndexerH][1] = j;
IndexerH = IndexerH + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
LocationEclipse[IndexerE][0] = i;
LocationEclipse[IndexerE][1] = j;
IndexerE = IndexerE + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
LocationInfected[IndexerI][0] = i;
LocationInfected[IndexerI][1] = j;
IndexerI = IndexerI + 1;
}
if(cells[i+Nx*j+Nx*Ny*0] != 'o'){
LocationVirus[IndexerO][0] = i;
LocationVirus[IndexerO][1] = j;
IndexerO = IndexerO + 1;
}
}
}
//The Healthy Cells' time
if(NumberHealthy != 0){
int Row;
int Column;
for(int j=0; j<NumberHealthy; j++){
Row = LocationHealthy[j][0];
Column = LocationHealthy[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
th[Row+Nx*Column] = th[Row+Nx*Column] + timestep;
// "th" is the time matrix for healthy cells
// "ts" is the time step for the model
}
}
//Eclipse phase -> Infection
if(NumberEclipse != 0){
int Row;
int Column;
for(int j=0; j<NumberEclipse; j++){
Row = LocationEclipse[j][0];
Column = LocationEclipse[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
if((ecl[Row+Nx*Column] + th[Row+Nx*Column]) < ut[Row+Nx*Column]){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
inf[Row+Nx*Column] = inf[Row+Nx*Column] + Ti(TauI, ni);
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "ut" is the univeral time matrix
// "cells" is the matrix of cells
// "inf" is the time matrix for after infection phase
}
}
}
//Infection spreads
if(cell2cell == 1){
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// #Row is the row location for a cell
// #Column is the column location for a cell
int AboveRowExists = 1;
int LeftColumnExists = 1;
int BelowRowExists = 1;
int RightColumnExists = 1;
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRowExists = 0;
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumnExists = 0;
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > Ny-1){
BelowRowExists = 0;
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > Nx-1){
RightColumnExists = 0;
RightColumn = 0;
}
if(PU1()<probi*timestep){
if((LeftColumnExists == 1) && (cells[Row+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*LeftColumn] = Te(TauE,ne);
}
}
if((RightColumnExists == 1) && (cells[Row+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[Row+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[Row+Nx*RightColumn] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (cells[AboveRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*Column] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (cells[BelowRow+Nx*Column+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*Column+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*Column] = Te(TauE,ne);
}
}
if((AboveRowExists == 1) && (RightColumnExists == 1) && (cells[AboveRow+Nx*RightColumn+Nx*Ny*0] != 'o')){
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'h'){
cells[AboveRow+Nx*RightColumn+Nx*Ny*1] = 'e';
ecl[AboveRow+Nx*RightColumn] = Te(TauE,ne);
}
}
if((BelowRowExists == 1) && (LeftColumnExists == 1) && (cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] != 'o')){
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'h'){
cells[BelowRow+Nx*LeftColumn+Nx*Ny*1] = 'e';
ecl[BelowRow+Nx*LeftColumn] = Te(TauE,ne);
}
}
}
}
}
}
//Virus Spreads
int Row;
int Column;
for(int j=0; j<NumberVirus; j++){
Row = LocationVirus[j][0];
Column = LocationVirus[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+Nx*Column+Nx*Ny*0] == 'i'){
rho2 = rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (Ny-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (Nx-1)){
RightColumn = Column;
}
if(cells[AboveRow+Nx*Column+Nx*Ny*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+Nx*RightColumn+Nx*Ny*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+Nx*RightColumn+Nx*Ny*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+Nx*Column+Nx*Ny*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+Nx*LeftColumn+Nx*Ny*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+Nx*LeftColumn+Nx*Ny*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
float NNN = (vtemp[AboveRow+Nx*Column+Nx*Ny*0] + vtemp[AboveRow+Nx*RightColumn+Nx*Ny*0] + vtemp[Row+Nx*RightColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*Column+Nx*Ny*0] + vtemp[Row+Nx*LeftColumn+Nx*Ny*0] + vtemp[BelowRow+Nx*LeftColumn+Nx*Ny*0]);
float VirusProduced = rho2*timestep;
float VirusDecay = c*vtemp[Row+Nx*Column+Nx*Ny*0]*timestep;
float VirusOut = 4.0*Dtsx2*vtemp[Row+Nx*Column+Nx*Ny*0];
float VirusIn = 2.0*Dtsx2*NNN/3.0;
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+Nx*Column+Nx*Ny*1] < pow(10.0,-10.0)){
vtemp[Row+Nx*Column+Nx*Ny*1] = 0.0;
}
//probability of infect adaptive time step
if(freecell == 1){
float probaility = PU1();
float adaptedtimestep = timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+Nx*Column+Nx*Ny*1]*beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+Nx*Column+Nx*Ny*1] = vtemp[Row+Nx*Column+Nx*Ny*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probaility < pinfect){
if(cells[Row+Nx*Column+Nx*Ny*0] == 'h'){
cells[Row+Nx*Column+Nx*Ny*1] = 'e';
ecl[Row+Nx*Column] = Te(TauE,ne);
}
}
}
}
}
}
//kills cells
if(NumberInfected != 0){
int Row;
int Column;
for(int j=0; j<NumberInfected; j++){
Row = LocationInfected[j][0];
Column = LocationInfected[j][1];
// Row is the row location for a cell
// Column is the column location for a cell
if(ut[Row+Nx*Column] > (inf[Row+Nx*Column] + ecl[Row+Nx*Column] + th[Row+Nx*Column])){
cells[Row+Nx*Column+Nx*Ny*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+Nx*Column+Nx*Ny*1] = 'i';
}
// "ut" is the univeral time matrix
// "inf" is the time matrix for after infection phase
// "ecl" is the time matrix for after eclipse phase
// "th" is the time matrix for healthy cells
// "cells" is the matrix of cells
}
}
}
for (int i = 0; i < NumberHealthy; i++){
free(LocationHealthy[i]);
}
free(LocationHealthy);
for (int i = 0; i < NumberEclipse; i++){
free(LocationEclipse[i]);
}
free(LocationEclipse);
for (int i = 0; i < NumberInfected; i++){
free(LocationInfected[i]);
}
free(LocationInfected);
for (int i = 0; i < NumberVirus; i++){
free(LocationVirus[i]);
}
free(LocationVirus);
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
vtemp[i+Nx*j+Nx*Ny*0] = vtemp[i+Nx*j+Nx*Ny*1];
cells[i+Nx*j+Nx*Ny*0] = cells[i+Nx*j+Nx*Ny*1];
}
}
//The Universal Time for the cells is kept here (ut)
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
ut[i+Nx*j] = ut[i+Nx*j] + timestep;
}
}
}
void printToFileCellAndVirus(int Nx, int Ny, int NumberOfLayers){
char File5[100] = "";
strcat(File5,Path_to_Folder);
strcat(File5,"/cells_over_time.txt");
FILE *outfile5 = fopen(File5,"a");
if (outfile5 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile5,"%c,",cells[i+Nx*j+Nx*Ny*0]);
}
fprintf(outfile5,"\n");
}
fclose(outfile5);
char File6[100] = "";
strcat(File6,Path_to_Folder);
strcat(File6,"/virus_over_time.txt");
FILE *outfile6 = fopen(File6,"a");
if (outfile6 == NULL){
printf("Error opening file!\n");
exit(0);
}
for(int i=0; i<((2*NumberOfLayers)-1); i++){
for(int j=0; j<((2*NumberOfLayers)-1); j++){
fprintf(outfile6,"%f,",vtemp[i+Nx*j+Nx*Ny*1]);
}
fprintf(outfile6,"\n");
}
fclose(outfile6);
}
void printToFileCellAndVirusAnalysis(float timestep){
char File8[100] = "";
strcat(File8,Path_to_Folder);
strcat(File8,"/PerTimeStep.txt");
FILE *outfile8 = fopen(File8,"a");
if (outfile8 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile8,"%0.0f, %d, %d, %d, %d, %f,", timestep+1, NumberHealthy1, NumberEclipse1, NumberInfected1, NumberDead1, AmountOfVirus);
fprintf(outfile8,"\n");
fclose(outfile8);
}
void createParameterFile(float timestep, int NumberofSavedTimeSteps, float endtime, float timestepcount, float AmountOfVirus, float rho, float D, float deltxprime, float c, float probi){
char File7[100] = "";
strcat(File7,Path_to_Folder);
strcat(File7,"/Parameters.txt");
FILE *outfile7 = fopen(File7,"a");
if (outfile7 == NULL){
printf("Error opening file!\n");
exit(0);
}
fprintf(outfile7, "Time Step = %f\n", timestep);
fprintf(outfile7, "Number of Saved Time Steps = %d\n", NumberofSavedTimeSteps);
fprintf(outfile7, "Initial End Time = %f\n", endtime);
fprintf(outfile7, "Actual Hours Simulated = %f\n", timestepcount*timestep);
fprintf(outfile7, "Final Amount of Virus = %f\n", AmountOfVirus);
fprintf(outfile7, "rho = %f\n", rho);
fprintf(outfile7, "D = %f\n", D);
fprintf(outfile7, "delta x = %f\n", deltxprime);
fprintf(outfile7, "c = %f\n", c);
fprintf(outfile7, "Probability of cell to cell infection: %f\n", probi);
fclose(outfile7);
}
void freeMemory(){
for(int i=0; i<((2*NumberOfLayers)-1); i++){
free(LocationData[i]);
}
free(LocationData);
free(cells);
free(ecl);
free(inf);
free(vtemp);
free(th);
free(ut);
free(EclipsePhaseLength);
free(InfectionPhaseLength);
if(RUNCPU == 0){
cudaFree(cells_GPU);
cudaFree(ecl_GPU);
cudaFree(inf_GPU);
cudaFree(vtemp_GPU);
cudaFree(th_GPU);
cudaFree(ut_GPU);
cudaFree(EclipsePhaseLength_GPU);
cudaFree(InfectionPhaseLength_GPU);
cudaFree(state);
}
}
void errorCheck(const char *message){
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error));
exit(0);
}
}
struct systemConstantsStruct
{
float MOI;
float beta;
float rho;
float D;
float c;
float deltx;
float deltxprime;
float Dtsx2;
float TauI;
float TauE;
float ne;
float ni;
float probi;
float timestep;
};
systemConstantsStruct SystemConstants;
void loadConstants(float MOI, float probi){
SystemConstants.MOI = MOI;
SystemConstants.beta = beta;
SystemConstants.rho = rho;
SystemConstants.D = D;
SystemConstants.c = c;
SystemConstants.deltx = deltx;
SystemConstants.deltxprime = deltxprime;
SystemConstants.Dtsx2 = Dtsx2;
SystemConstants.TauI = TauI;
SystemConstants.TauE = TauE;
SystemConstants.ne = ne;
SystemConstants.ni = ni;
SystemConstants.probi = probi;
SystemConstants.timestep = timestep;
}
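//Chooses a 16x16 thread-block layout that covers the Nx-by-Ny grid and allocates the
//device-side copies of every per-cell array.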
void deviceSetupAndMemoryAllocation(int Nx, int Ny){
BlockConfig.x = 16;
BlockConfig.y = 16;
BlockConfig.z = 1;
GridConfig.x = (Nx-1)/BlockConfig.x + 1;
GridConfig.y = (Ny-1)/BlockConfig.y + 1;
GridConfig.z = 1;
cudaMalloc((void**)&cells_GPU, Nx*Ny*2*sizeof(char));
errorCheck("cudaMalloc cells Mem");
cudaMalloc((void**)&vtemp_GPU, Nx*Ny*2*sizeof(float));
errorCheck("cudaMalloc vtemp Mem");
cudaMalloc((void**)&ut_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc ut Mem");
cudaMalloc((void**)&ecl_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc ecl Mem");
cudaMalloc((void**)&inf_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc inf Mem");
cudaMalloc((void**)&th_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc th Mem");
cudaMalloc((void**)&EclipsePhaseLength_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc EclipsePhaseLength Mem");
cudaMalloc((void**)&InfectionPhaseLength_GPU, Nx*Ny*sizeof(float));
errorCheck("cudaMalloc InfectionPhaseLength Mem");
}
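//Seeds the cuRAND state used by the simulation kernel for its uniform random draws.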
__global__ void cuRand_Setup(curandState *state){
int Row = threadIdx.x + blockIdx.x * blockDim.x;
int Column = threadIdx.y + blockIdx.y * blockDim.y;
int offsetx = blockDim.x * gridDim.x;
int id = Row+offsetx*Column;
curand_init (clock64(), id, 0, state);
}
__device__ float PU_GPU(curandState *state){
// Picks a random number from a uniform distribution
float Random = curand_uniform(state);
return Random;
}
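//GPU version of one simulation time step. Each thread owns one grid site: it updates the
//local virus concentration (production, hexagonal diffusion in/out, decay), applies the cell
//state machine (infected -> dead, eclipse -> infected, healthy -> eclipse via cell-to-cell or
//cell-free transmission with the same adaptive sub-step as the CPU path), then advances the
//site's universal time and commits the new values.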
__global__ void kernel(char *cells, float *vtemp, float *ut, float *ecl, float *inf, float *th, float *epl, float *ipl, systemConstantsStruct constant, int cell2cell, int freecell, curandState *state, int NumberOfLayers, float probi){
int Row = threadIdx.x + blockIdx.x * blockDim.x;
int Column = threadIdx.y + blockIdx.y * blockDim.y;
int NX = (2*NumberOfLayers-1);
int NY = (2*NumberOfLayers-1);
int NXNY = NX*NY;
if((Row < NX) && (Column < NY) && (cells[Row+NX*Column+NXNY*0] != 'o')){
//Virus Spreads
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
float rho2;
if(cells[Row+NX*Column+NXNY*0] == 'i'){
rho2 = constant.rho;
}
else{
rho2 = 0;
}
// where rho2 is a placeholder variable
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = Row;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = Column;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > (NY-1)){
BelowRow = Row;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > (NX-1)){
RightColumn = Column;
}
if(cells[AboveRow+NX*Column+NXNY*0] == 'o'){
AboveRow = Row;
}
if(cells[AboveRow+NX*RightColumn+NXNY*0] == 'o'){
AboveRow = Row;
RightColumn = Column;
}
if(cells[Row+NX*RightColumn+NXNY*0] == 'o'){
RightColumn = Column;
}
if(cells[BelowRow+NX*Column+NXNY*0] == 'o'){
BelowRow = Row;
}
if(cells[Row+NX*LeftColumn+NXNY*0] == 'o'){
LeftColumn = Column;
}
if(cells[BelowRow+NX*LeftColumn+NXNY*0] == 'o'){
BelowRow = Row;
LeftColumn = Column;
}
float NNN = (vtemp[AboveRow+NX*Column+NXNY*0] + vtemp[AboveRow+NX*RightColumn+NXNY*0] + vtemp[Row+NX*RightColumn+NXNY*0] + vtemp[BelowRow+NX*Column+NXNY*0] + vtemp[Row+NX*LeftColumn+NXNY*0] + vtemp[BelowRow+NX*LeftColumn+NXNY*0]);
float VirusProduced = rho2*constant.timestep;
float VirusDecay = constant.c*vtemp[Row+NX*Column+NXNY*0]*constant.timestep;
float VirusOut = 4.0*constant.Dtsx2*vtemp[Row+NX*Column+NXNY*0];
float VirusIn = 2.0*constant.Dtsx2*NNN/3.0;
__syncthreads();
vtemp[Row+NX*Column+NXNY*1] = vtemp[Row+NX*Column+NXNY*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(vtemp[Row+NX*Column+NXNY*1] < pow(10.0,-10.0)){
vtemp[Row+NX*Column+NXNY*1] = 0.0;
}
//The Cell behavior
if(cells[Row+NX*Column+NXNY*0] == 'i'){
// Infected
if(ut[Row+NX*Column] > (inf[Row+NX*Column] + ecl[Row+NX*Column] + th[Row+NX*Column])){
cells[Row+NX*Column+NXNY*1] = 'd';
if(CODETESTINGCONDITIONS == 1){
cells[Row+NX*Column+NXNY*1] = 'i';
}
}
}
else if(cells[Row+NX*Column+NXNY*0] == 'e'){
// Eclipse
if(ut[Row+NX*Column] > (ecl[Row+NX*Column] + th[Row+NX*Column])){
cells[Row+NX*Column+NXNY*1] = 'i';
inf[Row+NX*Column] = inf[Row+NX*Column] + ipl[Row+NX*Column];
}
}
else if(cells[Row+NX*Column+NXNY*0] == 'h'){
// Healthy
th[Row+NX*Column] = th[Row+NX*Column] + constant.timestep;
if(cell2cell == 1){
// Cell to cell transmission
int AboveRow = Row-1; //row coordinate above cell
int LeftColumn = Column-1; //column coordinate left of cell
int BelowRow = Row+1; //row coordinate below cell
int RightColumn = Column+1; //column coordinate right of cell
// if the cell one row up doesn't exist, it's taken out of the equation
if(AboveRow < 0){
AboveRow = 0;
}
// if the cell one column to the left doesn't exist, it's taken out of the equation
if(LeftColumn < 0){
LeftColumn = 0;
}
// if the cell one row down doesn't exist, it's taken out of the equation
if(BelowRow > NY-1){
BelowRow = 0;
}
// if the cell one column to the right doesn't exist, it's taken out of the equation
if(RightColumn > NX-1){
RightColumn = 0;
}
if(PU_GPU(state) < constant.probi*constant.timestep){
if(cells[Row+NX*LeftColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[Row+NX*RightColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[AboveRow+NX*Column+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[BelowRow+NX*Column+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[AboveRow+NX*RightColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
if(cells[BelowRow+NX*LeftColumn+NXNY*0] == 'i'){
cells[Row+NX*Column+NXNY*1] = 'e';
}
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
}
if(freecell == 1){
// Cell free transmission
float probablity = PU_GPU(state);
float adaptedtimestep = constant.timestep; //variable time step
float adaptedtimestepcount = 1.0;
float pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
while(pinfect > 1.0){
adaptedtimestep = adaptedtimestep/2.0;
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
adaptedtimestepcount = adaptedtimestepcount*2.0;
}
if(pinfect <= 1.0){
if(adaptedtimestepcount != 1.0){
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
}
while(adaptedtimestepcount != 1.0){
if(probablity < pinfect){
cells[Row+NX*Column+NXNY*1] = 'e';
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
adaptedtimestepcount = adaptedtimestepcount/2.0;
adaptedtimestep = adaptedtimestep*2.0;
pinfect = vtemp[Row+NX*Column+NXNY*1]*constant.beta*adaptedtimestep;
}
if(adaptedtimestepcount == 1.0){
vtemp[Row+NX*Column+NXNY*1] = vtemp[Row+NX*Column+NXNY*0] + VirusProduced - VirusOut + VirusIn - VirusDecay;
if(probablity < pinfect){
cells[Row+NX*Column+NXNY*1] = 'e';
ecl[Row+NX*Column] = epl[Row+NX*Column];
}
}
}
}
}
//The Universal Time for the cells is kept here (ut)
ut[Row+NX*Column] = ut[Row+NX*Column] + constant.timestep;
vtemp[Row+NX*Column+NXNY*0] = vtemp[Row+NX*Column+NXNY*1];
cells[Row+NX*Column+NXNY*0] = cells[Row+NX*Column+NXNY*1];
}
}
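//Driver: checks the diffusion stability limit, then sweeps every MOI and cell-to-cell
//probability. Each run builds its output directory, lays out and seeds the dish, allocates
//host (and, for GPU runs, device) memory, steps the simulation until the end time or until
//no infectious material remains, writes per-step output, and records the run parameters.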
int main(void){
//Checks for Heisenberg status of viral diffusion
if(D*timestep/pow(deltxprime,2.0) > 0.5){
printf("%.1f",D*timestep/pow(deltxprime,2.0));
printf("CHANGE PARAMETERS TO FIT DIFFUSION LIMITS. VALUE MUST BE UNDER 0.5. VALUE SHOWN ABOVE");
exit(0);
}
//Clear Terminal
system("clear");
float MOI[10] = {5*powf(10,-1), powf(10,-1), 5*powf(10,-2), powf(10,-2), 5*powf(10,-3), powf(10,-3), 5*powf(10,-4), powf(10,-4), 5*powf(10,-5), powf(10,-5)};
//float MOI[6] = {powf(10,0), powf(10,-1), powf(10,-2), powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[5] = {powf(10,-1), powf(10,-2), powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[3] = {powf(10,-3), powf(10,-4), powf(10,-5)};
//float MOI[1] = {powf(10,0)};
float probi[1] = {0.2};
for(int q=0;q<(sizeof(MOI)/sizeof(MOI[0]));q++){
for(int k=0;k<(sizeof(probi)/sizeof(probi[0]));k++){
//Loop For The number Of Simulations To Run Per Setting
for(int BigIndex=0;BigIndex<NumberOfRuns;BigIndex++){
// auto start = chrono::high_resolution_clock::now();
// printf("\nStarting run %d\n", (BigIndex+1));
//Creating Save Path
creatingPathToFolderAndDirectory(StartRuns+BigIndex, NumberOfLayers, MOI[q], probi[k]);
//Creating placeholder variables for multipy runs
int cell2cell = CELL2CELL;
int freecell = FREECELL;
//Building Cells
creatingCellLocations();
//Number of Cells
//Number of initial infected cells
int Ni = NumberOfCells*MOI[q]; if(Ni < 1){ printf("Use larger MOI"); exit(0);}
int Nx = (2*NumberOfLayers-1); //Range of cells on x axis
int Ny = (2*NumberOfLayers-1); //Range of cells on y axis
//Making empty matrices
allocateMemory(Nx, Ny);
//Initializing
initailConditions(Nx, Ny);
//Deletes files and initial with values
if(BigIndex == 0){
printToFileCellAndVirusInitial(Nx, Ny, NumberOfLayers);
}
printToFileCellAndVirusAnalysisInitial(Nx, Ny);
//Infects a random cell, now seen as (e)
infectANumberOfCellsRandomly(Nx, Ny, Ni);
if(RUNCPU == 0){
cudaMalloc((void**)&state, Nx*Ny*sizeof(curandState));
errorCheck("cudaMalloc Random Setup");
cuRand_Setup<<<GridConfig,BlockConfig>>>(state);
errorCheck("Random Setup");
loadConstants(MOI[q], probi[k]);
deviceSetupAndMemoryAllocation(Nx, Ny);
cudaMemcpy( cells_GPU, cells, Nx*Ny*2*sizeof(char), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy cells HtoD");
cudaMemcpy( vtemp_GPU, vtemp, Nx*Ny*2*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy vtemp HtoD");
cudaMemcpy( ut_GPU, ut, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy ut HtoD");
cudaMemcpy( ecl_GPU, ecl, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy ecl HtoD");
cudaMemcpy( inf_GPU, inf, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy inf HtoD");
cudaMemcpy( th_GPU, th, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy th HtoD");
cudaMemcpy( EclipsePhaseLength_GPU, EclipsePhaseLength, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy EclipsePhaseLength HtoD");
cudaMemcpy( InfectionPhaseLength_GPU, InfectionPhaseLength, Nx*Ny*sizeof(float), cudaMemcpyHostToDevice );
errorCheck("cudaMemcpy InfectionPhaseLength HtoD");
}
//Runs simulation
int NumberofTimeSteps = endtime/timestep;
int NumberofSavedTimeSteps = NumberofTimeSteps/Save;
int timestepcount = 0; //equal to the number of ts elapsed
while(timestepcount < (NumberofTimeSteps-1)){
if(RUNCPU == 0){
kernel<<<GridConfig,BlockConfig>>>(cells_GPU, vtemp_GPU, ut_GPU, ecl_GPU, inf_GPU, th_GPU, EclipsePhaseLength_GPU, InfectionPhaseLength_GPU, SystemConstants, cell2cell, freecell, state, NumberOfLayers, probi[k]);
}
else{
//Cerial Viral Transmission
cerialViralTransmission(Nx, Ny, cell2cell, freecell, probi[k]);
// modifiedCerialViralTransmission(Nx, Ny, cell2cell, freecell, probi[k]);
}
if((timestepcount%Save) == 0){
if(RUNCPU == 0){
cudaMemcpy( cells, cells_GPU, Nx*Ny*2*sizeof(char), cudaMemcpyDeviceToHost );
errorCheck("cudaMemcpy cells DtoH");
cudaMemcpy( vtemp, vtemp_GPU, Nx*Ny*2*sizeof(float), cudaMemcpyDeviceToHost );
errorCheck("cudaMemcpy vtemp DtoH");
}
//Analysis of the dish
NumberDead1 = 0;
NumberInfected1 = 0;
NumberEclipse1 = 0;
NumberHealthy1 = 0;
AmountOfVirus = 0.0;
for(int j=0; j<Ny; j++){
for(int i=0; i<Nx; i++){
AmountOfVirus = AmountOfVirus + vtemp[i+Nx*j+Nx*Ny*0];
if(cells[i+Nx*j+Nx*Ny*0] == 'd'){
NumberDead1 = NumberDead1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'i'){
NumberInfected1 = NumberInfected1 + 1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'e'){
NumberEclipse1 = NumberEclipse1 +1;
}
else if(cells[i+Nx*j+Nx*Ny*0] == 'h'){
NumberHealthy1 = NumberHealthy1 + 1;
}
}
}
//Prints status of cells virus
if(BigIndex == 0){
printToFileCellAndVirus(Nx, Ny, NumberOfLayers);
}
printToFileCellAndVirusAnalysis(timestepcount*timestep);
}
//Number of days completed
// if((timestepcount%(24*int(1/timestep))) == 0){
// printf("%.0f Day\n",(timestepcount*timestep)/24);
// }
// if((NumberHealthy1 == 0)){
// cell2cell = 0;
// freecell = 0;
// }
// else{
// cell2cell = CELL2CELL;
// freecell = FREECELL;
// }
// //End Code if Virus has below 10
// if((AmountOfVirus < pow(10,1.0)) && (NumberDead1 == NumberOfCells)){
// break;
// }
if((NumberInfected1 == 0) && (NumberEclipse1 == 0)){
cell2cell = 0;
freecell = 0;
}
else{
cell2cell = CELL2CELL;
freecell = FREECELL;
}
//End Code if Virus has below 10
if((AmountOfVirus < pow(10,1.0)) && (NumberInfected1 == 0) && (NumberEclipse1 == 0)){
break;
}
timestepcount = timestepcount+1;
}
//Writes a file with all of our parameters/variables
createParameterFile(timestep, NumberofSavedTimeSteps, endtime, timestepcount, AmountOfVirus, rho, D, deltxprime, c, probi[k]);
printf("\nMOI(%.1f) probi(%.1f): %d of %d Runs Done\n", log10(MOI[q]), probi[k], (BigIndex+1), NumberOfRuns);
freeMemory();
// auto finish = std::chrono::high_resolution_clock::now();
// chrono::duration<double> elapsed = finish - start;
// cout << "Elapsed time: " << elapsed.count() << " s";
}
}
}
printf("\nPROGRAM DONE\n");
}
|
4086edae5b93633316ef4302ad37e885f8d5b41b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Blur filter. Device code. */
#ifndef _BLUR_FILTER_KERNEL_H_
#define _BLUR_FILTER_KERNEL_H_
#include "blur_filter.h"
__global__ void
blur_filter_kernel (const float *in, float *out, int size)
{
/* Obtain thread location within the block */
int tx, ty;
tx = threadIdx.x;
ty = threadIdx.y;
int bx, by;
bx = blockIdx.x;
by = blockIdx.y;
int bdimx, bdimy;
bdimx = blockDim.x;
bdimy = blockDim.y;
// int imgSize = size;
int row = bdimy * by + ty;
int col = bdimx * bx + tx;
int i, j;
int curr_row, curr_col;
float blur_value;
int num_neighbors;
/* Apply blur filter to current pixel */
blur_value = 0.0;
num_neighbors = 0;
for (i = -BLUR_SIZE; i < (BLUR_SIZE + 1); i++) {
for (j = -BLUR_SIZE; j < (BLUR_SIZE + 1); j++) {
/* Accumulate values of neighbors while checking for
* boundary conditions */
curr_row = row + i;
curr_col = col + j;
if ((curr_row > -1) && (curr_row < size) &&\
(curr_col > -1) && (curr_col < size)) {
blur_value += in[curr_row * size + curr_col];
num_neighbors += 1;
}
}
}
out[row * size + col] = blur_value/num_neighbors;
return;
}
#endif /* _BLUR_FILTER_KERNEL_H_ */
| 4086edae5b93633316ef4302ad37e885f8d5b41b.cu | /* Blur filter. Device code. */
#ifndef _BLUR_FILTER_KERNEL_H_
#define _BLUR_FILTER_KERNEL_H_
#include "blur_filter.h"
__global__ void
blur_filter_kernel (const float *in, float *out, int size)
{
/* Obtain thread location within the block */
int tx, ty;
tx = threadIdx.x;
ty = threadIdx.y;
int bx, by;
bx = blockIdx.x;
by = blockIdx.y;
int bdimx, bdimy;
bdimx = blockDim.x;
bdimy = blockDim.y;
// int imgSize = size;
int row = bdimy * by + ty;
int col = bdimx * bx + tx;
int i, j;
int curr_row, curr_col;
float blur_value;
int num_neighbors;
/* Apply blur filter to current pixel */
blur_value = 0.0;
num_neighbors = 0;
for (i = -BLUR_SIZE; i < (BLUR_SIZE + 1); i++) {
for (j = -BLUR_SIZE; j < (BLUR_SIZE + 1); j++) {
/* Accumulate values of neighbors while checking for
* boundary conditions */
curr_row = row + i;
curr_col = col + j;
if ((curr_row > -1) && (curr_row < size) &&\
(curr_col > -1) && (curr_col < size)) {
blur_value += in[curr_row * size + curr_col];
num_neighbors += 1;
}
}
}
out[row * size + col] = blur_value/num_neighbors;
return;
}
#endif /* _BLUR_FILTER_KERNEL_H_ */
|
4221b64246b92457e562e3d13e8ff71973eb2c9c.hip | // !!! This is a file automatically generated by hipify!!!
#pragma comment(lib, "cudart.lib")
#pragma comment(lib, "hiprand.lib")
#include "../include/global_funcs.h"
#include "../include/sci_const.h"
#include "../include/device_compute_funcs.cuh"
#include "../include/common.hpp"
#include "../include/PrintStruct.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include <cmath>
#include <hip/hip_vector_types.h>
#include <hip/hip_runtime.h>
//Generate double-precision uniform random numbers on [0,1)
//Parameters: Array: double-precision array  Size: array length
//void UniformRandomArrayD(double* Array, const long Size)
//{
// hiprandGenerator_t gen; //
// hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A); //
// hiprandSetPseudoRandomGeneratorSeed(gen, 11ULL); //
// hiprandGenerateUniformDouble(gen, Array, Size); //0-1
// hiprandDestroyGenerator(gen); //
// return;
//}
//
////Generate double-precision normally distributed random numbers
////Parameters: Array: double-precision array  Size: array length  Mean: mean (0)  Stddev: standard deviation (0.7)
//void NormalRandomArrayD(double* Array, const long Size, double Mean, double Stddev)
//{
// hiprandGenerator_t gen; //
// hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A); //
// hiprandSetPseudoRandomGeneratorSeed(gen, 11ULL); //
// hiprandGenerateNormalDouble(gen, Array, Size, Mean, Stddev); //
// hiprandDestroyGenerator(gen); //
// return;
//}
////Generate double-precision random numbers from a double (two-peaked) normal distribution
////Parameters: Array1: double array 1  Array2: double array 2  Array3: double array 3  Array4: double array 4
////Size: array length  Nudis: half the internuclear spacing (2)  Stddev: standard deviation (0.7)
//__global__ void DoubleNormalRandomArrayD(double* Array1, double* Array2, double* Array3, double* Array4,
// const long Size )
//{
// int i = threadIdx.x;
// double temp1 = 1;
// double temp2 = 1;
//
// Array1[i] = (Array1[i] - 0.5) * 20;
// Array3[i] = (Array3[i] - 0.5) * 20;
//
// temp1 = exp((-pow((Array1[i] - nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)))
// + exp((-pow((Array1[i] + nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)));
// temp2 = exp((-pow((Array3[i] - nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)))
// + exp((-pow((Array3[i] + nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)));
//
// if (Array2[i] > temp1 && Array4[i] > temp2)
// {
// Array1[i] = -99;
// Array3[i] = -99;
// }
// return;
//}
//
////Linear transfer of the accepted samples into the nuclei array
//__global__ void LinearTransmissionD(nuclei* Array, double* DTempArr1, double* DTempArr3, const long Size, int& i, int& j)
//{
// int p, q;
// hipMalloc((void **)(&p), 4);
// hipMalloc((void **)(&q), 4);
// hipMemcpy(&p, &i, 4, hipMemcpyHostToDevice);
// hipMemcpy(&p, &i, 4, hipMemcpyHostToDevice);
// while (i < Size && (i + j) < 2 * Size)
// {
// if (DTempArr1[i + j] == -99)
// {
// j++;
// }
// else {
// Array[i].first.x = DTempArr1[i + j] * sin(rotation*PI);
// Array[i].first.y = 0;
// Array[i].first.z = DTempArr1[i + j] * cos(rotation*PI);
// Array[i].second.x = DTempArr3[i + j] * sin(rotation*PI);
// Array[i].second.y = 0;
// Array[i].second.z = DTempArr3[i + j] * cos(rotation*PI);
// i++;
// }
// }
// hipMemcpy(&i, &p, 4, hipMemcpyDeviceToHost);
// hipMemcpy(&j, &q, 4, hipMemcpyDeviceToHost);
// return;
//}
//
////
////: Array: Size: Angle:
//void NucleiRandomD(nuclei* Array, const long Size)
//{
// int i(0);
// int j(0);
// size_t DoubleSize = 2 * Size * sizeof(double);
// double *DTempArr1, *DTempArr2, *DTempArr3, *DTempArr4;
// hipMalloc((void**)&DTempArr1, DoubleSize);
// hipMalloc((void**)&DTempArr2, DoubleSize);
// hipMalloc((void**)&DTempArr3, DoubleSize);
// hipMalloc((void**)&DTempArr4, DoubleSize);
//
// while (i < Size)
// {
// UniformRandomArrayD(DTempArr1, 2 * Size);
// UniformRandomArrayD(DTempArr2, 2 * Size);
// UniformRandomArrayD(DTempArr3, 2 * Size);
// UniformRandomArrayD(DTempArr4, 2 * Size);
//
// int threadsPerBlock = 256;
// int threadsPerGrid = (2 * Size + threadsPerBlock - 1) / threadsPerBlock;
// DoubleNormalRandomArrayD <<<threadsPerGrid, threadsPerBlock >>> (DTempArr1, DTempArr2, DTempArr3, DTempArr4, 2 * Size);
// LinearTransmissionD <<<1,1>>>(Array, DTempArr1, DTempArr3, Size, i, j);
// }
//}
//Rejection-sample the two coordinates of each pair from a double-Gaussian distribution,
//rotate them by rotation*PI, retry until the total energy E_kall is non-negative, then
//assign momenta with px_py_pz_distribution.
__global__ void DoubleNormalRandomArrayD(nuclei* Array, const long Size)
{
double A1, A2, A3, A4;
double Ekall = -1;
double temp1 = 1;
double temp2 = 1;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < Size)
{
hiprandState_t s;
int seed = -i;
hiprand_init(seed, 0, 0, &s);
while (Ekall < 0)
{
A2 = A4 = 2;
while (A2 > temp1 && A4 > temp2)
{
A1 = hiprand_uniform_double(&s);
A2 = hiprand_uniform_double(&s);
A3 = hiprand_uniform_double(&s);
A4 = hiprand_uniform_double(&s);
A1 = (A1 - 0.5) * 20;
A3 = (A3 - 0.5) * 20;
temp1 = exp((-pow((A1 - mean), 2)) / (mean * stddev * stddev))
+ exp((-pow((A1 + mean), 2)) / (mean * stddev * stddev));
temp2 = exp((-pow((A3 - mean), 2)) / (mean * stddev * stddev))
+ exp((-pow((A3 + mean), 2)) / (mean * stddev * stddev));
}
//printf("%lf\t%lf\n", A1,A3);
Array[i].first.x = A1 * sin(rotation*PI);
Array[i].first.y = 0;
Array[i].first.z = A1 * cos(rotation*PI);
Array[i].second.x = A3 * sin(rotation*PI);
Array[i].second.y = 0;
Array[i].second.z = A3 * cos(rotation*PI);
Ekall = E_kall(Array[i].first, Array[i].second);
//printf("%lf\n", Ekall);
}
px_py_pz_distribution(Array[i].first, Array[i].second, Ekall, i);
}
return;
}
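//First propagation stage: each thread advances one coordinate pair through one_steps
//iterations of update_step_one.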
__global__ void first_step_on_gpu(nuclei* first_arr, const long size)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//printf("%p\n", &first_arr);
if(idx<size)
{
//printf("%d\n", idx);
for (int i = 0; i < one_steps; i++)
update_step_one(first_arr[idx].first, first_arr[idx].second);
}
}
__global__ void pre_second_step_aw(double* AW)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 2 * two_steps)
{
double field_strength = sqrt((2.8e15) / (3.51e16)); //field amplitude E0: sqrt(intensity / 3.51e16 W/cm^2), in atomic units
double t0 = 2 * PI / omega;
double t1 = 0.5 * DX * idx;
AW[idx] = (field_strength / omega) * (pow(sin((PI * t1) / (10 * t0)), 2)) * cos(omega * t1);
}
}
__global__ void pre_second_step_ds(double* AW,double* DS)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 2*two_steps)
{
if (idx == 0)
DS[idx] = (AW[1] - AW[0]) / (0.5*DX);
if (idx == (2 * two_steps - 1))
DS[idx] = (AW[idx] - AW[idx - 1]) / (0.5*DX);
else
{
DS[idx] = (AW[idx + 1] - AW[idx - 1]) / 2.0 /(0.5* DX);
}
}
}
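//Second propagation stage: each thread advances one pair through two_steps iterations of
//update_step_two, sampling the field-derivative table DS at t, t + DX/2, and t + DX for the
//four stage evaluations. Pairs that finish with both energies ee1 and ee2 positive are
//appended to second_arr_fliter through the atomic counter ee1_ee2_count.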
__global__ void second_step_on_gpu(nuclei* second_arr, nuclei* second_arr_fliter , const long size,double* DS,unsigned long long* ee1_ee2_count)
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
double e_laser_t1=0.0, e_laser_t2=0.0, e_laser_t3=0.0, e_laser_t4=0.0;
int idx_of_ds=-1; //index into the DS (field derivative) table
double t1=0.0, t2=0.0, t3=0.0, t4=0.0;
double now_t=0.0; //current time t at the start of the step
if (idx<size)
{
for (int i = 0; i < two_steps; i++)
{
//field at the start of the step (t)
t1 = now_t;
if (t1 == 0)
e_laser_t1 = 0.0;
else
{
idx_of_ds = (2.0 * t1) / DX - 1;
e_laser_t1 = DS[idx_of_ds];
}
//field at the half step (t + DX/2)
t2 = now_t + DX / 2.0;
idx_of_ds = 2.0 * t2 / DX- 1;
e_laser_t2 = DS[idx_of_ds];
//field at the half step (t + DX/2), second evaluation
t3 = now_t + DX / 2.0;
idx_of_ds = 2 * t3 / DX- 1;
e_laser_t3 = DS[idx_of_ds];
//field at the end of the step (t + DX)
t4 = now_t + DX;
idx_of_ds = 2.0 * t4 / DX - 1;
e_laser_t4 = DS[idx_of_ds];
update_step_two(second_arr[idx].first, second_arr[idx].second,
e_laser_t1,e_laser_t2,e_laser_t3,e_laser_t4);
now_t = now_t + DX;
/*if(idx_of_ds == -1 )
update_step_two(second_arr[idx].first, second_arr[idx].second,
0.0,DS[0],DS[0],DS[1]);
else
{
update_step_two(second_arr[idx].first, second_arr[idx].second,
DS[idx_of_ds], DS[idx_of_ds + 1], DS[idx_of_ds + 1], DS[idx_of_ds + 2]);
}
idx_of_ds += 2;*/
}
double ee1 = CalculationE1(second_arr[idx].first, second_arr[idx].second);
double ee2 = CalculationE2(second_arr[idx].first, second_arr[idx].second);
if (ee1>0 && ee2>0)
{
unsigned long long temp_idx = atomicAdd(ee1_ee2_count, 1);
nuclei temp;
temp.first = second_arr[idx].first;
temp.second = second_arr[idx].second;
second_arr_fliter[temp_idx] = temp; //atomicAdd returns the previous count, i.e. the next free slot
}
}
}
//Host wrapper: launches DoubleNormalRandomArrayD to generate the initial coordinate pairs
void NucleiRandomD(nuclei* Array, const long Size)
{
int dimx = 512;
dim3 block(dimx);
dim3 grid((Size + block.x - 1) / block.x, 1);
hipLaunchKernelGGL(( DoubleNormalRandomArrayD) , dim3(grid), dim3(block) , 0, 0, Array, Size);
CHECK(hipDeviceSynchronize());
}
void NucleiFisrtStep(nuclei* first_array, const long size)
{
int dimx = 32;
dim3 block(dimx);
dim3 grid((size + block.x - 1) / block.x, 1);
hipLaunchKernelGGL(( first_step_on_gpu) , dim3(grid), dim3(block) , 0, 0, first_array, size);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "1st Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return;
}
CHECK(hipDeviceSynchronize());
}
void NucleiSecondStep(nuclei* second_array, nuclei* second_array_fliter, const long size, double* aw, double* ds, unsigned long long* count)
{
//precompute the field tables AW and DS (DS is the numerical derivative of AW)
int pre_dimx = 512;
dim3 pre_block(pre_dimx);
dim3 pre_grid((2 * two_steps_in_host + pre_block.x - 1) / pre_block.x, 1);
hipLaunchKernelGGL(( pre_second_step_aw) , dim3(pre_grid),dim3(pre_block), 0, 0, aw);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( pre_second_step_ds) , dim3(pre_grid), dim3(pre_block) , 0, 0, aw, ds);
CHECK(hipDeviceSynchronize());
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "2nd Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return;
}
//launch the second propagation stage over all pairs
int dimx = 32;
dim3 block(dimx);
dim3 grid((size + block.x - 1) / block.x, 1);
hipLaunchKernelGGL(( second_step_on_gpu) , dim3(grid), dim3(block) , 0, 0, second_array, second_array_fliter,size, ds,count);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "2nd Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return;
}
CHECK(hipDeviceSynchronize());
}
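//Full pipeline for one batch of `pairs` samples: generate the initial coordinates on the GPU,
//run the first and second propagation stages, copy each stage back to the host, write the
//intermediate and filtered results with PrintStruct/PrintArray, and release the device buffers.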
void compute_on_gpu_one(const long pairs,const char* file_name)
{
long long nBytes = pairs * sizeof(nuclei);
printf("Use %lld Bytes %lfMB\n", nBytes, nBytes / double(1024 * 1024));
nuclei *gpu_init,*gpu_first,*gpu_second,*gpu_second_fliter;
nuclei *host_init,*host_first,*host_second,*host_second_fliter;
host_init = (nuclei*)malloc(nBytes);
host_first = (nuclei*)malloc(nBytes);
host_second = (nuclei*)malloc(nBytes);
host_second_fliter = (nuclei*)malloc(nBytes);
//
//init
double start = seconds();
CHECK(hipMalloc((void **)(&gpu_init), nBytes));
//
NucleiRandomD(gpu_init, pairs);
//allocate the first-stage buffer and copy the initial state into it (device to device)
CHECK(hipMalloc((void **)(&gpu_first), nBytes));
CHECK(hipMemcpy(gpu_first, gpu_init, nBytes, hipMemcpyDeviceToDevice));
//
CHECK(hipMemcpy(host_init, gpu_init, nBytes, hipMemcpyDeviceToHost));
PrintStruct(host_init, pairs, file_name, 0);
//free the init buffer
CHECK(hipFree(gpu_init));
double elapse = seconds();
printf("Inition compltete %lf\n", elapse - start);
//
//
//first
start = seconds();
//
NucleiFisrtStep(gpu_first, pairs);
//allocate the second-stage buffer and copy the first-stage result into it (device to device)
CHECK(hipMalloc((void **)(&gpu_second), nBytes));
CHECK(hipMemcpy(gpu_second, gpu_first, nBytes, hipMemcpyDeviceToDevice));
//
CHECK(hipMemcpy(host_first, gpu_first, nBytes, hipMemcpyDeviceToHost));
PrintStruct(host_first, pairs, file_name, 1);
//free the first-stage buffer
CHECK(hipFree(gpu_first));
elapse = seconds();
printf("FirstStep compltete %lf\n", elapse - start);
//
//
start = seconds();
//
double *gpu_aw, *gpu_ds;
double *host_aw, *host_ds;
long bytes_of_aw_ds = sizeof(double) * 2 * two_steps_in_host;
CHECK(hipMalloc((void **)(&gpu_aw), bytes_of_aw_ds));
CHECK(hipMalloc((void **)(&gpu_ds), bytes_of_aw_ds));
host_aw = (double*)malloc(bytes_of_aw_ds);
host_ds = (double*)malloc(bytes_of_aw_ds);
//
unsigned long long*gpu_count,*host_count;
int bytes_of_u_long = sizeof(unsigned long long);
host_count = (unsigned long long*)malloc(bytes_of_u_long);
CHECK(hipMalloc((void **)(&gpu_count), bytes_of_u_long));
CHECK(hipMemset(gpu_count, 0, bytes_of_u_long));
CHECK(hipMalloc((void **)(&gpu_second_fliter), nBytes));
//
NucleiSecondStep(gpu_second, gpu_second_fliter,pairs, gpu_aw, gpu_ds,gpu_count);
//
CHECK(hipMemcpy(host_second, gpu_second, nBytes, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(host_second_fliter, gpu_second_fliter, nBytes, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(host_aw, gpu_aw, bytes_of_aw_ds, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(host_ds, gpu_ds, bytes_of_aw_ds, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(host_count, gpu_count, bytes_of_u_long, hipMemcpyDeviceToHost));
printf("%ld\n", *host_count);
PrintStruct(host_second, pairs,file_name , 2);
PrintArray(host_aw, 2 * two_steps_in_host, file_name, 0);
PrintArray(host_ds, 2 * two_steps_in_host, file_name, 1);
PrintStruct(host_second_fliter, *host_count, file_name, 3);
// free the second buffer
CHECK(hipFree(gpu_second));
CHECK(hipFree(gpu_aw));
CHECK(hipFree(gpu_ds));
elapse = seconds();
printf("SecondStep complete %lf\n", elapse - start);
// second step done!
// free host memory
//free(host_aw);
//free(host_ds);
//free(host_first);
//free(host_init);
//free(host_init);
return;
} | 4221b64246b92457e562e3d13e8ff71973eb2c9c.cu | #pragma comment(lib, "cudart.lib")
#pragma comment(lib, "curand.lib")
#include "../include/global_funcs.h"
#include "../include/sci_const.h"
#include "../include/device_compute_funcs.cuh"
#include "../include/common.hpp"
#include "../include/PrintStruct.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <curand_kernel.h>
#include <cmath>
#include <vector_types.h>
#include <cuda_runtime.h>
//Generate double-precision random numbers uniformly distributed on (0,1)
//Parameters: Array: double array  Size: array length
//void UniformRandomArrayD(double* Array, const long Size)
//{
// curandGenerator_t gen; //random-number generator handle
// curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A); //select the algorithm
// curandSetPseudoRandomGeneratorSeed(gen, 11ULL); //seed the generator
// curandGenerateUniformDouble(gen, Array, Size); //generate uniform (0,1) random numbers into the buffer
// curandDestroyGenerator(gen); //release the generator
// return;
//}
//
////Generate double-precision normally distributed random numbers
////Parameters: Array: double array  Size: array length  Mean: mean (0)  Stddev: standard deviation (0.7)
//void NormalRandomArrayD(double* Array, const long Size, double Mean, double Stddev)
//{
// curandGenerator_t gen; //random-number generator handle
// curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A); //select the algorithm
// curandSetPseudoRandomGeneratorSeed(gen, 11ULL); //seed the generator
// curandGenerateNormalDouble(gen, Array, Size, Mean, Stddev); //generate normally distributed random numbers into the buffer
// curandDestroyGenerator(gen); //release the generator
// return;
//}
////Generate double-precision random numbers from a double (two-centre) normal distribution
////Parameters: Array1: double array 1  Array2: double array 2  Array3: double array 3  Array4: double array 4
////Size: array length  Nudis: half the internuclear spacing (2)  Stddev: standard deviation (0.7)
//__global__ void DoubleNormalRandomArrayD(double* Array1, double* Array2, double* Array3, double* Array4,
// const long Size )
//{
// int i = threadIdx.x;
// double temp1 = 1;
// double temp2 = 1;
//
// Array1[i] = (Array1[i] - 0.5) * 20;
// Array3[i] = (Array3[i] - 0.5) * 20;
//
// temp1 = exp((-pow((Array1[i] - nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)))
// + exp((-pow((Array1[i] + nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)));
// temp2 = exp((-pow((Array3[i] - nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)))
// + exp((-pow((Array3[i] + nuclear_spacing/2.0), nuclear_spacing/2.0)) / (nuclear_spacing/2.0 * pow(stddev, nuclear_spacing/2.0)));
//
// if (Array2[i] > temp1 && Array4[i] > temp2)
// {
// Array1[i] = -99;
// Array3[i] = -99;
// }
// return;
//}
//
////Linear parameter passing (copy accepted samples into the particle array)
//__global__ void LinearTransmissionD(nuclei* Array, double* DTempArr1, double* DTempArr3, const long Size, int& i, int& j)
//{
// int p, q;
// cudaMalloc((void **)(&p), 4);
// cudaMalloc((void **)(&q), 4);
// cudaMemcpy(&p, &i, 4, cudaMemcpyHostToDevice);
// cudaMemcpy(&p, &i, 4, cudaMemcpyHostToDevice);
// while (i < Size && (i + j) < 2 * Size)
// {
// if (DTempArr1[i + j] == -99)
// {
// j++;
// }
// else {
// Array[i].first.x = DTempArr1[i + j] * sin(rotation*PI);
// Array[i].first.y = 0;
// Array[i].first.z = DTempArr1[i + j] * cos(rotation*PI);
// Array[i].second.x = DTempArr3[i + j] * sin(rotation*PI);
// Array[i].second.y = 0;
// Array[i].second.z = DTempArr3[i + j] * cos(rotation*PI);
// i++;
// }
// }
// cudaMemcpy(&i, &p, 4, cudaMemcpyDeviceToHost);
// cudaMemcpy(&j, &q, 4, cudaMemcpyDeviceToHost);
// return;
//}
//
////Randomize the two-nucleus particle pairs
////Parameters: Array: particle array  Size: array length  Angle: offset angle
//void NucleiRandomD(nuclei* Array, const long Size)
//{
// int i(0);
// int j(0);
// size_t DoubleSize = 2 * Size * sizeof(double);
// double *DTempArr1, *DTempArr2, *DTempArr3, *DTempArr4;
// cudaMalloc((void**)&DTempArr1, DoubleSize);
// cudaMalloc((void**)&DTempArr2, DoubleSize);
// cudaMalloc((void**)&DTempArr3, DoubleSize);
// cudaMalloc((void**)&DTempArr4, DoubleSize);
//
// while (i < Size)
// {
// UniformRandomArrayD(DTempArr1, 2 * Size);
// UniformRandomArrayD(DTempArr2, 2 * Size);
// UniformRandomArrayD(DTempArr3, 2 * Size);
// UniformRandomArrayD(DTempArr4, 2 * Size);
//
// int threadsPerBlock = 256;
// int threadsPerGrid = (2 * Size + threadsPerBlock - 1) / threadsPerBlock;
// DoubleNormalRandomArrayD <<<threadsPerGrid, threadsPerBlock >>> (DTempArr1, DTempArr2, DTempArr3, DTempArr4, 2 * Size);
// LinearTransmissionD <<<1,1>>>(Array, DTempArr1, DTempArr3, Size, i, j);
// }
//}
//Generate double-precision, double-Gaussian random numbers as double3 coordinates
__global__ void DoubleNormalRandomArrayD(nuclei* Array, const long Size)
{
double A1, A2, A3, A4;
double Ekall = -1;
double temp1 = 1;
double temp2 = 1;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < Size)
{
curandState s;
int seed = -i;
curand_init(seed, 0, 0, &s);
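// rejection-sample both coordinates from the two-centre Gaussian density and retry until the pair's total energy E_kall is non-negative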
while (Ekall < 0)
{
A2 = A4 = 2;
while (A2 > temp1 && A4 > temp2)
{
A1 = curand_uniform_double(&s);
A2 = curand_uniform_double(&s);
A3 = curand_uniform_double(&s);
A4 = curand_uniform_double(&s);
A1 = (A1 - 0.5) * 20;
A3 = (A3 - 0.5) * 20;
temp1 = exp((-pow((A1 - mean), 2)) / (mean * stddev * stddev))
+ exp((-pow((A1 + mean), 2)) / (mean * stddev * stddev));
temp2 = exp((-pow((A3 - mean), 2)) / (mean * stddev * stddev))
+ exp((-pow((A3 + mean), 2)) / (mean * stddev * stddev));
}
//printf("%lf\t%lf\n", A1,A3);
Array[i].first.x = A1 * sin(rotation*PI);
Array[i].first.y = 0;
Array[i].first.z = A1 * cos(rotation*PI);
Array[i].second.x = A3 * sin(rotation*PI);
Array[i].second.y = 0;
Array[i].second.z = A3 * cos(rotation*PI);
Ekall = E_kall(Array[i].first, Array[i].second);
//printf("%lf\n", Ekall);
}
px_py_pz_distribution(Array[i].first, Array[i].second, Ekall, i);
}
return;
}
__global__ void first_step_on_gpu(nuclei* first_arr, const long size)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//printf("%p\n", &first_arr);
if(idx<size)
{
//printf("%d\n", idx);
for (int i = 0; i < one_steps; i++)
update_step_one(first_arr[idx].first, first_arr[idx].second);
}
}
__global__ void pre_second_step_aw(double* AW)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 2 * two_steps)
{
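// tabulate the laser vector potential on a half-step (0.5*DX) time grid: sin^2 pulse envelope times a cos carrier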
double field_strength = sqrt((2.8e15) / (3.51e16)); // field strength, corresponds to the old ee0 variable
double t0 = 2 * PI / omega;
double t1 = 0.5 * DX * idx;
AW[idx] = (field_strength / omega) * (pow(sin((PI * t1) / (10 * t0)), 2)) * cos(omega * t1);
}
}
__global__ void pre_second_step_ds(double* AW,double* DS)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < 2*two_steps)
{
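// numerical time derivative of AW: one-sided differences at the two ends, central difference elsewhere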
if (idx == 0)
DS[idx] = (AW[1] - AW[0]) / (0.5*DX);
else if (idx == (2 * two_steps - 1))
DS[idx] = (AW[idx] - AW[idx - 1]) / (0.5*DX);
else
{
DS[idx] = (AW[idx + 1] - AW[idx - 1]) / 2.0 /(0.5* DX);
}
}
}
__global__ void second_step_on_gpu(nuclei* second_arr, nuclei* second_arr_fliter , const long size,double* DS,unsigned long long* ee1_ee2_count)
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
double e_laser_t1=0.0, e_laser_t2=0.0, e_laser_t3=0.0, e_laser_t4=0.0;
int idx_of_ds=-1; // index into DS, equivalent to nn in the original code
double t1=0.0, t2=0.0, t3=0.0, t4=0.0;
double now_t=0.0; //current time, equivalent to t(1)
if (idx<size)
{
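// march each pair through the pulse in DX steps; the field is sampled at t, t+DX/2, t+DX/2 and t+DX, the pattern of a classical 4th-order Runge-Kutta update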
for (int i = 0; i < two_steps; i++)
{
//laser field strength at the first sub-step
t1 = now_t;
if (t1 == 0)
e_laser_t1 = 0.0;
else
{
idx_of_ds = (2.0 * t1) / DX - 1;
e_laser_t1 = DS[idx_of_ds];
}
//laser field strength at the second sub-step
t2 = now_t + DX / 2.0;
idx_of_ds = 2.0 * t2 / DX- 1;
e_laser_t2 = DS[idx_of_ds];
//laser field strength at the third sub-step
t3 = now_t + DX / 2.0;
idx_of_ds = 2 * t3 / DX- 1;
e_laser_t3 = DS[idx_of_ds];
//laser field strength at the fourth sub-step
t4 = now_t + DX;
idx_of_ds = 2.0 * t4 / DX - 1;
e_laser_t4 = DS[idx_of_ds];
update_step_two(second_arr[idx].first, second_arr[idx].second,
e_laser_t1,e_laser_t2,e_laser_t3,e_laser_t4);
now_t = now_t + DX;
/*if(idx_of_ds == -1 )
update_step_two(second_arr[idx].first, second_arr[idx].second,
0.0,DS[0],DS[0],DS[1]);
else
{
update_step_two(second_arr[idx].first, second_arr[idx].second,
DS[idx_of_ds], DS[idx_of_ds + 1], DS[idx_of_ds + 1], DS[idx_of_ds + 2]);
}
idx_of_ds += 2;*/
}
double ee1 = CalculationE1(second_arr[idx].first, second_arr[idx].second);
double ee2 = CalculationE2(second_arr[idx].first, second_arr[idx].second);
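// copy pairs whose energies ee1 and ee2 are both positive into the filtered array; atomicAdd reserves the output slot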
if (ee1>0 && ee2>0)
{
unsigned long long temp_idx = atomicAdd(ee1_ee2_count, 1);
nuclei temp;
temp.first = second_arr[idx].first;
temp.second = second_arr[idx].second;
second_arr_fliter[temp_idx-1] = temp;
}
}
}
//Randomize the two-nucleus particle pairs
void NucleiRandomD(nuclei* Array, const long Size)
{
int dimx = 512;
dim3 block(dimx);
dim3 grid((Size + block.x - 1) / block.x, 1);
DoubleNormalRandomArrayD <<< grid, block >>> (Array, Size);
CHECK(cudaDeviceSynchronize());
}
void NucleiFisrtStep(nuclei* first_array, const long size)
{
int dimx = 32;
dim3 block(dimx);
dim3 grid((size + block.x - 1) / block.x, 1);
first_step_on_gpu <<< grid, block >>> (first_array, size);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "1st Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return;
}
CHECK(cudaDeviceSynchronize());
}
void NucleiSecondStep(nuclei* second_array, nuclei* second_array_fliter, const long size, double* aw, double* ds, unsigned long long* count)
{
//prepare the vector potential
int pre_dimx = 512;
dim3 pre_block(pre_dimx);
dim3 pre_grid((2 * two_steps_in_host + pre_block.x - 1) / pre_block.x, 1);
pre_second_step_aw <<< pre_grid,pre_block>>> (aw);
CHECK(cudaDeviceSynchronize());
pre_second_step_ds <<< pre_grid, pre_block >>> (aw, ds);
CHECK(cudaDeviceSynchronize());
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "2nd Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return;
}
//compute the second step
int dimx = 32;
dim3 block(dimx);
dim3 grid((size + block.x - 1) / block.x, 1);
second_step_on_gpu <<< grid, block >>> (second_array, second_array_fliter,size, ds,count);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "2nd Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return;
}
CHECK(cudaDeviceSynchronize());
}
void compute_on_gpu_one(const long pairs,const char* file_name)
{
long long nBytes = pairs * sizeof(nuclei);
printf("Use %lld Bytes %lfMB\n", nBytes, nBytes / double(1024 * 1024));
nuclei *gpu_init,*gpu_first,*gpu_second,*gpu_second_fliter;
nuclei *host_init,*host_first,*host_second,*host_second_fliter;
host_init = (nuclei*)malloc(nBytes);
host_first = (nuclei*)malloc(nBytes);
host_second = (nuclei*)malloc(nBytes);
host_second_fliter = (nuclei*)malloc(nBytes);
//Initialization!
//allocate the init buffer
double start = seconds();
CHECK(cudaMalloc((void **)(&gpu_init), nBytes));
//compute
NucleiRandomD(gpu_init, pairs);
//copy the values to the first step (this also allocates the first-step buffer)
CHECK(cudaMalloc((void **)(&gpu_first), nBytes));
CHECK(cudaMemcpy(gpu_first, gpu_init, nBytes, cudaMemcpyDeviceToDevice));
//copy back and save
CHECK(cudaMemcpy(host_init, gpu_init, nBytes, cudaMemcpyDeviceToHost));
PrintStruct(host_init, pairs, file_name, 0);
//free the init buffer
CHECK(cudaFree(gpu_init));
double elapse = seconds();
printf("Initialization complete %lf\n", elapse - start);
//initialization done!
//first-step computation
//the first buffer was already allocated above
start = seconds();
//compute
NucleiFisrtStep(gpu_first, pairs);
//copy the values to the second step (this also allocates the second-step buffer)
CHECK(cudaMalloc((void **)(&gpu_second), nBytes));
CHECK(cudaMemcpy(gpu_second, gpu_first, nBytes, cudaMemcpyDeviceToDevice));
//copy back and save
CHECK(cudaMemcpy(host_first, gpu_first, nBytes, cudaMemcpyDeviceToHost));
PrintStruct(host_first, pairs, file_name, 1);
//free the first buffer
CHECK(cudaFree(gpu_first));
elapse = seconds();
printf("FirstStep complete %lf\n", elapse - start);
//first step done!
//second-step computation
start = seconds();
//prepare the derivative
double *gpu_aw, *gpu_ds;
double *host_aw, *host_ds;
long bytes_of_aw_ds = sizeof(double) * 2 * two_steps_in_host;
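// aw holds the vector potential and ds its time derivative, each sampled at 2 * two_steps_in_host points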
CHECK(cudaMalloc((void **)(&gpu_aw), bytes_of_aw_ds));
CHECK(cudaMalloc((void **)(&gpu_ds), bytes_of_aw_ds));
host_aw = (double*)malloc(bytes_of_aw_ds);
host_ds = (double*)malloc(bytes_of_aw_ds);
//ionization-rate counter
unsigned long long*gpu_count,*host_count;
int bytes_of_u_long = sizeof(unsigned long long);
host_count = (unsigned long long*)malloc(bytes_of_u_long);
CHECK(cudaMalloc((void **)(&gpu_count), bytes_of_u_long));
CHECK(cudaMalloc((void **)(&gpu_second_fliter), nBytes));
//compute
NucleiSecondStep(gpu_second, gpu_second_fliter,pairs, gpu_aw, gpu_ds,gpu_count);
//copy back and save
CHECK(cudaMemcpy(host_second, gpu_second, nBytes, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(host_second_fliter, gpu_second_fliter, nBytes, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(host_aw, gpu_aw, bytes_of_aw_ds, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(host_ds, gpu_ds, bytes_of_aw_ds, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(host_count, gpu_count, bytes_of_u_long, cudaMemcpyDeviceToHost));
printf("%llu\n", *host_count);
PrintStruct(host_second, pairs,file_name , 2);
PrintArray(host_aw, 2 * two_steps_in_host, file_name, 0);
PrintArray(host_ds, 2 * two_steps_in_host, file_name, 1);
PrintStruct(host_second_fliter, *host_count, file_name, 3);
//free the second buffer
CHECK(cudaFree(gpu_second));
CHECK(cudaFree(gpu_aw));
CHECK(cudaFree(gpu_ds));
elapse = seconds();
printf("SecondStep complete %lf\n", elapse - start);
// second step done!
//free host memory
//free(host_aw);
//free(host_ds);
//free(host_first);
//free(host_init);
//free(host_init);
return;
} |
baf6c3b65e107a7649f5720e5f13bd225d1d8abc.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 5000000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
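// round up so the final partial block still covers the tail of the vectors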
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
/*
nvprof "C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe"
[Vector addition of 5000000 elements]
==13708== NVPROF is profiling process 13708, command: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe
Copy input data from the host memory to the CUDA device
CUDA kernel launch with 19532 blocks of 256 threads
Copy output data from the CUDA device to the host memory
Test PASSED
Done
==13708== Profiling application: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe
==13708== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 38.06% 13.006ms 1 13.006ms 13.006ms 13.006ms vectorAdd(float const *, float const *, float*, int)
36.68% 12.534ms 2 6.2670ms 6.0185ms 6.5155ms [CUDA memcpy HtoD]
25.26% 8.6321ms 1 8.6321ms 8.6321ms 8.6321ms [CUDA memcpy DtoH]
API calls: 63.27% 190.04ms 3 63.348ms 2.0236ms 185.95ms hipMalloc
23.90% 71.783ms 1 71.783ms 71.783ms 71.783ms hipDevicePrimaryCtxRelease
11.78% 35.381ms 3 11.794ms 6.2484ms 22.681ms hipMemcpy
0.54% 1.6290ms 1 1.6290ms 1.6290ms 1.6290ms hipModuleUnload
0.32% 951.40us 3 317.13us 227.70us 407.30us hipFree
0.14% 421.60us 97 4.3460us 100ns 140.80us hipDeviceGetAttribute
0.03% 81.900us 1 81.900us 81.900us 81.900us cudaLaunchKernel
0.01% 38.700us 1 38.700us 38.700us 38.700us cuDeviceTotalMem
0.00% 12.600us 1 12.600us 12.600us 12.600us hipDeviceGetPCIBusId
0.00% 2.8000us 1 2.8000us 2.8000us 2.8000us hipGetLastError
0.00% 2.3000us 3 766ns 400ns 1.4000us hipGetDeviceCount
0.00% 2.1000us 2 1.0500us 200ns 1.9000us hipDeviceGet
0.00% 1.0000us 1 1.0000us 1.0000us 1.0000us hipDeviceGetName
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 300ns 1 300ns 300ns 300ns hipDeviceGetUuid
*/ | baf6c3b65e107a7649f5720e5f13bd225d1d8abc.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 5000000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
/*
¦Ë nvprof "C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe"
[Vector addition of 5000000 elements]
==13708== NVPROF is profiling process 13708, command: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe
Copy input data from the host memory to the CUDA device
CUDA kernel launch with 19532 blocks of 256 threads
Copy output data from the CUDA device to the host memory
Test PASSED
Done
==13708== Profiling application: C:\ProgramData\NVIDIA Corporation\CUDA Samples\v10.2\bin\win64\Debug\vectorAdd.exe
==13708== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 38.06% 13.006ms 1 13.006ms 13.006ms 13.006ms vectorAdd(float const *, float const *, float*, int)
36.68% 12.534ms 2 6.2670ms 6.0185ms 6.5155ms [CUDA memcpy HtoD]
25.26% 8.6321ms 1 8.6321ms 8.6321ms 8.6321ms [CUDA memcpy DtoH]
API calls: 63.27% 190.04ms 3 63.348ms 2.0236ms 185.95ms cudaMalloc
23.90% 71.783ms 1 71.783ms 71.783ms 71.783ms cuDevicePrimaryCtxRelease
11.78% 35.381ms 3 11.794ms 6.2484ms 22.681ms cudaMemcpy
0.54% 1.6290ms 1 1.6290ms 1.6290ms 1.6290ms cuModuleUnload
0.32% 951.40us 3 317.13us 227.70us 407.30us cudaFree
0.14% 421.60us 97 4.3460us 100ns 140.80us cuDeviceGetAttribute
0.03% 81.900us 1 81.900us 81.900us 81.900us cudaLaunchKernel
0.01% 38.700us 1 38.700us 38.700us 38.700us cuDeviceTotalMem
0.00% 12.600us 1 12.600us 12.600us 12.600us cuDeviceGetPCIBusId
0.00% 2.8000us 1 2.8000us 2.8000us 2.8000us cudaGetLastError
0.00% 2.3000us 3 766ns 400ns 1.4000us cuDeviceGetCount
0.00% 2.1000us 2 1.0500us 200ns 1.9000us cuDeviceGet
0.00% 1.0000us 1 1.0000us 1.0000us 1.0000us cuDeviceGetName
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetUuid
*/ |
58bc8c6a694e2d596b5d77cac453f7c39d721ebf.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * gpu_tol_4learn applies localization-tolerance filtering
 * to fitted molecules in parallel on the GPU
*
* This function will be called by matlab as
* [N, Nc, O, Oc, Sx, Sxc, Sy, Syc, X, Xc, Y, Yc, llv, Fn, lp] = gpu_tol(N, Nc, O, Oc, Sx, Sxc, Sy, Syc, X, Xc, Y, Yc, llv, Fn, lp, Nl, Nh, Ncl, Nch, Ol, Oh, Xcl, Xch, Ycl, Ych, Sl, Sh, Scl, Sch, fNh, fOh, fSh, ll, lh)
*
*/
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// gpu function to build fractional error variables
void __global__ fractionate(double *out, double *x, double *xc, int m) {
int index = blockDim.x*blockIdx.x + threadIdx.x;
if (index < m) {
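// fractional error: sqrt of the CRLB variance over the fitted value; a negative CRLB is flagged with the sentinel value 100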
if (xc[index] >= 0) {
out[index] = sqrt(xc[index]) / x[index];
}
else {
out[index] = 100.0;
}
}
}
void __global__ tolerate(double *d_Ni,
double *d_No,
double *d_Nci,
double *d_Nco,
double *d_Oi,
double *d_Oo,
double *d_Oci,
double *d_Oco,
double *d_Sxi,
double *d_Sxo,
double *d_Sxci,
double *d_Sxco,
double *d_Syi,
double *d_Syo,
double *d_Syci,
double *d_Syco,
double *d_Xi,
double *d_Xo,
double *d_Xci,
double *d_Xco,
double *d_Yi,
double *d_Yo,
double *d_Yci,
double *d_Yco,
double *d_llvi,
double *d_llvo,
double *d_fni,
double *d_fno,
double *d_lpi,
double *d_lpo,
double *d_frN,
double *d_frO,
double *d_frSx,
double *d_frSy,
double Nl,
double Nh,
double Ncl,
double Nch,
double Ol,
double Oh,
double Ocl,
double Och,
double Xcl,
double Xch,
double Ycl,
double Ych,
double Sl,
double Sh,
double Scl,
double Sch,
double fNh,
double fOh,
double fSh,
double ll,
double lh,
int m,
double *d_xc,
double *d_yc){
int index = blockDim.x*blockIdx.x + threadIdx.x;
if(index < m)
{ // verify you are working on a molecule
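// a molecule survives only if every fitted parameter, its CRLB, the fractional errors and the log-likelihood fall inside the user-supplied limits; otherwise all outputs are set to -1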
if(d_Ni[index] >= Nl && d_Ni[index] <= Nh && d_Nci[index] >= Ncl && d_Nci[index] <= Nch && d_Oi[index] >= Ol && d_Oi[index] <= Oh && d_Oci[index] >= Ocl && d_Oci[index] <= Och && d_Xci[index] >= Xcl && d_Xci[index] <= Xch && d_Yci[index] >= Ycl && d_Yci[index] <= Ych && d_Sxi[index] >= Sl && d_Sxi[index] <= Sh && d_Syi[index] >= Sl && d_Syi[index] <= Sh && d_Sxci[index] >= Scl && d_Sxci[index] <= Sch && d_Syci[index] >= Scl && d_Syci[index] <= Sch && d_frN[index] <= fNh && d_frO[index] <= fOh && d_frSx[index] <= fSh && d_frSy[index] <= fSh && d_llvi[index] >= ll && d_llvi[index] <= lh)
{ d_No[index] = d_Ni[index];
d_Nco[index] = d_Nci[index];
d_Oo[index] = d_Oi[index];
d_Oco[index] = d_Oci[index];
d_Sxo[index] = d_Sxi[index];
d_Syo[index] = d_Syi[index];
d_Sxco[index] = d_Sxci[index];
d_Syco[index] = d_Syci[index];
d_Xo[index] = d_Xi[index];
d_Xco[index] = d_Xci[index];
d_Yo[index] = d_Yi[index];
d_Yco[index] = d_Yci[index];
d_llvo[index] = d_llvi[index];
d_fno[index] = d_fni[index];
d_lpo[index] = d_lpi[index];
} // end tolerance if
else {
d_No[index] = -1;
d_Nco[index] = -1;
d_Oo[index] = -1;
d_Oco[index] = -1;
d_Sxo[index] = -1;
d_Syo[index] = -1;
d_Sxco[index] = -1;
d_Syco[index] = -1;
d_Xo[index] = -1;
d_Xco[index] = -1;
d_Yo[index] = -1;
d_Yco[index] = -1;
d_llvo[index] = -1;
d_fno[index] = -1;
d_lpo[index] = -1;
}// end else
}// end working on a molecule
}//end global
// main
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
/*
*
* Variable Declaration and Setup
*
*
*/
// array doubles follow convention i is for input o is for output c is for crlb
double *Ni, *No, *Nci, *Nco, *Oi, *Oo, *Oci, *Oco, *Xi, *Xo, *Xci, *Xco;
double *Yi, *Yo, *Yci, *Yco, *Sxi, *Sxo, *Sxci, *Sxco, *Syi, *Syo, *Syci, *Syco;
double *llvi, *llvo, *lpi, *lpo, *fni, *fno, *xc, *yc;
// GPU Variables
double *d_Ni, *d_No, *d_Nci, *d_Nco, *d_Oi, *d_Oo, *d_Oci, *d_Oco, *d_Xi, *d_Xo, *d_Xci, *d_Xco;
double *d_Yi, *d_Yo, *d_Yci, *d_Yco, *d_Sxi, *d_Sxo, *d_Sxci, *d_Sxco, *d_Syi, *d_Syo, *d_Syci, *d_Syco;
double *d_llvi, *d_llvo, *d_lpi, *d_lpo, *d_fni, *d_fno, *d_frN, *d_frO, *d_frSx, *d_frSy, *d_xc, *d_yc;
// single entry doubles
double Nl, Nh, Ncl, Nch, Oh, Ol, Och, Ocl, Xch, Xcl, Ych, Ycl;
double Sh, Sl, Sch, Scl, fNh, fOh, fSh, ll, lh;
// Error Message Array
// Vector Size Array
if (nrhs != 38) {
mexErrMsgTxt("You need 38 input variables");
}
if (nlhs != 15) {
mexErrMsgTxt("You need 15 output variables");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[1]) || mxGetM(prhs[0]) != mxGetM(prhs[2]) || mxGetM(prhs[0]) != mxGetM(prhs[3])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[4]) || mxGetM(prhs[0]) != mxGetM(prhs[5]) || mxGetM(prhs[0]) != mxGetM(prhs[6])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[7]) || mxGetM(prhs[0]) != mxGetM(prhs[8]) || mxGetM(prhs[0]) != mxGetM(prhs[9])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[10]) || mxGetM(prhs[0]) != mxGetM(prhs[11]) || mxGetM(prhs[0]) != mxGetM(prhs[12])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[13]) || mxGetM(prhs[0]) != mxGetM(prhs[14])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[37]) || mxGetM(prhs[0]) != mxGetM(prhs[36])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
// Check that variables are doubles
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[1]) || mxIsComplex(prhs[1])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[3]) || mxIsComplex(prhs[3])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[4]) || mxIsComplex(prhs[4])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[5]) || mxIsComplex(prhs[5])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[6]) || mxIsComplex(prhs[6])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[7]) || mxIsComplex(prhs[7])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[8]) || mxIsComplex(prhs[8])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[9]) || mxIsComplex(prhs[9])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[10]) || mxIsComplex(prhs[10])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[11]) || mxIsComplex(prhs[11])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[12]) || mxIsComplex(prhs[12])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[13]) || mxIsComplex(prhs[13])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[14]) || mxIsComplex(prhs[14])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
if (!mxIsDouble(prhs[36]) || mxIsComplex(prhs[36])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
if (!mxIsDouble(prhs[37]) || mxIsComplex(prhs[37])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
// Grab dimension size of data
const size_t *dims;
dims = mxGetDimensions(prhs[0]);
int m = (int)dims[0];
int n = (int)dims[1];
const int mem_size = m*n*sizeof(double);
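// all per-molecule vectors share the same m-by-n shape, so a single byte count serves every allocation and copy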
// Get position of Data
Ni = (double *)mxGetPr(prhs[0]);
Nci = (double *)mxGetPr(prhs[1]);
Oi = (double *)mxGetPr(prhs[2]);
Oci = (double *)mxGetPr(prhs[3]);
Sxi = (double *)mxGetPr(prhs[4]);
Sxci = (double *)mxGetPr(prhs[5]);
Syi = (double *)mxGetPr(prhs[6]);
Syci = (double *)mxGetPr(prhs[7]);
Xi = (double *)mxGetPr(prhs[8]);
Xci = (double *)mxGetPr(prhs[9]);
Yi = (double *)mxGetPr(prhs[10]);
Yci = (double *)mxGetPr(prhs[11]);
llvi = (double *)mxGetPr(prhs[12]);
fni = (double *)mxGetPr(prhs[13]);
lpi = (double *)mxGetPr(prhs[14]);
xc = (double *)mxGetPr(prhs[36]);
yc = (double *)mxGetPr(prhs[37]);
// Get Tolerance Limits
Nl = mxGetScalar(prhs[15]);
Nh = mxGetScalar(prhs[16]);
Ncl = mxGetScalar(prhs[17]);
Nch = mxGetScalar(prhs[18]);
Ol = mxGetScalar(prhs[19]);
Oh = mxGetScalar(prhs[20]);
Ocl = mxGetScalar(prhs[21]);
Och = mxGetScalar(prhs[22]);
Xcl = mxGetScalar(prhs[23]);
Xch = mxGetScalar(prhs[24]);
Ycl = mxGetScalar(prhs[25]);
Ych = mxGetScalar(prhs[26]);
Sl = mxGetScalar(prhs[27]);
Sh = mxGetScalar(prhs[28]);
Scl = mxGetScalar(prhs[29]);
Sch = mxGetScalar(prhs[30]);
fNh = mxGetScalar(prhs[31]);
fOh = mxGetScalar(prhs[32]);
fSh = mxGetScalar(prhs[33]);
ll = mxGetScalar(prhs[34]);
lh = mxGetScalar(prhs[35]);
// Fairly Certain at this point all data is accessible through the program
/*
*
*
* GPU MEMORY ALLOCATION and Copying
 * With a million molecule data set we're looking at 240 MB of data, while the GeForce 1060
 * has ~5921 MB free, so we are using ~4% of the total memory; because I've never seen a data
 * set that big, I am assuming our memory is going to be just fine
*
*/
// hipMalloc Array
checkCudaErrors(hipMalloc((void**)&d_Ni, mem_size));
checkCudaErrors(hipMalloc((void**)&d_No, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Nci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Nco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Oi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Oo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Oci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Oco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Sxi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Sxo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Sxci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Sxco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Syi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Syo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Syci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Syco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Xi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Xo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Xci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Xco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Yi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Yo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Yci, mem_size));
checkCudaErrors(hipMalloc((void**)&d_Yco, mem_size));
checkCudaErrors(hipMalloc((void**)&d_llvi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_llvo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_fni, mem_size));
checkCudaErrors(hipMalloc((void**)&d_fno, mem_size));
checkCudaErrors(hipMalloc((void**)&d_lpi, mem_size));
checkCudaErrors(hipMalloc((void**)&d_lpo, mem_size));
checkCudaErrors(hipMalloc((void**)&d_frN, mem_size));
checkCudaErrors(hipMalloc((void**)&d_frO, mem_size));
checkCudaErrors(hipMalloc((void**)&d_frSx, mem_size));
checkCudaErrors(hipMalloc((void**)&d_frSy, mem_size));
checkCudaErrors(hipMalloc((void**)&d_xc, mem_size));
checkCudaErrors(hipMalloc((void**)&d_yc, mem_size));
// Data Copy Array
checkCudaErrors(hipMemcpy(d_Ni, Ni, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Nci, Nci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Oi, Oi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Oci, Oci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Sxi, Sxi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Sxci, Sxci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Syi, Syi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Syci, Syci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Xi, Xi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Xci, Xci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Yi, Yi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Yci, Yci, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_llvi, llvi, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_fni, fni, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_lpi, lpi, mem_size, hipMemcpyHostToDevice));
// Get Fractional error Vectors and tolerance
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frN, d_Ni, d_Nci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frO, d_Oi, d_Oci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frSx, d_Sxi, d_Sxci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frSy, d_Syi, d_Syci, m);
tolerate << <((m - 1) / 1024 + 1), 1024 >> > (d_Ni, d_No, d_Nci, d_Nco, d_Oi, d_Oo, d_Oci, d_Oco, d_Sxi, d_Sxo, d_Sxci, d_Sxco, d_Syi, d_Syo, d_Syci, d_Syco, d_Xi, d_Xo, d_Xci, d_Xco, d_Yi, d_Yo, d_Yci, d_Yco, d_llvi, d_llvo, d_fni, d_fno, d_lpi, d_lpo, d_frN, d_frO, d_frSx, d_frSy, Nl, Nh, Ncl, Nch, Ol, Oh, Ocl, Och, Xcl, Xch, Ycl, Ych, Sl, Sh, Scl, Sch, fNh, fOh, fSh, ll, lh, m, d_xc, d_yc);
/*
*
*
* Copy back and free up space
*
*
*
*
*/
// Create Arrays at output pointers
plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[1] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[2] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[3] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[4] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[5] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[6] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[7] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[8] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[9] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[10] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[11] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[12] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[13] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[14] = mxCreateDoubleMatrix(m, n, mxREAL);
No = (double *)mxGetPr(plhs[0]);
Nco = (double *)mxGetPr(plhs[1]);
Oo = (double *)mxGetPr(plhs[2]);
Oco = (double *)mxGetPr(plhs[3]);
Sxo = (double *)mxGetPr(plhs[4]);
Sxco = (double *)mxGetPr(plhs[5]);
Syo = (double *)mxGetPr(plhs[6]);
Syco = (double *)mxGetPr(plhs[7]);
Xo = (double *)mxGetPr(plhs[8]);
Xco = (double *)mxGetPr(plhs[9]);
Yo = (double *)mxGetPr(plhs[10]);
Yco = (double *)mxGetPr(plhs[11]);
llvo = (double *)mxGetPr(plhs[12]);
fno = (double *)mxGetPr(plhs[13]);
lpo = (double *)mxGetPr(plhs[14]);
// copy data array
checkCudaErrors(hipMemcpy(No, d_No, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Nco, d_Nco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Oo, d_Oo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Oco, d_Oco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Sxo, d_Sxo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Sxco, d_Sxco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Syo, d_Syo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Syco, d_Syco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Xo, d_Xo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Xco, d_Xco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Yo, d_Yo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(Yco, d_Yco, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(llvo, d_llvo, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(fno, d_fno, mem_size, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(lpo, d_lpo, mem_size, hipMemcpyDeviceToHost));
/*
checkCudaErrors(hipMemcpy(y_out,d_data,mem_size, hipMemcpyDeviceToHost));
hipfftDestroy(plan);
hipFree(d_data);
// create complex double in matlab
plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL);
yor = mxGetPr(plhs[0]);
yoi = mxGetPi(plhs[0]);
unpack_c2c(y_out, yor, yoi, n*m);
mxFree(data);
*/
// Release GPU memory
hipFree(d_Ni);
hipFree(d_No);
hipFree(d_Nci);
hipFree(d_Nco);
hipFree(d_Oi);
hipFree(d_Oo);
hipFree(d_Oci);
hipFree(d_Oco);
hipFree(d_Sxi);
hipFree(d_Sxo);
hipFree(d_Sxci);
hipFree(d_Sxco);
hipFree(d_Syi);
hipFree(d_Syo);
hipFree(d_Syci);
hipFree(d_Syco);
hipFree(d_Xi);
hipFree(d_Xo);
hipFree(d_Xci);
hipFree(d_Xco);
hipFree(d_Yi);
hipFree(d_Yo);
hipFree(d_Yci);
hipFree(d_Yco);
hipFree(d_llvi);
hipFree(d_llvo);
hipFree(d_fni);
hipFree(d_fno);
hipFree(d_lpi);
hipFree(d_lpo);
hipFree(d_frN);
hipFree(d_frO);
hipFree(d_frSx);
hipFree(d_frSy);
hipFree(d_xc);
hipFree(d_yc);
}
| 58bc8c6a694e2d596b5d77cac453f7c39d721ebf.cu | /*
 * gpu_tol_4learn applies localization-tolerance filtering
 * to fitted molecules in parallel on the GPU
*
* This function will be called by matlab as
* [N, Nc, O, Oc, Sx, Sxc, Sy, Syc, X, Xc, Y, Yc, llv, Fn, lp] = gpu_tol(N, Nc, O, Oc, Sx, Sxc, Sy, Syc, X, Xc, Y, Yc, llv, Fn, lp, Nl, Nh, Ncl, Nch, Ol, Oh, Xcl, Xch, Ycl, Ych, Sl, Sh, Scl, Sch, fNh, fOh, fSh, ll, lh)
*
*/
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// gpu function to build fractional error variables
void __global__ fractionate(double *out, double *x, double *xc, int m) {
int index = blockDim.x*blockIdx.x + threadIdx.x;
if (index < m) {
if (xc[index] >= 0) {
out[index] = sqrt(xc[index]) / x[index];
}
else {
out[index] = 100.0;
}
}
}
void __global__ tolerate(double *d_Ni,
double *d_No,
double *d_Nci,
double *d_Nco,
double *d_Oi,
double *d_Oo,
double *d_Oci,
double *d_Oco,
double *d_Sxi,
double *d_Sxo,
double *d_Sxci,
double *d_Sxco,
double *d_Syi,
double *d_Syo,
double *d_Syci,
double *d_Syco,
double *d_Xi,
double *d_Xo,
double *d_Xci,
double *d_Xco,
double *d_Yi,
double *d_Yo,
double *d_Yci,
double *d_Yco,
double *d_llvi,
double *d_llvo,
double *d_fni,
double *d_fno,
double *d_lpi,
double *d_lpo,
double *d_frN,
double *d_frO,
double *d_frSx,
double *d_frSy,
double Nl,
double Nh,
double Ncl,
double Nch,
double Ol,
double Oh,
double Ocl,
double Och,
double Xcl,
double Xch,
double Ycl,
double Ych,
double Sl,
double Sh,
double Scl,
double Sch,
double fNh,
double fOh,
double fSh,
double ll,
double lh,
int m,
double *d_xc,
double *d_yc){
int index = blockDim.x*blockIdx.x + threadIdx.x;
if(index < m)
{ // verify you are working on a molecule
if(d_Ni[index] >= Nl && d_Ni[index] <= Nh && d_Nci[index] >= Ncl && d_Nci[index] <= Nch && d_Oi[index] >= Ol && d_Oi[index] <= Oh && d_Oci[index] >= Ocl && d_Oci[index] <= Och && d_Xci[index] >= Xcl && d_Xci[index] <= Xch && d_Yci[index] >= Ycl && d_Yci[index] <= Ych && d_Sxi[index] >= Sl && d_Sxi[index] <= Sh && d_Syi[index] >= Sl && d_Syi[index] <= Sh && d_Sxci[index] >= Scl && d_Sxci[index] <= Sch && d_Syci[index] >= Scl && d_Syci[index] <= Sch && d_frN[index] <= fNh && d_frO[index] <= fOh && d_frSx[index] <= fSh && d_frSy[index] <= fSh && d_llvi[index] >= ll && d_llvi[index] <= lh)
{ d_No[index] = d_Ni[index];
d_Nco[index] = d_Nci[index];
d_Oo[index] = d_Oi[index];
d_Oco[index] = d_Oci[index];
d_Sxo[index] = d_Sxi[index];
d_Syo[index] = d_Syi[index];
d_Sxco[index] = d_Sxci[index];
d_Syco[index] = d_Syci[index];
d_Xo[index] = d_Xi[index];
d_Xco[index] = d_Xci[index];
d_Yo[index] = d_Yi[index];
d_Yco[index] = d_Yci[index];
d_llvo[index] = d_llvi[index];
d_fno[index] = d_fni[index];
d_lpo[index] = d_lpi[index];
} // end tolerance if
else {
d_No[index] = -1;
d_Nco[index] = -1;
d_Oo[index] = -1;
d_Oco[index] = -1;
d_Sxo[index] = -1;
d_Syo[index] = -1;
d_Sxco[index] = -1;
d_Syco[index] = -1;
d_Xo[index] = -1;
d_Xco[index] = -1;
d_Yo[index] = -1;
d_Yco[index] = -1;
d_llvo[index] = -1;
d_fno[index] = -1;
d_lpo[index] = -1;
}// end else
}// end working on a molecule
}//end global
// main
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
/*
*
* Variable Declaration and Setup
*
*
*/
// array doubles follow convention i is for input o is for output c is for crlb
double *Ni, *No, *Nci, *Nco, *Oi, *Oo, *Oci, *Oco, *Xi, *Xo, *Xci, *Xco;
double *Yi, *Yo, *Yci, *Yco, *Sxi, *Sxo, *Sxci, *Sxco, *Syi, *Syo, *Syci, *Syco;
double *llvi, *llvo, *lpi, *lpo, *fni, *fno, *xc, *yc;
// GPU Variables
double *d_Ni, *d_No, *d_Nci, *d_Nco, *d_Oi, *d_Oo, *d_Oci, *d_Oco, *d_Xi, *d_Xo, *d_Xci, *d_Xco;
double *d_Yi, *d_Yo, *d_Yci, *d_Yco, *d_Sxi, *d_Sxo, *d_Sxci, *d_Sxco, *d_Syi, *d_Syo, *d_Syci, *d_Syco;
double *d_llvi, *d_llvo, *d_lpi, *d_lpo, *d_fni, *d_fno, *d_frN, *d_frO, *d_frSx, *d_frSy, *d_xc, *d_yc;
// single entry doubles
double Nl, Nh, Ncl, Nch, Oh, Ol, Och, Ocl, Xch, Xcl, Ych, Ycl;
double Sh, Sl, Sch, Scl, fNh, fOh, fSh, ll, lh;
// Error Message Array
// Vector Size Array
if (nrhs != 38) {
mexErrMsgTxt("You need 38 input variables");
}
if (nlhs != 15) {
mexErrMsgTxt("You need 15 output variables");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[1]) || mxGetM(prhs[0]) != mxGetM(prhs[2]) || mxGetM(prhs[0]) != mxGetM(prhs[3])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[4]) || mxGetM(prhs[0]) != mxGetM(prhs[5]) || mxGetM(prhs[0]) != mxGetM(prhs[6])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[7]) || mxGetM(prhs[0]) != mxGetM(prhs[8]) || mxGetM(prhs[0]) != mxGetM(prhs[9])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[10]) || mxGetM(prhs[0]) != mxGetM(prhs[11]) || mxGetM(prhs[0]) != mxGetM(prhs[12])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[13]) || mxGetM(prhs[0]) != mxGetM(prhs[14])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
if (mxGetM(prhs[0]) != mxGetM(prhs[37]) || mxGetM(prhs[0]) != mxGetM(prhs[36])) {
mexErrMsgTxt("Your input vectors must be the same size!\n");
}
// Check that variables are doubles
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[1]) || mxIsComplex(prhs[1])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[3]) || mxIsComplex(prhs[3])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[4]) || mxIsComplex(prhs[4])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[5]) || mxIsComplex(prhs[5])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[6]) || mxIsComplex(prhs[6])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[7]) || mxIsComplex(prhs[7])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[8]) || mxIsComplex(prhs[8])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[9]) || mxIsComplex(prhs[9])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[10]) || mxIsComplex(prhs[10])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[11]) || mxIsComplex(prhs[11])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[12]) || mxIsComplex(prhs[12])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[13]) || mxIsComplex(prhs[13])) {
mexErrMsgTxt("Your input vectors must contain doubles\n");
}
if (!mxIsDouble(prhs[14]) || mxIsComplex(prhs[14])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
if (!mxIsDouble(prhs[36]) || mxIsComplex(prhs[36])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
if (!mxIsDouble(prhs[37]) || mxIsComplex(prhs[37])) {
mexErrMsgTxt("Your input vectors must contain real doubles\n");
}
// Grab dimension size of data
const size_t *dims;
dims = mxGetDimensions(prhs[0]);
int m = (int)dims[0];
int n = (int)dims[1];
const int mem_size = m*n*sizeof(double);
// Get position of Data
Ni = (double *)mxGetPr(prhs[0]);
Nci = (double *)mxGetPr(prhs[1]);
Oi = (double *)mxGetPr(prhs[2]);
Oci = (double *)mxGetPr(prhs[3]);
Sxi = (double *)mxGetPr(prhs[4]);
Sxci = (double *)mxGetPr(prhs[5]);
Syi = (double *)mxGetPr(prhs[6]);
Syci = (double *)mxGetPr(prhs[7]);
Xi = (double *)mxGetPr(prhs[8]);
Xci = (double *)mxGetPr(prhs[9]);
Yi = (double *)mxGetPr(prhs[10]);
Yci = (double *)mxGetPr(prhs[11]);
llvi = (double *)mxGetPr(prhs[12]);
fni = (double *)mxGetPr(prhs[13]);
lpi = (double *)mxGetPr(prhs[14]);
xc = (double *)mxGetPr(prhs[36]);
yc = (double *)mxGetPr(prhs[37]);
// Get Tolerance Limits
Nl = mxGetScalar(prhs[15]);
Nh = mxGetScalar(prhs[16]);
Ncl = mxGetScalar(prhs[17]);
Nch = mxGetScalar(prhs[18]);
Ol = mxGetScalar(prhs[19]);
Oh = mxGetScalar(prhs[20]);
Ocl = mxGetScalar(prhs[21]);
Och = mxGetScalar(prhs[22]);
Xcl = mxGetScalar(prhs[23]);
Xch = mxGetScalar(prhs[24]);
Ycl = mxGetScalar(prhs[25]);
Ych = mxGetScalar(prhs[26]);
Sl = mxGetScalar(prhs[27]);
Sh = mxGetScalar(prhs[28]);
Scl = mxGetScalar(prhs[29]);
Sch = mxGetScalar(prhs[30]);
fNh = mxGetScalar(prhs[31]);
fOh = mxGetScalar(prhs[32]);
fSh = mxGetScalar(prhs[33]);
ll = mxGetScalar(prhs[34]);
lh = mxGetScalar(prhs[35]);
// Fairly Certain at this point all data is accessible through the program
/*
*
*
* GPU MEMORY ALLOCATION and Copying
 * With a million molecule data set we're looking at 240 MB of data, while the GeForce 1060
 * has ~5921 MB free, so we are using ~4% of the total memory; because I've never seen a data
 * set that big, I am assuming our memory is going to be just fine
*
*/
// cudaMalloc Array
checkCudaErrors(cudaMalloc((void**)&d_Ni, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_No, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Nci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Nco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Oi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Oo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Oci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Oco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Sxi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Sxo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Sxci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Sxco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Syi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Syo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Syci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Syco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Xi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Xo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Xci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Xco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Yi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Yo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Yci, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_Yco, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_llvi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_llvo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_fni, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_fno, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_lpi, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_lpo, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_frN, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_frO, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_frSx, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_frSy, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_xc, mem_size));
checkCudaErrors(cudaMalloc((void**)&d_yc, mem_size));
// Data Copy Array
checkCudaErrors(cudaMemcpy(d_Ni, Ni, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Nci, Nci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Oi, Oi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Oci, Oci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Sxi, Sxi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Sxci, Sxci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Syi, Syi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Syci, Syci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Xi, Xi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Xci, Xci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Yi, Yi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Yci, Yci, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_llvi, llvi, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_fni, fni, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_lpi, lpi, mem_size, cudaMemcpyHostToDevice));
// Get Fractional error Vectors and tolerance
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frN, d_Ni, d_Nci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frO, d_Oi, d_Oci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frSx, d_Sxi, d_Sxci, m);
fractionate << <((m - 1) / 1024 + 1), 1024 >> > (d_frSy, d_Syi, d_Syci, m);
tolerate << <((m - 1) / 1024 + 1), 1024 >> > (d_Ni, d_No, d_Nci, d_Nco, d_Oi, d_Oo, d_Oci, d_Oco, d_Sxi, d_Sxo, d_Sxci, d_Sxco, d_Syi, d_Syo, d_Syci, d_Syco, d_Xi, d_Xo, d_Xci, d_Xco, d_Yi, d_Yo, d_Yci, d_Yco, d_llvi, d_llvo, d_fni, d_fno, d_lpi, d_lpo, d_frN, d_frO, d_frSx, d_frSy, Nl, Nh, Ncl, Nch, Ol, Oh, Ocl, Och, Xcl, Xch, Ycl, Ych, Sl, Sh, Scl, Sch, fNh, fOh, fSh, ll, lh, m, d_xc, d_yc);
/*
*
*
* Copy back and free up space
*
*
*
*
*/
// Create Arrays at output pointers
plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[1] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[2] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[3] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[4] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[5] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[6] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[7] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[8] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[9] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[10] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[11] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[12] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[13] = mxCreateDoubleMatrix(m, n, mxREAL);
plhs[14] = mxCreateDoubleMatrix(m, n, mxREAL);
No = (double *)mxGetPr(plhs[0]);
Nco = (double *)mxGetPr(plhs[1]);
Oo = (double *)mxGetPr(plhs[2]);
Oco = (double *)mxGetPr(plhs[3]);
Sxo = (double *)mxGetPr(plhs[4]);
Sxco = (double *)mxGetPr(plhs[5]);
Syo = (double *)mxGetPr(plhs[6]);
Syco = (double *)mxGetPr(plhs[7]);
Xo = (double *)mxGetPr(plhs[8]);
Xco = (double *)mxGetPr(plhs[9]);
Yo = (double *)mxGetPr(plhs[10]);
Yco = (double *)mxGetPr(plhs[11]);
llvo = (double *)mxGetPr(plhs[12]);
fno = (double *)mxGetPr(plhs[13]);
lpo = (double *)mxGetPr(plhs[14]);
// Copy result arrays back to the host
checkCudaErrors(cudaMemcpy(No, d_No, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Nco, d_Nco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Oo, d_Oo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Oco, d_Oco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Sxo, d_Sxo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Sxco, d_Sxco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Syo, d_Syo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Syco, d_Syco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Xo, d_Xo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Xco, d_Xco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Yo, d_Yo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(Yco, d_Yco, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(llvo, d_llvo, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(fno, d_fno, mem_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(lpo, d_lpo, mem_size, cudaMemcpyDeviceToHost));
/*
checkCudaErrors(cudaMemcpy(y_out,d_data,mem_size, cudaMemcpyDeviceToHost));
cufftDestroy(plan);
cudaFree(d_data);
// create complex double in matlab
plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL);
yor = mxGetPr(plhs[0]);
yoi = mxGetPi(plhs[0]);
unpack_c2c(y_out, yor, yoi, n*m);
mxFree(data);
*/
// Release GPU memory
cudaFree(d_Ni);
cudaFree(d_No);
cudaFree(d_Nci);
cudaFree(d_Nco);
cudaFree(d_Oi);
cudaFree(d_Oo);
cudaFree(d_Oci);
cudaFree(d_Oco);
cudaFree(d_Sxi);
cudaFree(d_Sxo);
cudaFree(d_Sxci);
cudaFree(d_Sxco);
cudaFree(d_Syi);
cudaFree(d_Syo);
cudaFree(d_Syci);
cudaFree(d_Syco);
cudaFree(d_Xi);
cudaFree(d_Xo);
cudaFree(d_Xci);
cudaFree(d_Xco);
cudaFree(d_Yi);
cudaFree(d_Yo);
cudaFree(d_Yci);
cudaFree(d_Yco);
cudaFree(d_llvi);
cudaFree(d_llvo);
cudaFree(d_fni);
cudaFree(d_fno);
cudaFree(d_lpi);
cudaFree(d_lpo);
cudaFree(d_frN);
cudaFree(d_frO);
cudaFree(d_frSx);
cudaFree(d_frSy);
cudaFree(d_xc);
cudaFree(d_yc);
}
|
51760e61319797962ebc400bc37be1a942ea0d82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 25-Oct-2011 14:59:20
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
float *ind_arg0, int *ind_arg0_maps,
float *ind_arg1, int *ind_arg1_maps,
float *ind_arg2, int *ind_arg2_maps,
float *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
short *arg5_maps,
short *arg6_maps,
short *arg7_maps,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
float arg6_l[4];
float arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
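// round nelem up to a multiple of blockDim.x so every thread executes the same
// number of loop iterations (keeps the __syncthreads in the colour loop safe)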
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_float;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_float;
// user-supplied kernel call
res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg1_s+arg3_maps[n+offset_b]*4,
ind_arg2_s+arg4_maps[n+offset_b]*1,
ind_arg2_s+arg5_maps[n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map = arg6_maps[n+offset_b];
int arg7_map = arg7_maps[n+offset_b];
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7};
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
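// the plan partitions the set into blocks and colours them so that blocks of the
// same colour never increment the same indirect data and can execute concurrently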
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(float *)arg0.data_d, Plan->ind_maps[0],
(float *)arg2.data_d, Plan->ind_maps[1],
(float *)arg4.data_d, Plan->ind_maps[2],
(float *)arg6.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
Plan->loc_maps[5],
Plan->loc_maps[6],
Plan->loc_maps[7],
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
OP_kernels[2].time += wall_t2 - wall_t1;
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
| 51760e61319797962ebc400bc37be1a942ea0d82.cu | //
// auto-generated by op2.m on 25-Oct-2011 14:59:20
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
float *ind_arg0, int *ind_arg0_maps,
float *ind_arg1, int *ind_arg1_maps,
float *ind_arg2, int *ind_arg2_maps,
float *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
short *arg5_maps,
short *arg6_maps,
short *arg7_maps,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
float arg6_l[4];
float arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_float;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_float;
// user-supplied kernel call
res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg1_s+arg3_maps[n+offset_b]*4,
ind_arg2_s+arg4_maps[n+offset_b]*1,
ind_arg2_s+arg5_maps[n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map = arg6_maps[n+offset_b];
int arg7_map = arg7_maps[n+offset_b];
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7};
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(float *)arg0.data_d, Plan->ind_maps[0],
(float *)arg2.data_d, Plan->ind_maps[1],
(float *)arg4.data_d, Plan->ind_maps[2],
(float *)arg6.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
Plan->loc_maps[5],
Plan->loc_maps[6],
Plan->loc_maps[7],
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
OP_kernels[2].time += wall_t2 - wall_t1;
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
|
3a0efdc4e2c4585c806bf7e08134093a5ae4cced.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <memory>
#include <hip/hip_vector_types.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define TH_DIM 32
const dim3 thread_dim(TH_DIM, TH_DIM);
const int block_num = 5000;
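// launch geometry: each block is a TH_DIM x TH_DIM tile of threads, and the
// block_num blocks stride over the image rows inside the kernel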
/* Hint 7 */
// this variable is used by device
__constant__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
// __device__ int get_mask(int *mask, int n, int x, int y){
// return mask[n * MASK_X * MASK_Y + x * MASK_Y + y];
// }
/* Hint 5 */
// this function is called by host and executed by device
// extern __shared__ int sm_s[];
__global__ void sobel(unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels) {
int x, y, i, v, u;
int R, G, B;
float val[MASK_N*3] = {0.0};
// const int x_divisor = blockDim.x - 1;
// const int x_width_div = width / x_divisor;
// const int x_width_mod = width % x_divisor;
// const int x_start = threadIdx.x * x_width_div;
// const int x_width = threadIdx.x < blockDim.x - 1? x_start + x_width_div : x_start + x_width_mod;
// const int x_gap = 1;
const int x_start = threadIdx.x;
const int x_width = width;
const int x_gap = blockDim.x;
const int y_start = blockIdx.x * blockDim.y + threadIdx.y;
const int y_height = height;
const int y_gap = gridDim.x * blockDim.y;
const int adjustX = (MASK_X % 2) ? 1 : 0;
const int adjustY = (MASK_Y % 2) ? 1 : 0;
const int xBound = MASK_X / 2;
const int yBound = MASK_Y / 2;
const int kernel_width = 2 * xBound + adjustX + TH_DIM - 1;
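// width of the shared-memory staging tile: TH_DIM columns plus a (MASK_X - 1) halo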
// __shared__ int sm_mask[MASK_N][MASK_X][MASK_Y];
__shared__ unsigned char sm_s[40000];
// printf("BLock %d, Thread (%d, %d) Created, Conv Box (%d : %d, %d : %d), Kernel Width: %d\n", blockIdx.x, threadIdx.x, threadIdx.y, \
// -xBound, xBound + adjustX + blockDim.x - 1, -yBound, yBound + adjustY + blockDim.y - 1, kernel_width);
// for(int i = 0; i < MASK_N; i++){
// for(int x = threadIdx.x; x < MASK_X; x+=blockDim.x){
// for(int y = threadIdx.y; y < MASK_Y; y+=blockDim.y){
// sm_mask[i][x][y] = mask[i][x][y];
// }
// }
// }
// __syncthreads();
char mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
/* Hint 6 */
// parallel job by blockIdx, blockDim, threadIdx
for (y = y_start; y < y_height; y+=y_gap) {
for (x = x_start; x < x_width; x+=x_gap) {
for (v = -yBound; v < yBound + adjustY; v+=2) {
for (u = -xBound; u < xBound + adjustX; u+=2) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
int base = channels * (kernel_width * (v + yBound + threadIdx.y) + (u + xBound + threadIdx.x));
sm_s[base + 2] = s[channels * (width * (y+v) + (x+u)) + 2];
sm_s[base + 1] = s[channels * (width * (y+v) + (x+u)) + 1];
sm_s[base + 0] = s[channels * (width * (y+v) + (x+u)) + 0];
}
}
}
__syncthreads();
for (i = 0; i < MASK_N; ++i) {
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; v++) {
for (u = -xBound; u < xBound + adjustX; u++) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
int base = channels * (kernel_width * (v + yBound + threadIdx.y) + (u + xBound + threadIdx.x));
R = sm_s[base + 2];
G = sm_s[base + 1];
B = sm_s[base + 0];
// R = s[channels * (width * (y+v) + (x+u)) + 2];
// G = s[channels * (width * (y+v) + (x+u)) + 1];
// B = s[channels * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
// printf("B(%d (%d %d)): RGB(%d %d %d) | sm_s(%d %d %d)\n", blockIdx.x, threadIdx.x, threadIdx.y, R, G, B, sm_s[sm_base_idx + 2], sm_s[sm_base_idx + 1], sm_s[sm_base_idx + 0]);
}
}
}
}
float totalR = 0.0;
float totalG = 0.0;
float totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
}
}
// memcpy(t, s, (height * width * channels * sizeof(unsigned char)));
// t[idx_x] = idx_x;
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
printf("Channel: %d\n", channels);
/* Hint 1 */
// hipMalloc(...) for device src and device dst
unsigned char *cuda_mem_s = NULL, *cuda_mem_t = NULL;
hipMalloc((void **)&cuda_mem_s, (height * width * channels * sizeof(unsigned char)));
hipMalloc((void **)&cuda_mem_t, (height * width * channels * sizeof(unsigned char)));
/* Hint 2 */
// hipMemcpy(...) copy source image to device (filter matrix if necessary)
hipMemcpy(cuda_mem_s, host_s, (height * width * channels * sizeof(unsigned char)), hipMemcpyHostToDevice);
// for(int i = 0; i < 10; i++){
// printf("Before-S: %d, T:%d\n", host_s[i], host_t[i]);
// }
/* Hint 3 */
// accelerate this function
hipLaunchKernelGGL(( sobel), dim3(block_num), dim3(thread_dim), 0, 0, cuda_mem_s, cuda_mem_t, height, width, channels);
// sobel(host_s, host_t, height, width, channels);
/* Hint 4 */
// hipMemcpy(...) copy result image to host
hipMemcpy(host_t, cuda_mem_t, (height * width * channels * sizeof(unsigned char)), hipMemcpyDeviceToHost);
// for(int i = 0; i < 10; i++){
// printf("After-S: %d, T:%d\n", host_s[i], host_t[i]);
// }
write_png(argv[2], host_t, height, width, channels);
return 0;
} | 3a0efdc4e2c4585c806bf7e08134093a5ae4cced.cu |
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <memory>
#include <vector_types.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define TH_DIM 32
const dim3 thread_dim(TH_DIM, TH_DIM);
const int block_num = 5000;
/* Hint 7 */
// this variable is used by device
__constant__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
// __device__ int get_mask(int *mask, int n, int x, int y){
// return mask[n * MASK_X * MASK_Y + x * MASK_Y + y];
// }
/* Hint 5 */
// this function is called by host and executed by device
// extern __shared__ int sm_s[];
__global__ void sobel(unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels) {
int x, y, i, v, u;
int R, G, B;
float val[MASK_N*3] = {0.0};
// const int x_divisor = blockDim.x - 1;
// const int x_width_div = width / x_divisor;
// const int x_width_mod = width % x_divisor;
// const int x_start = threadIdx.x * x_width_div;
// const int x_width = threadIdx.x < blockDim.x - 1? x_start + x_width_div : x_start + x_width_mod;
// const int x_gap = 1;
const int x_start = threadIdx.x;
const int x_width = width;
const int x_gap = blockDim.x;
const int y_start = blockIdx.x * blockDim.y + threadIdx.y;
const int y_height = height;
const int y_gap = gridDim.x * blockDim.y;
const int adjustX = (MASK_X % 2) ? 1 : 0;
const int adjustY = (MASK_Y % 2) ? 1 : 0;
const int xBound = MASK_X / 2;
const int yBound = MASK_Y / 2;
const int kernel_width = 2 * xBound + adjustX + TH_DIM - 1;
// __shared__ int sm_mask[MASK_N][MASK_X][MASK_Y];
__shared__ unsigned char sm_s[40000];
// printf("BLock %d, Thread (%d, %d) Created, Conv Box (%d : %d, %d : %d), Kernel Width: %d\n", blockIdx.x, threadIdx.x, threadIdx.y, \
// -xBound, xBound + adjustX + blockDim.x - 1, -yBound, yBound + adjustY + blockDim.y - 1, kernel_width);
// for(int i = 0; i < MASK_N; i++){
// for(int x = threadIdx.x; x < MASK_X; x+=blockDim.x){
// for(int y = threadIdx.y; y < MASK_Y; y+=blockDim.y){
// sm_mask[i][x][y] = mask[i][x][y];
// }
// }
// }
// __syncthreads();
char mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
/* Hint 6 */
// parallel job by blockIdx, blockDim, threadIdx
for (y = y_start; y < y_height; y+=y_gap) {
for (x = x_start; x < x_width; x+=x_gap) {
for (v = -yBound; v < yBound + adjustY; v+=2) {
for (u = -xBound; u < xBound + adjustX; u+=2) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
int base = channels * (kernel_width * (v + yBound + threadIdx.y) + (u + xBound + threadIdx.x));
sm_s[base + 2] = s[channels * (width * (y+v) + (x+u)) + 2];
sm_s[base + 1] = s[channels * (width * (y+v) + (x+u)) + 1];
sm_s[base + 0] = s[channels * (width * (y+v) + (x+u)) + 0];
}
}
}
__syncthreads();
for (i = 0; i < MASK_N; ++i) {
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; v++) {
for (u = -xBound; u < xBound + adjustX; u++) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
int base = channels * (kernel_width * (v + yBound + threadIdx.y) + (u + xBound + threadIdx.x));
R = sm_s[base + 2];
G = sm_s[base + 1];
B = sm_s[base + 0];
// R = s[channels * (width * (y+v) + (x+u)) + 2];
// G = s[channels * (width * (y+v) + (x+u)) + 1];
// B = s[channels * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
// printf("B(%d (%d %d)): RGB(%d %d %d) | sm_s(%d %d %d)\n", blockIdx.x, threadIdx.x, threadIdx.y, R, G, B, sm_s[sm_base_idx + 2], sm_s[sm_base_idx + 1], sm_s[sm_base_idx + 0]);
}
}
}
}
float totalR = 0.0;
float totalG = 0.0;
float totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
}
}
// memcpy(t, s, (height * width * channels * sizeof(unsigned char)));
// t[idx_x] = idx_x;
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
printf("Channel: %d\n", channels);
/* Hint 1 */
// cudaMalloc(...) for device src and device dst
unsigned char *cuda_mem_s = NULL, *cuda_mem_t = NULL;
cudaMalloc((void **)&cuda_mem_s, (height * width * channels * sizeof(unsigned char)));
cudaMalloc((void **)&cuda_mem_t, (height * width * channels * sizeof(unsigned char)));
/* Hint 2 */
// cudaMemcpy(...) copy source image to device (filter matrix if necessary)
cudaMemcpy(cuda_mem_s, host_s, (height * width * channels * sizeof(unsigned char)), cudaMemcpyHostToDevice);
// for(int i = 0; i < 10; i++){
// printf("Before-S: %d, T:%d\n", host_s[i], host_t[i]);
// }
/* Hint 3 */
// accelerate this function
sobel<<<block_num, thread_dim>>>(cuda_mem_s, cuda_mem_t, height, width, channels);
// sobel(host_s, host_t, height, width, channels);
/* Hint 4 */
// cudaMemcpy(...) copy result image to host
cudaMemcpy(host_t, cuda_mem_t, (height * width * channels * sizeof(unsigned char)), cudaMemcpyDeviceToHost);
// for(int i = 0; i < 10; i++){
// printf("After-S: %d, T:%d\n", host_s[i], host_t[i]);
// }
write_png(argv[2], host_t, height, width, channels);
return 0;
} |
9e9059d2528e0bce07b90ce596ee633b0e749472.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BvhHash.cuh"
namespace bvhhash {
void computePrimitiveHash(KeyValuePair * dst, Aabb * leafBoxes, uint numLeaves, uint buffSize,
Aabb * bigBox)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(buffSize, 512);
dim3 grid(nblk, 1, 1);
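// one thread per buffer slot: ceil(buffSize / 512) blocks of 512 threads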
hipLaunchKernelGGL(( calculateLeafHash_kernel), dim3(grid), dim3(block) , 0, 0, dst, leafBoxes, numLeaves, buffSize, bigBox);
}
}
| 9e9059d2528e0bce07b90ce596ee633b0e749472.cu | #include "BvhHash.cuh"
namespace bvhhash {
void computePrimitiveHash(KeyValuePair * dst, Aabb * leafBoxes, uint numLeaves, uint buffSize,
Aabb * bigBox)
{
dim3 block(512, 1, 1);
unsigned nblk = iDivUp(buffSize, 512);
dim3 grid(nblk, 1, 1);
calculateLeafHash_kernel<<< grid, block >>>(dst, leafBoxes, numLeaves, buffSize, bigBox);
}
}
|
bc7d11e6af47324cc4922fe9dbd653ce39fcc1d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
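// one thread per padded output element: decode (nc, ph, pw) from the linear index,
// then either copy the corresponding input pixel or write the constant pad value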
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
| bc7d11e6af47324cc4922fe9dbd653ce39fcc1d3.cu | #include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
9156e9f808c598b210a174d09458e9e174d32a44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ########################### CPU JULIA FXN ###############################
// struct complex
// {
// float r ;
// float i ;
// } ;
// float magnitude (struct complex a)
// {
// return ((a.r*a.r) + (a.i*a.i)) ;
// }
// void add(struct complex a , struct complex b)
// {
// res->r = a.r + b.r ;
// res->i = a.i + b.i ;
// }
// void mul(struct complex a , struct complex b)
// {
// res->r = (a.r*b.r) + (a.i*b.i) ;
// res->i = (a.r*b.i) + (a.i*b*r) ;
// }
// int julia (int x , int y)
// {
// const float scale 1.5 ;
// float jx = scale*(float)(DIM/2 - x)(DIM/2) ;
// float jy = scale*(float)(DIM/2 - x)(DIM/2) ;
// struct complex c , a , r1 , r2 ;
// c.r = -0.8 ; c.i = 0.154 ;
// a.r = jx ; a.i = jy ;
// int i=0 ;
// for(int i=0 ; i<200 ; i++)
// {
// mul(a , a , &r1) ;
// add(r1 , c , &r2) ;
// if(magnitude(r2) > 1000)
// return 0 ;
// a.r = r2.r ;
// a.i = r2.i ;
// }
// return 0 ;
// }
// void kernel (unsigned char *ptr)
// {
// for(int y=0 ; y<DIM ; y++)
// {
// for (int x = 0; x <DIM ; x++)
// {
// int offset = x+ y*DIM ;
// int juliaValue = julia(x , y) ;
// ptr [offset*4 + 0] = 255*juliaValue ;
// ptr [offset*4 + 1] = 0 ;
// ptr [offset*4 + 2] = 0 ;
// ptr [offset*4 + 3] = 255 ;
// }
// }
// }
// int main(void)
// {
// CPUBITmap bitmap(DIM , DIM) ;
// unsigned char *ptr = bitmap.get_ptr() ;
// kernel(ptr) ;
// bitmap.dispay_and_exit() ;
// }
// ########################### GPU JULIA FXN #####################################
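// Escape-time rendering of the Julia set for c = -0.8 + 0.154i: each pixel is
// mapped to a point a0 in the complex plane and iterated as a_{n+1} = a_n^2 + c;
// if the magnitude has not exceeded the escape threshold after 200 iterations,
// the pixel is treated as inside the set and coloured red.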
#include<cuda.h>
#include<cuda_runtime.h>
#include "cpu_bitmap.h" // CPUBitmap helper (assumed to come with the CUDA by Example samples)
#define DIM 1000 // assumed image dimension
struct cucomplex
{
float r ;
float i ;
} ;
__device__ float magnitude (struct cucomplex a)
{
return ((a.r*a.r) + (a.i*a.i)) ;
}
__device__ void add(struct cucomplex a , struct cucomplex b , struct cucomplex *res)
{
res->r = a.r + b.r ;
res->i = a.i + b.i ;
}
__device__ void mul(struct cucomplex a , struct cucomplex b , struct cucomplex *res)
{
// complex multiplication: (a.r + a.i*i)*(b.r + b.i*i)
res->r = (a.r*b.r) - (a.i*b.i) ;
res->i = (a.r*b.i) + (a.i*b.r) ;
}
__device__ int julia (int x , int y)
{
const float scale = 1.5f ;
// map the pixel to a point in the complex plane, centred on the image
float jx = scale*(float)(DIM/2 - x)/(DIM/2) ;
float jy = scale*(float)(DIM/2 - y)/(DIM/2) ;
struct cucomplex c , a , r1 , r2 ;
c.r = -0.8 ; c.i = 0.154 ;
a.r = jx ; a.i = jy ;
for(int i=0 ; i<200 ; i++)
{
mul(a , a , &r1) ;
add(r1 , c , &r2) ;
if(magnitude(r2) > 1000)
return 0 ;
a.r = r2.r ;
a.i = r2.i ;
}
return 1 ; // never escaped, so the point belongs to the set
}
__global__ void kernel (unsigned char *ptr)
{
int x = blockIdx.x ;
int y = blockIdx.y ;
int offset = x + y*gridDim.x ;
int juliaValue = julia(x , y) ;
ptr [offset*4 + 0] = 255*juliaValue ;
ptr [offset*4 + 1] = 0 ;
ptr [offset*4 + 2] = 0 ;
ptr [offset*4 + 3] = 255 ;
}
int main(void)
{
CPUBitmap bitmap(DIM , DIM) ;
unsigned char *dev_bitmap ;
hipMalloc((void**) &dev_bitmap , bitmap.image_size()) ;
dim3 grid(DIM , DIM) ;
hipLaunchKernelGGL(( kernel) , dim3(grid) , dim3(1) , 0, 0, dev_bitmap) ;
hipMemcpy(bitmap.get_ptr() , dev_bitmap , bitmap.image_size() , hipMemcpyDeviceToHost) ;
bitmap.display_and_exit() ;
hipFree(dev_bitmap) ;
return 0 ;
}
| 9156e9f808c598b210a174d09458e9e174d32a44.cu |
// ########################### CPU JULIA FXN ###############################
// struct complex
// {
// float r ;
// float i ;
// } ;
// float magnitude (struct complex a)
// {
// return ((a.r*a.r) + (a.i*a.i)) ;
// }
// void add(struct complex a , struct complex b)
// {
// res->r = a.r + b.r ;
// res->i = a.i + b.i ;
// }
// void mul(struct complex a , struct complex b)
// {
// res->r = (a.r*b.r) + (a.i*b.i) ;
// res->i = (a.r*b.i) + (a.i*b*r) ;
// }
// int julia (int x , int y)
// {
// const float scale 1.5 ;
// float jx = scale*(float)(DIM/2 - x)(DIM/2) ;
// float jy = scale*(float)(DIM/2 - x)(DIM/2) ;
// struct complex c , a , r1 , r2 ;
// c.r = -0.8 ; c.i = 0.154 ;
// a.r = jx ; a.i = jy ;
// int i=0 ;
// for(int i=0 ; i<200 ; i++)
// {
// mul(a , a , &r1) ;
// add(r1 , c , &r2) ;
// if(magnitude(r2) > 1000)
// return 0 ;
// a.r = r2.r ;
// a.i = r2.i ;
// }
// return 0 ;
// }
// void kernel (unsigned char *ptr)
// {
// for(int y=0 ; y<DIM ; y++)
// {
// for (int x = 0; x <DIM ; x++)
// {
// int offset = x+ y*DIM ;
// int juliaValue = julia(x , y) ;
// ptr [offset*4 + 0] = 255*juliaValue ;
// ptr [offset*4 + 1] = 0 ;
// ptr [offset*4 + 2] = 0 ;
// ptr [offset*4 + 3] = 255 ;
// }
// }
// }
// int main(void)
// {
// CPUBITmap bitmap(DIM , DIM) ;
// unsigned char *ptr = bitmap.get_ptr() ;
// kernel(ptr) ;
// bitmap.dispay_and_exit() ;
// }
// ########################### GPU JULIA FXN #####################################
#include<cuda.h>
#include<cuda_runtime.h>
#include "cpu_bitmap.h" // CPUBitmap helper (assumed to come with the CUDA by Example samples)
#define DIM 1000 // assumed image dimension
struct cucomplex
{
float r ;
float i ;
} ;
__device__ float magnitude (struct cucomplex a)
{
return ((a.r*a.r) + (a.i*a.i)) ;
}
__device__ void add(struct cucomplex a , struct cucomplex b , struct cucomplex *res)
{
res->r = a.r + b.r ;
res->i = a.i + b.i ;
}
__device__ void mul(struct cucomplex a , struct cucomplex b , struct cucomplex *res)
{
// complex multiplication: (a.r + a.i*i)*(b.r + b.i*i)
res->r = (a.r*b.r) - (a.i*b.i) ;
res->i = (a.r*b.i) + (a.i*b.r) ;
}
__device__ int julia (int x , int y)
{
const float scale = 1.5f ;
// map the pixel to a point in the complex plane, centred on the image
float jx = scale*(float)(DIM/2 - x)/(DIM/2) ;
float jy = scale*(float)(DIM/2 - y)/(DIM/2) ;
struct cucomplex c , a , r1 , r2 ;
c.r = -0.8 ; c.i = 0.154 ;
a.r = jx ; a.i = jy ;
for(int i=0 ; i<200 ; i++)
{
mul(a , a , &r1) ;
add(r1 , c , &r2) ;
if(magnitude(r2) > 1000)
return 0 ;
a.r = r2.r ;
a.i = r2.i ;
}
return 1 ; // never escaped, so the point belongs to the set
}
__global__ void kernel (unsigned char *ptr)
{
int x = blockIdx.x ;
int y = blockIdx.y ;
int offset = x + y*gridDim.x ;
int juliaValue = julia(x , y) ;
ptr [offset*4 + 0] = 255*juliaValue ;
ptr [offset*4 + 1] = 0 ;
ptr [offset*4 + 2] = 0 ;
ptr [offset*4 + 3] = 255 ;
}
int main(void)
{
CPUBitmap bitmap(DIM , DIM) ;
unsigned char *dev_bitmap ;
cudaMalloc((void**) &dev_bitmap , bitmap.image_size()) ;
dim3 grid(DIM , DIM) ;
kernel <<< grid , 1 >>> (dev_bitmap) ;
cudaMemcpy(bitmap.get_ptr() , dev_bitmap , bitmap.image_size() , cudaMemcpyDeviceToHost) ;
bitmap.display_and_exit() ;
cudaFree(dev_bitmap) ;
return 0 ;
}
|
fea3b112c59e40dddec27b7e22e651aca6b6fc8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
namespace k2 {
// See documentation in header of what this is supposed to do.
// This is similar to the template Append() defined in ops_inl.h,
// but with changes largely about adding `data_offsets`, and
// subtracting one from the dims of all but the last array.
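// For example, splicing the row_splits arrays [0, 2, 5] and [0, 3] drops the
// trailing 5 of the first array and offsets the second one by 5 (the number of
// elements already covered), giving [0, 2, 5, 8].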
Array1<int32_t> SpliceRowSplits(int32_t num_arrays,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_arrays, 0);
ContextPtr &c = src[0]->Context();
// row_splits_vec is the exclusive-sum of the modified dimensions of
// the arrays in `src`. `Modified` means: is subtracted from the dims
// of all but the last array.
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0;
row_splits_vec[0] = sum;
// `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a
// pointer to the last element in that array.
std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) {
K2_CHECK_GE(src[i]->Dim(), 1);
int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0);
sum += dim;
row_splits_vec[i + 1] = sum;
last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
if (ans_size == 0) return ans;
int32_t *ans_data = ans.Data();
Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec);
Array1<int32_t> data_offsets(c, num_arrays);
// note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of
// last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we
// don't need that value since we would not drop the last element of the last
// array.
ExclusiveSumDeref(last_elems_ptrs, &data_offsets);
int32_t *data_offsets_data = data_offsets.Data();
if (c->GetDeviceType() == kCpu) {
// a simple loop is faster, although the other branches should still work on
// CPU.
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t data_offset = data_offsets_data[i];
for (int32_t j = 0; j < this_dim; j++) {
ans_data[j] = this_src_data[j] + data_offset;
}
      // notice `this_dim - 1` here, it means we will overwrite the copy of the
      // last element of src[i] when copying elements of src[i+1] in the next
      // for-loop; this gives the same result as dropping the last element of
      // src[i], since last-element-of-src[i] == src[i+1]->Data()[0] (equals 0) +
      // data_offsets_data[i+1].
ans_data += this_dim - 1;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg,
int32_t rank) {
ans_data[index] = src_ptrs_data[seg][rank] + data_offsets_data[seg];
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, ans_size,
row_splits.Data(),
row_splits.Dim() - 1, *mgpu_context));
}
return ans;
}
Array1<int32_t> AppendWithOffsets(const Array1<int32_t> &offsets,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
int32_t num_arrays = offsets.Dim();
ContextPtr c = offsets.Context();
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0;
row_splits_vec[0] = sum;
for (int32_t i = 0; i < num_arrays; ++i) {
int32_t dim = src[i]->Dim();
sum += dim;
row_splits_vec[i + 1] = sum;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
if (ans_size == 0) return ans;
int32_t *ans_data = ans.Data();
const int32_t *offsets_data = offsets.Data();
if (c->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != num_arrays; ++i) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t offset = offsets_data[i];
for (int32_t j = 0; j != this_dim; ++j) {
ans_data[j] = this_src_data[j] + offset;
}
ans_data += this_dim;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; ++i) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
// `index` is idx01, `seg` is idx0, `rank` is idx1, `value_offsets` is just
// a cache for `offsets_data`.
auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg,
int32_t rank,
mgpu::tuple<int32_t> value_offsets) {
ans_data[index] = src_ptrs_data[seg][rank] + mgpu::get<0>(value_offsets);
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(
lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1,
mgpu::make_tuple(offsets_data), *mgpu_context));
}
return ans;
}
bool ValidateRowIds(const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_ids.Context();
const int32_t *data = row_ids.Data();
int32_t dim = row_ids.Dim();
if (dim == 0) return true; // will treat this as valid
// note `row_ids[0]` may copy memory from device to host
if (row_ids[0] < 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_ids, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplits(const Array1<int32_t> &row_splits,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_splits.Context();
const int32_t *data = row_splits.Data();
int32_t dim = row_splits.Dim();
// must have at least one element and row_splits[0] == 0
if (dim == 0 || row_splits[0] != 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_splits, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits,
const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
// Check if their context are compatible or not while getting
ContextPtr ctx = GetContext(row_splits, row_ids);
int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim();
if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false;
if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false;
if (num_elems != row_splits[num_rows]) return false;
const int32_t *row_ids_data = row_ids.Data(),
*row_splits_data = row_splits.Data();
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(ctx->IsCompatible(*temp->Context()));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp_array.Data();
K2_EVAL(
ctx, ::max(num_elems, num_rows), lambda_check_row_ids,
(int32_t i)->void {
// check row_splits
bool invalid_splits =
(i < num_rows && row_splits_data[i] > row_splits_data[i + 1]);
// check row_ids
bool invalid_ids =
(i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]);
if (invalid_splits || invalid_ids) *temp_data = 1;
// check if row_splits and row_ids agree with each other
if (i < num_elems) {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1])
*temp_data = 1;
}
});
return (*temp)[0] == 0;
}
void RowSplitsToRowIds(const Array1<int32_t> &row_splits,
Array1<int32_t> *row_ids) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(row_splits, *row_ids);
int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
K2_CHECK_EQ(num_elems, row_splits[num_rows]);
RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data());
}
void RowIdsToRowSplits(const Array1<int32_t> &row_ids,
Array1<int32_t> *row_splits) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(*row_splits, row_ids);
int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]);
RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows,
row_splits->Data());
}
Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data,
int32_t src_dim, int32_t n) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(n, 0);
Array1<int32_t> ans(c, n, 0); // init with 0
int32_t *ans_data = ans.Data();
if (n == 0) {
K2_CHECK_EQ(src_dim, 0);
return ans;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
for (int32_t i = 0; i < src_dim; ++i) {
++ans_data[src_data[i]];
}
} else {
K2_CHECK_EQ(d, kCuda);
std::size_t temp_storage_bytes = 0;
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim,
c->GetCudaStream())); // The first time is to determine temporary
// device storage requirements.
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0,
n, src_dim, c->GetCudaStream()));
}
return ans;
}
Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
return GetCounts(src.Context(), src.Data(), src.Dim(), n);
}
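// Given a monotonically non-increasing `src`, the result `ans` has ans[j] equal
// to the number of entries of `src` that are greater than j; e.g. (as a sketch)
// src = [3, 3, 2] yields ans = [3, 3, 2] and src = [2, 1] yields ans = [2, 1].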
Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
int32_t src_dim = src.Dim();
const int32_t *src_data = src.Data();
if (src_dim == 0) {
return Array1<int32_t>(c, 0);
}
K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging
// note `src[0]` may do a DeviceToHost memory copy
int32_t ans_dim = src[0];
Array1<int32_t> ans(c, ans_dim, 0); // init with 0
int32_t *ans_data = ans.Data();
K2_EVAL(
c, src_dim, lambda_set_values, (int32_t i)->void {
K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i]));
if (i + 1 == src_dim || src_data[i + 1] < src_data[i])
ans_data[src_data[i] - 1] = i + 1;
});
MonotonicDecreasingUpperBound(ans, &ans);
return ans;
}
Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) {
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
Array1<int32_t> ans(c, dim);
const int32_t *src_data = src.Data();
int32_t *ans_data = ans.Data();
K2_EVAL(
c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; });
return ans;
}
Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) {
K2_CHECK_GT(row_splits.Dim(), 0);
ContextPtr &c = row_splits.Context();
int32_t num_rows = row_splits.Dim() - 1;
Array1<int32_t> sizes(c, num_rows);
const int32_t *row_splits_data = row_splits.Data();
int32_t *sizes_data = sizes.Data();
K2_EVAL(
c, num_rows, lambda_set_sizes, (int32_t i)->void {
sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i];
});
return sizes;
}
// This is modified from RowSplitsToRowIdsKernel.
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
__global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems, uint32_t *merge_map) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
#pragma unroll(4)
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
merge_map[this_row_split + thread_this_row] =
uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row);
}
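// The merge map encodes (source index, position within source) in one uint32:
// value % num_srcs is the source and value / num_srcs is the position within it.
// For example, sizes = {2, 3} produces the map [0, 2, 1, 3, 5].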
Array1<uint32_t> SizesToMergeMap(ContextPtr c,
const std::vector<int32_t> &sizes) {
int32_t num_srcs = sizes.size();
ContextPtr cpu_context = GetCpuContext();
Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1);
int32_t *row_splits_cpu_data = row_splits_cpu.Data();
int32_t tot_size = 0;
row_splits_cpu_data[0] = 0;
for (int32_t i = 0; i != num_srcs; ++i) {
tot_size += sizes[i];
row_splits_cpu_data[i + 1] = tot_size;
}
Array1<uint32_t> ans(c, tot_size);
if (tot_size == 0) return ans;
uint32_t *ans_data = ans.Data();
if (c->GetDeviceType() == kCpu) {
int32_t cur = 0;
for (int32_t src = 0; src != num_srcs; ++src) {
int32_t begin = cur, // i.e. the previous end.
end = row_splits_cpu_data[src + 1];
for (; cur != end; ++cur) {
// the 'src' says which source this item came from, and (cur - begin)
// is the position within that source.
ans_data[cur] =
uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs);
}
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits = row_splits_cpu.To(c);
#if 1
int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_srcs * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(
hipLaunchKernelGGL(( SizesToMergeMapKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(),
num_srcs, threads_per_row, row_splits.Data(), tot_size,
ans.Data()));
#else
// Below version can be just faster than the above version when
// num_srcs > 5000 and tot_size > 1,000,000
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
auto lambda_set_ans = [=] __device__(uint32_t index, uint32_t seg,
uint32_t rank) {
ans_data[index] = seg + rank * static_cast<uint32_t>(num_srcs);
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, tot_size,
row_splits.Data(),
row_splits.Dim() - 1, *mgpu_context));
#endif
}
return ans;
}
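// The check below clears ones[a[i]] for every in-range a[i]: `a` is a
// permutation of [0, a.Dim()) exactly when every slot gets cleared, i.e. when
// the scratch array ends up identically zero.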
bool IsPermutation(const Array1<int32_t> &a) {
Array1<int32_t> ones(a.Context(), a.Dim(), 1);
int32_t *ones_data = ones.Data();
const int32_t *a_data = a.Data();
int32_t dim = a.Dim();
K2_EVAL(
a.Context(), a.Dim(), lambda_set_zero, (int32_t i)->void {
if (static_cast<uint32_t>(a_data[i]) < static_cast<uint32_t>(dim)) {
ones_data[a_data[i]] = 0;
}
});
return Equal(ones, 0);
}
void RowSplitsToRowIdsOffset(const Array1<int32_t> &row_splits_part,
Array1<int32_t> *row_ids_part) {
ContextPtr c = row_splits_part.Context();
Array1<int32_t> row_splits(c, row_splits_part.Dim());
int32_t *row_splits_data = row_splits.Data();
const int32_t *row_splits_part_data = row_splits_part.Data();
K2_EVAL(c, row_splits_part.Dim(), lambda_subtract_offset, (int32_t i) {
row_splits_data[i] = row_splits_part_data[i] - row_splits_part_data[0];
});
RowSplitsToRowIds(row_splits, row_ids_part);
}
} // namespace k2
| fea3b112c59e40dddec27b7e22e651aca6b6fc8f.cu | /**
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
namespace k2 {
// See documentation in header of what this is supposed to do.
// This is similar to the template Append() defined in ops_inl.h,
// but with changes largely about adding `data_offsets`, and
// subtracting one from the dims of all but the last array.
Array1<int32_t> SpliceRowSplits(int32_t num_arrays,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_arrays, 0);
ContextPtr &c = src[0]->Context();
// row_splits_vec is the exclusive-sum of the modified dimensions of
// the arrays in `src`. `Modified` means: is subtracted from the dims
// of all but the last array.
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0;
row_splits_vec[0] = sum;
// `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a
// pointer to the last element in that array.
std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) {
K2_CHECK_GE(src[i]->Dim(), 1);
int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0);
sum += dim;
row_splits_vec[i + 1] = sum;
last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
if (ans_size == 0) return ans;
int32_t *ans_data = ans.Data();
Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec);
Array1<int32_t> data_offsets(c, num_arrays);
// note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of
// last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we
// don't need that value since we would not drop the last element of the last
// array.
ExclusiveSumDeref(last_elems_ptrs, &data_offsets);
int32_t *data_offsets_data = data_offsets.Data();
if (c->GetDeviceType() == kCpu) {
// a simple loop is faster, although the other branches should still work on
// CPU.
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t data_offset = data_offsets_data[i];
for (int32_t j = 0; j < this_dim; j++) {
ans_data[j] = this_src_data[j] + data_offset;
}
      // notice `this_dim - 1` here, it means we will overwrite the copy of the
      // last element of src[i] when copying elements of src[i+1] in the next
      // for-loop; this gives the same result as dropping the last element of
      // src[i], since last-element-of-src[i] == src[i+1]->Data()[0] (equals 0) +
      // data_offsets_data[i+1].
ans_data += this_dim - 1;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg,
int32_t rank) {
ans_data[index] = src_ptrs_data[seg][rank] + data_offsets_data[seg];
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, ans_size,
row_splits.Data(),
row_splits.Dim() - 1, *mgpu_context));
}
return ans;
}
Array1<int32_t> AppendWithOffsets(const Array1<int32_t> &offsets,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
int32_t num_arrays = offsets.Dim();
ContextPtr c = offsets.Context();
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0;
row_splits_vec[0] = sum;
for (int32_t i = 0; i < num_arrays; ++i) {
int32_t dim = src[i]->Dim();
sum += dim;
row_splits_vec[i + 1] = sum;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
if (ans_size == 0) return ans;
int32_t *ans_data = ans.Data();
const int32_t *offsets_data = offsets.Data();
if (c->GetDeviceType() == kCpu) {
for (int32_t i = 0; i != num_arrays; ++i) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t offset = offsets_data[i];
for (int32_t j = 0; j != this_dim; ++j) {
ans_data[j] = this_src_data[j] + offset;
}
ans_data += this_dim;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; ++i) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
// `index` is idx01, `seg` is idx0, `rank` is idx1, `value_offsets` is just
// a cache for `offsets_data`.
auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg,
int32_t rank,
mgpu::tuple<int32_t> value_offsets) {
ans_data[index] = src_ptrs_data[seg][rank] + mgpu::get<0>(value_offsets);
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(
lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1,
mgpu::make_tuple(offsets_data), *mgpu_context));
}
return ans;
}
bool ValidateRowIds(const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_ids.Context();
const int32_t *data = row_ids.Data();
int32_t dim = row_ids.Dim();
if (dim == 0) return true; // will treat this as valid
// note `row_ids[0]` may copy memory from device to host
if (row_ids[0] < 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_ids, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplits(const Array1<int32_t> &row_splits,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_splits.Context();
const int32_t *data = row_splits.Data();
int32_t dim = row_splits.Dim();
// must have at least one element and row_splits[0] == 0
if (dim == 0 || row_splits[0] != 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_splits, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits,
const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
// Check if their context are compatible or not while getting
ContextPtr ctx = GetContext(row_splits, row_ids);
int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim();
if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false;
if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false;
if (num_elems != row_splits[num_rows]) return false;
const int32_t *row_ids_data = row_ids.Data(),
*row_splits_data = row_splits.Data();
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(ctx->IsCompatible(*temp->Context()));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp_array.Data();
K2_EVAL(
ctx, std::max(num_elems, num_rows), lambda_check_row_ids,
(int32_t i)->void {
// check row_splits
bool invalid_splits =
(i < num_rows && row_splits_data[i] > row_splits_data[i + 1]);
// check row_ids
bool invalid_ids =
(i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]);
if (invalid_splits || invalid_ids) *temp_data = 1;
// check if row_splits and row_ids agree with each other
if (i < num_elems) {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1])
*temp_data = 1;
}
});
return (*temp)[0] == 0;
}
void RowSplitsToRowIds(const Array1<int32_t> &row_splits,
Array1<int32_t> *row_ids) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(row_splits, *row_ids);
int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
K2_CHECK_EQ(num_elems, row_splits[num_rows]);
RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data());
}
void RowIdsToRowSplits(const Array1<int32_t> &row_ids,
Array1<int32_t> *row_splits) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(*row_splits, row_ids);
int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]);
RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows,
row_splits->Data());
}
Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data,
int32_t src_dim, int32_t n) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(n, 0);
Array1<int32_t> ans(c, n, 0); // init with 0
int32_t *ans_data = ans.Data();
if (n == 0) {
K2_CHECK_EQ(src_dim, 0);
return ans;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
for (int32_t i = 0; i < src_dim; ++i) {
++ans_data[src_data[i]];
}
} else {
K2_CHECK_EQ(d, kCuda);
std::size_t temp_storage_bytes = 0;
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim,
c->GetCudaStream())); // The first time is to determine temporary
// device storage requirements.
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0,
n, src_dim, c->GetCudaStream()));
}
return ans;
}
Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
return GetCounts(src.Context(), src.Data(), src.Dim(), n);
}
Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
int32_t src_dim = src.Dim();
const int32_t *src_data = src.Data();
if (src_dim == 0) {
return Array1<int32_t>(c, 0);
}
K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging
// note `src[0]` may do a DeviceToHost memory copy
int32_t ans_dim = src[0];
Array1<int32_t> ans(c, ans_dim, 0); // init with 0
int32_t *ans_data = ans.Data();
K2_EVAL(
c, src_dim, lambda_set_values, (int32_t i)->void {
K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i]));
if (i + 1 == src_dim || src_data[i + 1] < src_data[i])
ans_data[src_data[i] - 1] = i + 1;
});
MonotonicDecreasingUpperBound(ans, &ans);
return ans;
}
Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) {
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
Array1<int32_t> ans(c, dim);
const int32_t *src_data = src.Data();
int32_t *ans_data = ans.Data();
K2_EVAL(
c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; });
return ans;
}
Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) {
K2_CHECK_GT(row_splits.Dim(), 0);
ContextPtr &c = row_splits.Context();
int32_t num_rows = row_splits.Dim() - 1;
Array1<int32_t> sizes(c, num_rows);
const int32_t *row_splits_data = row_splits.Data();
int32_t *sizes_data = sizes.Data();
K2_EVAL(
c, num_rows, lambda_set_sizes, (int32_t i)->void {
sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i];
});
return sizes;
}
// This is modified from RowSplitsToRowIdsKernel.
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
__global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems, uint32_t *merge_map) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
#pragma unroll(4)
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
merge_map[this_row_split + thread_this_row] =
uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row);
}
Array1<uint32_t> SizesToMergeMap(ContextPtr c,
const std::vector<int32_t> &sizes) {
int32_t num_srcs = sizes.size();
ContextPtr cpu_context = GetCpuContext();
Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1);
int32_t *row_splits_cpu_data = row_splits_cpu.Data();
int32_t tot_size = 0;
row_splits_cpu_data[0] = 0;
for (int32_t i = 0; i != num_srcs; ++i) {
tot_size += sizes[i];
row_splits_cpu_data[i + 1] = tot_size;
}
Array1<uint32_t> ans(c, tot_size);
if (tot_size == 0) return ans;
uint32_t *ans_data = ans.Data();
if (c->GetDeviceType() == kCpu) {
int32_t cur = 0;
for (int32_t src = 0; src != num_srcs; ++src) {
int32_t begin = cur, // i.e. the previous end.
end = row_splits_cpu_data[src + 1];
for (; cur != end; ++cur) {
// the 'src' says which source this item came from, and (cur - begin)
// is the position within that source.
ans_data[cur] =
uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs);
}
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits = row_splits_cpu.To(c);
#if 1
int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_srcs * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(
SizesToMergeMapKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>(
num_srcs, threads_per_row, row_splits.Data(), tot_size,
ans.Data()));
#else
// Below version can be just faster than the above version when
// num_srcs > 5000 and tot_size > 1,000,000
mgpu::context_t *mgpu_context = GetModernGpuAllocator(c);
auto lambda_set_ans = [=] __device__(uint32_t index, uint32_t seg,
uint32_t rank) {
ans_data[index] = seg + rank * static_cast<uint32_t>(num_srcs);
};
K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, tot_size,
row_splits.Data(),
row_splits.Dim() - 1, *mgpu_context));
#endif
}
return ans;
}
bool IsPermutation(const Array1<int32_t> &a) {
Array1<int32_t> ones(a.Context(), a.Dim(), 1);
int32_t *ones_data = ones.Data();
const int32_t *a_data = a.Data();
int32_t dim = a.Dim();
K2_EVAL(
a.Context(), a.Dim(), lambda_set_zero, (int32_t i)->void {
if (static_cast<uint32_t>(a_data[i]) < static_cast<uint32_t>(dim)) {
ones_data[a_data[i]] = 0;
}
});
return Equal(ones, 0);
}
void RowSplitsToRowIdsOffset(const Array1<int32_t> &row_splits_part,
Array1<int32_t> *row_ids_part) {
ContextPtr c = row_splits_part.Context();
Array1<int32_t> row_splits(c, row_splits_part.Dim());
int32_t *row_splits_data = row_splits.Data();
const int32_t *row_splits_part_data = row_splits_part.Data();
K2_EVAL(c, row_splits_part.Dim(), lambda_subtract_offset, (int32_t i) {
row_splits_data[i] = row_splits_part_data[i] - row_splits_part_data[0];
});
RowSplitsToRowIds(row_splits, row_ids_part);
}
} // namespace k2
|
7948983d280314bd85e94cc1086983957475717b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "vec_mat_mult.h"
__global__ void vec_mat_kernel_naive(float *Ad, float *Xd, float *Yd)
{
//Thread Index
int threadX = threadIdx.x;
//Block Index
int blockX = blockIdx.x;
//Absolute Position
int row = blockDim.x * blockX + threadX;
double Y_temp = 0;
float A_element = 0;
float X_element = 0;
for(int i = 0; i < MATRIX_SIZE; i++){
A_element = Ad[MATRIX_SIZE * row + i]; //Get all the values in the row of A
X_element = Xd[i]; //Get all values in X
Y_temp += A_element * X_element;
}
Yd[row] = (float)Y_temp;
}
__global__ void vec_mat_kernel_optimized(float *Ad, float *Xd, float *Yd)
{
__shared__ float Asub[TILE_SIZE][TILE_SIZE];
__shared__ float Xsub[TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = blockDim.y * blockIdx.y + ty;
int col = blockDim.x * blockIdx.x + tx;
int k = 0;
int temp;
double Ysub = 0.0f;
while(k < MATRIX_SIZE){
if(k + ty < MATRIX_SIZE && col < MATRIX_SIZE)
Asub[ty][tx] = Ad[row*MATRIX_SIZE + k + tx];
if(ty == 0)
Xsub[tx] = Xd[(k+ty)+col];
__syncthreads();
if(ty == 0)
for(temp = 0; temp < TILE_SIZE; temp++)
Ysub += Asub[tx][temp] * Xsub[temp];
__syncthreads();
k += TILE_SIZE;
}
if(ty == 0)
Yd[row + col] = (float)Ysub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 7948983d280314bd85e94cc1086983957475717b.cu | /* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "vec_mat_mult.h"
__global__ void vec_mat_kernel_naive(float *Ad, float *Xd, float *Yd)
{
//Thread Index
int threadX = threadIdx.x;
//Block Index
int blockX = blockIdx.x;
//Absolute Position
int row = blockDim.x * blockX + threadX;
double Y_temp = 0;
float A_element = 0;
float X_element = 0;
for(int i = 0; i < MATRIX_SIZE; i++){
A_element = Ad[MATRIX_SIZE * row + i]; //Get all the values in the row of A
X_element = Xd[i]; //Get all values in X
Y_temp += A_element * X_element;
}
Yd[row] = (float)Y_temp;
}
__global__ void vec_mat_kernel_optimized(float *Ad, float *Xd, float *Yd)
{
__shared__ float Asub[TILE_SIZE][TILE_SIZE];
__shared__ float Xsub[TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = blockDim.y * blockIdx.y + ty;
int col = blockDim.x * blockIdx.x + tx;
int k = 0;
int temp;
double Ysub = 0.0f;
while(k < MATRIX_SIZE){
if(k + ty < MATRIX_SIZE && col < MATRIX_SIZE)
Asub[ty][tx] = Ad[row*MATRIX_SIZE + k + tx];
if(ty == 0)
Xsub[tx] = Xd[(k+ty)+col];
__syncthreads();
if(ty == 0)
for(temp = 0; temp < TILE_SIZE; temp++)
Ysub += Asub[tx][temp] * Xsub[temp];
__syncthreads();
k += TILE_SIZE;
}
if(ty == 0)
Yd[row + col] = (float)Ysub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
8d0c2d063d1da3a6ae868b9780e060c15f83c1ee.hip | // !!! This is a file automatically generated by hipify!!!
// Written by Vasily Volkov.
// Copyright (c) 2008, The Regents of the University of California.
// All rights reserved.
#include <time.h>
#include "sgemmN.cuh"
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 32
__device__ void saxpy( float a, float *b, float *c )
{
c[0] += a*b[0];
c[1] += a*b[1];
c[2] += a*b[2];
c[3] += a*b[3];
c[4] += a*b[4];
c[5] += a*b[5];
c[6] += a*b[6];
c[7] += a*b[7];
c[8] += a*b[8];
c[9] += a*b[9];
c[10] += a*b[10];
c[11] += a*b[11];
c[12] += a*b[12];
c[13] += a*b[13];
c[14] += a*b[14];
c[15] += a*b[15];
}
__device__ void saxpy2( const float* a, const float *b, const float *a2, float *c )
{
c[0] += a[0] *b[0] * a2[0];
c[1] += a[1] *b[1] * a2[0];
c[2] += a[2] *b[2] * a2[0];
c[3] += a[3] *b[3] * a2[0];
c[4] += a[4] *b[4] * a2[0];
c[5] += a[5] *b[5] * a2[0];
c[6] += a[6] *b[6] * a2[0];
c[7] += a[7] *b[7] * a2[0];
c[8] += a[8] *b[8] * a2[0];
c[9] += a[9] *b[9] * a2[0];
c[10] += a[10]*b[10] * a2[0];
c[11] += a[11]*b[11] * a2[0];
c[12] += a[12]*b[12] * a2[0];
c[13] += a[13]*b[13] * a2[0];
c[14] += a[14]*b[14] * a2[0];
c[15] += a[15]*b[15] * a2[0];
}
__device__ void saxpy3( const float* a, const float *b, float *c )
{
c[0] += a[0] *b[0] ;
c[1] += a[1] *b[1] ;
c[2] += a[2] *b[2] ;
c[3] += a[3] *b[3] ;
c[4] += a[4] *b[4] ;
c[5] += a[5] *b[5] ;
c[6] += a[6] *b[6] ;
c[7] += a[7] *b[7] ;
c[8] += a[8] *b[8] ;
c[9] += a[9] *b[9] ;
c[10] += a[10]*b[10] ;
c[11] += a[11]*b[11] ;
c[12] += a[12]*b[12] ;
c[13] += a[13]*b[13] ;
c[14] += a[14]*b[14] ;
c[15] += a[15]*b[15] ;
}
__device__ void saxpy64( const float* a, const float *b, float *c )
{
#pragma unroll
for( int i = 0; i < 64; i++)
c[i] = a[i]*b[i] ;
}
__device__ void saxpy32( const float* a, const float *b, float *c, const float* balance )
{
#pragma unroll
for( int i = 0; i < 32; i++)
c[i] = a[i]*b[i]* balance[0];
}
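// Pairwise tree reduction of 32 partial products down to a single value
// (32 -> 16 -> 8 -> 4 -> 2 -> 1), kept in registers and fully unrolled.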
__device__ void redux32sum( const float* a, float* res )
{
float c2[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
float c[8] = {0,0,0,0,0,0,0,0};
// 32 -> 16
#pragma unroll
for( int i = 0; i < 16; i++)
c2[i] = a[i] + a[16+i];
// 16 -> 8
#pragma unroll
for( int i = 0; i < 8; i++)
c[i] = c2[8+i] + c2[i];
//8 -> 4
#pragma unroll
for( int i = 0; i < 4; i++)
c2[i] = c[4+i] + c[i];
//4->2
#pragma unroll
for( int i = 0; i < 2; i++)
c[i] = c2[2+i] + c2[i];
// 2->1
*res = c[0] + c[1];
}
__device__ void redux64sum( const float* a, float* res )
{
float c[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
float c2[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// 64 -> 32
#pragma unroll
	for( int i = 0; i < 32; i++)
c[i] = a[i] + a[32+i];
// 32 -> 16
#pragma unroll
for( int i = 0; i < 16; i++)
c2[i] = c[16+i] + c[i];
// 16 -> 8
#pragma unroll
for( int i = 0; i < 8; i++)
c[i] = c2[8+i] + c2[i];
//8 -> 4
#pragma unroll
for( int i = 0; i < 4; i++)
c2[i] = c[4+i] + c[i];
//4->2
#pragma unroll
for( int i = 0; i < 2; i++)
c[i] = c2[2+i] + c2[i];
// 2->1
*res = c[0] + c[1];
}
__device__ void redux16sum( const float* a, float* res )
{
	float c[8] = {0,0,0,0,0,0,0,0};
	float c2[4] = {0,0,0,0};
	// 16 -> 8
	#pragma unroll
	for( int i = 0; i < 8; i++)
		c[i] = a[8+i] + a[i];
	//8 -> 4
	#pragma unroll
	for( int i = 0; i < 4; i++)
		c2[i] = c[4+i] + c[i];
	//4->2
	#pragma unroll
	for( int i = 0; i < 2; i++)
		c[i] = c2[2+i] + c2[i];
	// 2->1
	*res = c[0] + c[1];
}
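// Volkov-style SGEMM blocking: each 64-thread block (16x4) computes a 64x16 tile
// of C, with every thread holding one row's 16 partial results in registers while
// 16x16 panels of B are staged through shared memory.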
extern "C" __global__ void sgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += iby + inx + __mul24( iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k*ldb;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][16];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[iny+i][inx] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16*ldb;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
extern "C" __global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += inx + __mul24( iby + iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][17];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[inx][iny+i] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
extern "C" __global__ void vec_mat_vec_mult(const float *A, int lda,
const float *B, int ldb,
float *C, int k,
float alpha, float beta )
{
// FOR 16 threads
const int inx = threadIdx.x;
//const int iny = threadIdx.y;
const int ibx = blockIdx.x * 16;
const int iby = blockIdx.y * 16;
const int id = inx;
const float* ARow = A;
A += ibx + id;
B += ibx;
const float *Blast = B + k;
	// local multiplication vector
	float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	// running sums along the column
	float r[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	// temporary value
	float res = 0;
	// load the slice of the vector handled by this block
	__shared__ float bs[16];
bs[id] = A[0];
C[id] = -1;
__syncthreads();
do
{
saxpy3( &B[0], &bs[0], c );
redux16sum(c, &res);
r[id] += res*ARow[0];
__syncthreads();
B += 16;
ARow += 16;
} while( B < Blast );
redux16sum(r, &C[ibx]);
}
extern "C" __global__ void vmSymv(const float *A, int lda,
const float *B, int ldb,
float *C, int k, int length,
float alpha, float beta )
{
// FOR 16 threads
const int inx = threadIdx.x;
const int ibx = blockIdx.x * 16;
const int iby = blockIdx.y * 16;
const int id = threadIdx.x;
const float* ARow = A;
A += ibx + id;
B += ibx + __mul24(iby+inx,ldb);
const float *Blast = ARow + k;
	// local multiplication vector
	float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	// running sums along the column
	__shared__ float r[16];
	// temporary value
	float res = 0;
	// load the slice of the vector handled by this block
	__shared__ float bs[16];
bs[id] = A[0];
r[id] = 0;
__syncthreads();
int steps = ldb/16;
int counter = 0;
do
{
saxpy3( &B[0], &bs[0], c );
redux16sum(c, &res);
r[id] += res*ARow[0];
__syncthreads();
B += 16 * ldb;
ARow += 16;
counter++;
} while( counter < steps );
if(id == 0)
redux16sum(r, &C[ibx]);
}
extern "C" __global__ void vmv(const float *A, int lda,
const float *B, int ldb,
float *C, int k,
float alpha, float beta )
{
// FOR 64 threads
const int id = threadIdx.x;
const int ibx = blockIdx.x * BLOCK_SIZE;
B += ibx + id*ldb;
	// local multiplication vector
	float c[BLOCK_SIZE] = {	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
							0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	// load the slice of the vector handled by this block
	__shared__ float bs[BLOCK_SIZE];
	float r[BLOCK_SIZE] = {	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
							0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	// temporary value
	float res;
bs[id] = A[ibx + id];
__syncthreads();
const int BStep = ldb*BLOCK_SIZE;
const float* blast = A + lda;
do
{
saxpy32( &B[0], &bs[0], c, &A[id]);
redux32sum(c, &res);
r[id] += res;
B += BStep;
A += BLOCK_SIZE;
} while( A < blast );
C[ibx+id] = r[id];
}
extern "C" void ourSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc )
{
dim3 grid( m/64, n/16 ), threads( 16, 4 );
if( transb == 'N' || transb == 'n' )
hipLaunchKernelGGL(( sgemmNN), dim3(grid), dim3(threads), 0, 0, A, lda, B, ldb, C, ldc, k, alpha, beta );
else
hipLaunchKernelGGL(( sgemmNT), dim3(grid), dim3(threads), 0, 0, A, lda, B, ldb, C, ldc, k, alpha, beta );
}
//
// auxiliary routines
//
void fill( float *A, int n, int maxi )
{
for( int j = 0; j < n; j++ )
A[j] = float( (rand()%(maxi*2+1)) - maxi ) / ( maxi + 1.f );
}
float diff( int m, int n, float *A, int lda, float *B, int ldb )
{
float err = 0;
for( int j = 0; j < n; j++ )
for( int i = 0; i < m; i++ )
err = max( err, fabs( A[i+j*lda] - B[i+j*ldb] ) );
return err;
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define MSize 6
#define NSize 512*32
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int
n, int p, int q, float alpha, float beta)
{
hipblasSscal (handle, n-p, &alpha, &m[IDX2C(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
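// Sketch of what follows: evaluate the quadratic form x^T * B * x on the GPU as
// y = B*x (hipblasSgemv) followed by dot(x, y) (hipblasSdot), then recompute the
// same value with plain CPU loops for a timing / correctness comparison.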
int cublas_example()
{
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int i, j;
	// Symmetric matrix, column-major
float *B = (float*)malloc( (NSize*(NSize+1))/2*sizeof( float ) );
float *BComplete = (float*)malloc( NSize*NSize*sizeof( float ) );
// Vector
float *AVec = (float*)malloc( NSize*sizeof( float ) );
fill( BComplete, NSize*NSize, 31 );
fill( AVec, NSize, 31 );
int count = 0;
for(int i = 0; i< NSize; i++)
{
for(int j = 0; j < NSize; j++)
{
if(j <= i)
{
BComplete[i*NSize + j] = BComplete[j * NSize + i];
B[count] = BComplete[i*NSize + j];
count++;
}
}
}
	// results
float *cuda_result = (float*)malloc( NSize*sizeof( float ) );
for(int i = 0; i< NSize; i++)
cuda_result[i]= 0;
//float *cpu_result= (float*)malloc( NSize*sizeof( float ) );
	float *devPtrB, *devPtrAVec, *devPtrRes, *devPtrBSym;
cudaStat = hipMalloc ((void**)&devPtrB, NSize*NSize*sizeof(float));
cudaStat = hipMalloc ((void**)&devPtrAVec, NSize*sizeof(float));
cudaStat = hipMalloc ((void**)&devPtrRes, NSize*sizeof(float));
cudaStat = hipMalloc ((void**)&devPtrBSym, ((NSize+1)*NSize)/2*sizeof(float));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
hipblasSetAtomicsMode(handle, HIPBLAS_ATOMICS_ALLOWED);
stat = hipblasSetMatrix (NSize, NSize, sizeof(float), BComplete, NSize, devPtrB, NSize);
//cudaStat = hipMemcpy(&devPtrBSym, &B, ((NSize+1)*NSize)/2*sizeof(float), hipMemcpyHostToDevice);
//cudaStat = hipMemcpy(&devPtrB, &BComplete, NSize*NSize*sizeof(float), hipMemcpyHostToDevice);
stat = hipblasSetVector (NSize, sizeof(float), AVec, 1, devPtrAVec, 1);
stat = hipblasSetVector (NSize, sizeof(float), cuda_result, 1, devPtrRes, 1);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrAVec);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
float alpha = 1;
float beta = 0;
hipEvent_t start, end;
Q( hipEventCreate( &start ) );
Q( hipEventCreate( &end ) );
hipEventRecord(start, 0);
float* sum = (float*)malloc(sizeof(float));
stat = hipblasSgemv( handle, HIPBLAS_OP_T, NSize, NSize, &alpha, devPtrB, NSize, devPtrAVec, 1, &beta, devPtrRes, 1 );
//stat = hipblasSsymv( handle, HIPBLAS_FILL_MODE_LOWER, NSize, &alpha, devPtrB, NSize, devPtrAVec, 1, &beta, devPtrRes, 1 );
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("hipblasSgemv failed");
hipFree (devPtrAVec);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
float time;
hipEventElapsedTime(&time, start, end);
hipEventRecord(start, 0);
stat = hipblasSdot( handle, NSize, devPtrAVec, 1, devPtrRes, 1, sum );
hipEventRecord(end, 0);
hipEventSynchronize(end);
float time2;
hipEventElapsedTime(&time2, start, end);
stat = hipblasGetVector (NSize, sizeof(float), devPtrRes, 1, cuda_result, 1);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrAVec);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipFree (devPtrAVec);
hipblasDestroy(handle);
/*float sum = 0;
for (j = 0; j < NSize; j++) {
sum += cuda_result[j]*AVec[j];
}*/
float sumCPU = 0;
clock_t ini = clock();
for(int i = 0; i< NSize; i++)
{
float sumCPUTemp = 0;
for(int j = 0; j< NSize; j++)
{
sumCPUTemp += BComplete[i*NSize+j]*AVec[j];
}
sumCPU+= sumCPUTemp*AVec[i];
}
clock_t fin = clock();
float sumDot = 0;
clock_t ini2 = clock();
for(int ii = 0; ii< 1000; ii++)
{
for(int jj = 0; jj< NSize; jj++)
{
sumDot += BComplete[jj*NSize+ii]*AVec[jj];
}
}
clock_t fin2 = clock();
printf("Multiplicacion mxv de %d elems.\n", NSize );
printf("Sumatorio completo en CUDA: %f en %f + %f ms.\n", *sum, time, time2);
printf("Sumatorio completo en CPU: %f en %f y %f ms.\n", sumCPU, ((double)(fin-ini))/CLOCKS_PER_SEC*1000, ((double)(fin2-ini2))/CLOCKS_PER_SEC*1000);
return EXIT_SUCCESS;
}
//
// main()
//
int ejecutar_sgemmNN(int items)//( int argc, char **argv )
{
/*
int N = items;
if(N < 16)
N = 16;
//FILE* cout;
//cout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\multmatrix.txt", "a");
//
// startup
//
int idevice = 0;
Q( hipSetDevice( idevice ) );
struct hipDeviceProp_t prop;
Q( hipGetDeviceProperties( &prop, idevice ) );
printf( "\nDevice: %s, %.0f MHz clock, %.0f MB memory.\n", prop.name, prop.clockRate/1000.f, prop.totalGlobalMem/1024.f/1024.f );
hipEvent_t start, end;
Q( hipEventCreate( &start ) );
Q( hipEventCreate( &end ) );
//Q( hipblasInit( ) );
//
// allocate memory
//
//float *A = (float*)malloc( N*N*sizeof( float ) );
float *B = (float*)malloc( N*N*sizeof( float ) );
float *AVec = (float*)malloc( N*sizeof( float ) );
//float *C = (float*)malloc( N*N*sizeof( float ) );
//float *cublas_result = (float*)malloc( N*N*sizeof( float ) );
//float *our_result = (float*)malloc( N*N*sizeof( float ) );
float *our_result_for_sum = (float*)malloc( N*sizeof( float ) );
float *our_result= (float*)malloc( N*sizeof( float ) );
//fill( A, N*N, 31 );
fill( B, N*N, 31 );
//fill( C, N*N, 31 );
fill( AVec, N, 31 );
for(int i = 0; i< N; i++)
our_result_for_sum[i] = 0;
float *dA, *dB, *dC, *dAVec, *dCVec;
//Q( hipblasAlloc( N*N, sizeof(float), (void**)&dA ) );
Q( hipblasAlloc( N*N, sizeof(float), (void**)&dB ) );
//Q( hipblasAlloc( N*N, sizeof(float), (void**)&dC ) );
Q( hipblasAlloc( N, sizeof(float), (void**)&dAVec ) );
Q( hipblasAlloc( N, sizeof(float), (void**)&dCVec ) );
//Q( hipMemcpy( dA, A, N*N*sizeof(float), hipMemcpyHostToDevice ) );
Q( hipMemcpy( dB, B, N*N*sizeof(float), hipMemcpyHostToDevice ) );
Q( hipMemcpy( dAVec, AVec, N*sizeof(float), hipMemcpyHostToDevice ) );
Q( hipMemcpy( dCVec, our_result_for_sum, N*sizeof(float), hipMemcpyHostToDevice ) );
//
// bench square matrices
//
int i = 0;
//for( int i = 0; i < 2; i++ )
{
const char transa = 'N';
const char transb = i ? 'T' : 'N';
//printf( "\ntesting sgemm( '%c', '%c', n, n, n, ... )\n\n", transa, transb );
const int nb = 64;
//printf( " n CUBLAS,Gflop/s we,Gflop/s \"error\"\n" );
int idim = 1;
//for(idim = 1; idim <= N/nb; idim = int((idim+1)*1.25) )
//{
idim = N/nb;
int dim = idim*nb;
//
// set up the parameters
//
const int m = dim;
const int n = dim;
const int k = dim;
const int lda = dim;
const int ldb = dim;
const int ldc = dim;
const float alpha = 1;
const float beta = -1;
//
// compute with CUBLAS
//
/*
Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini1 = clock();
hipblasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
hipError_t cudaStatus = hipDeviceSynchronize();
clock_t fin1 = clock();
Q( hipblasGetError( ) );
Q( hipblasGetMatrix( m, n, sizeof( float ), dC, ldc, cublas_result, ldc ) );
//
// compute with our routine
//
Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini2 = clock();
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
hipError_t cudaStatus2 = hipDeviceSynchronize();
clock_t fin2 = clock();
Q( hipblasGetMatrix( m, n, sizeof( float ), dC, ldc, our_result, ldc ) );
*/
/*
dim3 grid( m/16 , 0, 0), threads( 16 , 0, 0);
//Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini3 = clock();
vec_mat_vec_mult<<<grid, threads>>>( AVec, lda, B, ldb, dCVec, k, alpha, beta );
clock_t fin3 = clock();
hipError_t cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", hipGetErrorString(cudaStatus));
}
Q( hipMemcpy( our_result_for_sum, dCVec, (m/16)*sizeof(float), hipMemcpyDeviceToHost ) );
float resFinal = 0;
for(int i = 0; i< (m/16); i++)
{
resFinal += our_result_for_sum[i];
}
printf("Resultado Final en cuda: %f en %f ms\n", resFinal, ((double)(fin3-ini3))/CLOCKS_PER_SEC*1000);
//
// check the difference in results
//
//float difference = diff( m, n, cublas_result, ldc, our_result, ldc );
//
// bench cublas
//
/*
double cublas_time;
hipblasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
hipblasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( cublas_time );
double cublas_gflops = 2.*m*n*k/cublas_time/1e9;
//
// bench our routine
//
double our_time;
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( our_time );
double our_gflops = 2.*m*n*k/our_time/1e9;
*/
//
// report the results
//
/*
clock_t ini = clock();
float result = 0;
for(int i = 0; i< n; i++)
{
double tempSum = 0.0;
for(int k = 0; k< n; k++)
{
tempSum += AVec[k]*B[i*n+k];
}
result += tempSum*AVec[i];
}
clock_t fin = clock();
printf("Resultado Final en cpu: %f en %f ms\n", result, ((double)(fin-ini))/CLOCKS_PER_SEC*1000);
//printf( "TIME: %5d %11.2f %14.2f\n", n, cublas_time, our_time);
//double time1 = ((double)(fin1-ini1))/CLOCKS_PER_SEC * 1000.0;
//double time2 = ((double)(fin2-ini2))/CLOCKS_PER_SEC * 1000.0;
//printf( "TIME MINE: %d, %f, %f, CPU: %f \n", n, time1, time2, ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//printf( "%5d %11.2f %14.2f %8g\n", n, cublas_gflops, our_gflops, difference );
//fprintf(cout, "%d, %f, %f, %f \n", n, time1, time2 , ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//fflush(cout);
//}
}
//fclose(cout);
//
// shutdown
//
//hipblasFree( dAVec );
//hipblasFree( dB );
//hipblasFree( dCVec );
free( AVec );
free( B );
free( our_result_for_sum );
//free( cublas_result );
free( our_result );
//Q( hipblasShutdown( ) );
return 0;
*/
return 0;
}
int ejecutar_matrixVector(int items)//( int argc, char **argv )
{
int N = items;
if(N < 16)
N = 16;
//FILE* cout;
//cout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\multmatrix.txt", "a");
int idevice = 0;
Q( hipSetDevice( idevice ) );
struct hipDeviceProp_t prop;
Q( hipGetDeviceProperties( &prop, idevice ) );
printf( "\nDevice: %s, %.0f MHz clock, %.0f MB memory.\n", prop.name, prop.clockRate/1000.f, prop.totalGlobalMem/1024.f/1024.f );
hipEvent_t start, end;
Q( hipEventCreate( &start ) );
Q( hipEventCreate( &end ) );
//
// allocate memory
//
printf("%d Elementos.\n", N);
// Symmetric matrix, row-major
float *B = (float*)malloc( (N*(N+1))/2*sizeof( float ) );
float *BComplete = (float*)malloc( N*N*sizeof( float ) );
// Vector
float *AVec = (float*)malloc( N*sizeof( float ) );
// results
float *cuda_result = (float*)malloc( N*sizeof( float ) );
float *cpu_result= (float*)malloc( N*sizeof( float ) );
// Initialization
fill( BComplete, N*N, 31 );
fill( AVec, N, 31 );
int count = 0;
for(int i = 0; i< N; i++)
{
for(int j= i; j< N; j++)
{
B[count] = BComplete[i*N + j ];
count++;
}
}
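// B now holds the upper triangle of BComplete packed row by row: row i starts at
// offset i*N - i*(i-1)/2 and B[count] corresponds to element (i, j) with j >= i.
// Only the full BComplete is copied to the device below; the packed copy is kept
// for the commented-out symv path.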
for(int i = 0; i< N; i++) cuda_result[i] = 0;
float *dA, *dB, *dAVec, *dCVec, *cuda_final_result, *cudaFinalResFloat;
hipError_t cudaStat;
//cudaStat = hipMalloc( (void**)&dB, (N*(N+1))/2 * sizeof(float) );
cudaStat = hipMalloc( (void**)&dB, N*N * sizeof(float) );
cudaStat = hipMalloc( (void**)&dAVec, N* sizeof(float) );
cudaStat = hipMalloc( (void**)&dCVec, N* sizeof(float) );
cudaStat = hipMalloc( (void**)&cudaFinalResFloat, sizeof(float) );
//hipMemcpy( dB, B, (N*(N+1))/2*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( dB, BComplete, N*N*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( dAVec, AVec, N*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( dCVec, cuda_result, N*sizeof(float), hipMemcpyHostToDevice );
// set up the parameters
const int dim = N;
const int m = dim;
const int n = dim;
const int k = dim;
const int lda = dim;
const int ldb = dim;
const int ldc = dim;
const float alpha = 1;
const float beta = 0;
// compute with CUBLAS
clock_t ini1 = clock();
//for(int tempI = 0; tempI< 1000; tempI++)
{
/*
hipblasSsymv( h, HIPBLAS_FILL_MODE_UPPER, N, &alpha, dB, lda, dAVec, 1, &beta, dCVec, 1 );
hipError_t cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", hipGetErrorString(cudaStatus));
}
Q( hipMemcpy( cuda_result, dCVec, N*sizeof(float), hipMemcpyDeviceToHost ) );
printf("Que pasa:\n %f %f %f %f %f \n", cuda_result[0], cuda_result[1], cuda_result[2], cuda_result[3], cuda_result[4]);
hipblasSdot( h, N, dAVec, 1, dCVec, 1 , cudaFinalResFloat);
hipMemcpy( &hostRes, cudaFinalResFloat, sizeof(float), hipMemcpyDeviceToHost );
*/
dim3 grid(N/32), threads(32);
hipLaunchKernelGGL(( vmv), dim3(grid), dim3(threads), 0, 0, dAVec, lda, dB, ldb, dCVec, k, alpha, beta );
}
hipError_t cudaStatus = hipDeviceSynchronize();
Q( hipMemcpy( cuda_result, dCVec, N*sizeof(float), hipMemcpyDeviceToHost ) );
float res001 = 0;
for(int f = 0; f < N; f++)
res001 += cuda_result[f];
clock_t fin1 = clock();
printf("Resultado Final en cuda: %f en %f ms\n", res001, ((double)(fin1-ini1))/CLOCKS_PER_SEC*1000);
float masHostRes = 0, masHostRes2 = 0;
/*
clock_t ini2 = clock();
for(int tempI = 0; tempI< 1000; tempI++)
hipblasSdot( h, N, dAVec, 1, dAVec, 1, cudaFinalResFloat);
clock_t fin2 = clock();
printf("masHostRes:%f en %fms\n", masHostRes, ((double)(fin2-ini2))/CLOCKS_PER_SEC);
clock_t ini3 = clock();
for(int tempI = 0; tempI< 1000; tempI++)
{
masHostRes2 = 0;
for(int i = 0; i<N; i++)
masHostRes2 += AVec[i]*AVec[i];
}
clock_t fin3 = clock();
printf("masHostResCPU:%f en %fms\n", masHostRes2, ((double)(fin3-ini3))/CLOCKS_PER_SEC);
*/
/*
Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
Q( hipblasGetError( ) );
Q( hipblasGetMatrix( m, n, sizeof( float ), dC, ldc, cublas_result, ldc ) );
//
// compute with our routine
//
Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini2 = clock();
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
hipError_t cudaStatus2 = hipDeviceSynchronize();
clock_t fin2 = clock();
Q( hipblasGetMatrix( m, n, sizeof( float ), dC, ldc, our_result, ldc ) );
*/
/*
dim3 grid( m/16 , 0, 0), threads( 16 , 0, 0);
//Q( hipblasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini3 = clock();
vec_mat_vec_mult<<<grid, threads>>>( AVec, lda, B, ldb, dCVec, k, alpha, beta );
clock_t fin3 = clock();
hipError_t cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
printf("hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", hipGetErrorString(cudaStatus));
}
Q( hipMemcpy( our_result_for_sum, dCVec, (m/16)*sizeof(float), hipMemcpyDeviceToHost ) );
*/
//printf("Resultado Final en cuda: %f en %f ms\n", hostRes, ((double)(fin1-ini1))/CLOCKS_PER_SEC);
//
// check the difference in results
//
//float difference = diff( m, n, cublas_result, ldc, our_result, ldc );
//
// bench cublas
//
/*
double cublas_time;
hipblasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
hipblasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( cublas_time );
double cublas_gflops = 2.*m*n*k/cublas_time/1e9;
//
// bench our routine
//
double our_time;
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( our_time );
double our_gflops = 2.*m*n*k/our_time/1e9;
*/
//
// report the results
//
clock_t ini = clock();
float result = 0;
for(int i = 0; i< N; i++)
{
float tempSum = 0.0;
for(int k = 0; k< N; k++)
{
float AvecValue = AVec[k];
float BValue = BComplete[i*N+k];
float tempValue = AVec[k]*BComplete[i*N+k];
tempSum += tempValue;
}
result += tempSum*AVec[i];
}
clock_t fin = clock();
printf("Resultado Final en cpu: %f en %f ms\n", result, ((double)(fin-ini))/CLOCKS_PER_SEC*1000);
//printf( "TIME: %5d %11.2f %14.2f\n", n, cublas_time, our_time);
//double time1 = ((double)(fin1-ini1))/CLOCKS_PER_SEC * 1000.0;
//double time2 = ((double)(fin2-ini2))/CLOCKS_PER_SEC * 1000.0;
//printf( "TIME MINE: %d, %f, %f, CPU: %f \n", n, time1, time2, ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//printf( "%5d %11.2f %14.2f %8g\n", n, cublas_gflops, our_gflops, difference );
//fprintf(cout, "%d, %f, %f, %f \n", n, time1, time2 , ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//fflush(cout);
//}
//}
//fclose(cout);
//
// shutdown
//
//hipblasDestroy(h);
hipFree( dAVec );
hipFree( dB );
hipFree( dCVec );
free( AVec );
free( B );
free( cuda_result );
//free( cublas_result );
free( cpu_result );
return 0;
}
//////////////////////
| 8d0c2d063d1da3a6ae868b9780e060c15f83c1ee.cu | // Written by Vasily Volkov.
// Copyright (c) 2008, The Regents of the University of California.
// All rights reserved.
#include <time.h>
#include "sgemmN.cuh"
#include "cuda_runtime.h"
#define BLOCK_SIZE 32
__device__ void saxpy( float a, float *b, float *c )
{
c[0] += a*b[0];
c[1] += a*b[1];
c[2] += a*b[2];
c[3] += a*b[3];
c[4] += a*b[4];
c[5] += a*b[5];
c[6] += a*b[6];
c[7] += a*b[7];
c[8] += a*b[8];
c[9] += a*b[9];
c[10] += a*b[10];
c[11] += a*b[11];
c[12] += a*b[12];
c[13] += a*b[13];
c[14] += a*b[14];
c[15] += a*b[15];
}
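// saxpy above is the inner step of Volkov's register-blocked SGEMM: c[0..15] are
// accumulators held in registers, and each call adds one rank-1 contribution,
// broadcasting a single element of A against a 16-wide row of the shared tile.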
__device__ void saxpy2( const float* a, const float *b, const float *a2, float *c )
{
c[0] += a[0] *b[0] * a2[0];
c[1] += a[1] *b[1] * a2[0];
c[2] += a[2] *b[2] * a2[0];
c[3] += a[3] *b[3] * a2[0];
c[4] += a[4] *b[4] * a2[0];
c[5] += a[5] *b[5] * a2[0];
c[6] += a[6] *b[6] * a2[0];
c[7] += a[7] *b[7] * a2[0];
c[8] += a[8] *b[8] * a2[0];
c[9] += a[9] *b[9] * a2[0];
c[10] += a[10]*b[10] * a2[0];
c[11] += a[11]*b[11] * a2[0];
c[12] += a[12]*b[12] * a2[0];
c[13] += a[13]*b[13] * a2[0];
c[14] += a[14]*b[14] * a2[0];
c[15] += a[15]*b[15] * a2[0];
}
__device__ void saxpy3( const float* a, const float *b, float *c )
{
c[0] += a[0] *b[0] ;
c[1] += a[1] *b[1] ;
c[2] += a[2] *b[2] ;
c[3] += a[3] *b[3] ;
c[4] += a[4] *b[4] ;
c[5] += a[5] *b[5] ;
c[6] += a[6] *b[6] ;
c[7] += a[7] *b[7] ;
c[8] += a[8] *b[8] ;
c[9] += a[9] *b[9] ;
c[10] += a[10]*b[10] ;
c[11] += a[11]*b[11] ;
c[12] += a[12]*b[12] ;
c[13] += a[13]*b[13] ;
c[14] += a[14]*b[14] ;
c[15] += a[15]*b[15] ;
}
__device__ void saxpy64( const float* a, const float *b, float *c )
{
#pragma unroll
for( int i = 0; i < 64; i++)
c[i] = a[i]*b[i] ;
}
__device__ void saxpy32( const float* a, const float *b, float *c, const float* balance )
{
#pragma unroll
for( int i = 0; i < 32; i++)
c[i] = a[i]*b[i]* balance[0];
}
__device__ void redux32sum( const float* a, float* res )
{
float c2[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
float c[8] = {0,0,0,0,0,0,0,0};
// 32 -> 16
#pragma unroll
for( int i = 0; i < 16; i++)
c2[i] = a[i] + a[16+i];
// 16 -> 8
#pragma unroll
for( int i = 0; i < 8; i++)
c[i] = c2[8+i] + c2[i];
//8 -> 4
#pragma unroll
for( int i = 0; i < 4; i++)
c2[i] = c[4+i] + c[i];
//4->2
#pragma unroll
for( int i = 0; i < 2; i++)
c[i] = c2[2+i] + c2[i];
// 2->1
*res = c[0] + c[1];
}
__device__ void redux64sum( const float* a, float* res )
{
float c[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
float c2[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// 64 -> 32
#pragma unroll
for( int i = 0; i < 32; i++)
c[i] = a[i] + a[32+i];
// 32 -> 16
#pragma unroll
for( int i = 0; i < 16; i++)
c2[i] = c[16+i] + c[i];
// 16 -> 8
#pragma unroll
for( int i = 0; i < 8; i++)
c[i] = c2[8+i] + c2[i];
//8 -> 4
#pragma unroll
for( int i = 0; i < 4; i++)
c2[i] = c[4+i] + c[i];
//4->2
#pragma unroll
for( int i = 0; i < 2; i++)
c[i] = c2[2+i] + c2[i];
// 2->1
*res = c[0] + c[1];
}
__device__ void redux16sum( const float* a, float* res )
{
float c[8] = {0,0,0,0,0,0,0,0};
// 16 -> 8
#pragma unroll
for( int i = 0; i < 8; i++)
c[i] = a[i] + a[8+i];
//8 -> 4
#pragma unroll
for( int i = 0; i < 4; i++)
c[i] = c[i] + c[4+i];
//4->2
#pragma unroll
for( int i = 0; i < 2; i++)
c[i] = c[i] + c[2+i];
// 2->1
*res = c[0] + c[1];
}
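// A minimal host-side reference for the block reductions above, useful for
// checking redux16sum/redux32sum/redux64sum against a known answer on small
// inputs; the helper name is illustrative only.
static float referenceSum(const float* a, int n)
{
    float s = 0.0f;
    for (int i = 0; i < n; ++i)
        s += a[i]; // plain serial accumulation, the same quantity the kernels reduce
    return s;
}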
extern "C" __global__ void sgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += iby + inx + __mul24( iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k*ldb;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][16];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[iny+i][inx] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16*ldb;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
extern "C" __global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
const int inx = threadIdx.x;
const int iny = threadIdx.y;
const int ibx = blockIdx.x * 64;
const int iby = blockIdx.y * 16;
const int id = inx + iny*16;
A += ibx + id;
B += inx + __mul24( iby + iny, ldb );
C += ibx + id + __mul24( iby, ldc );
const float *Blast = B + k;
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
__shared__ float bs[16][17];
do
{
#pragma unroll
for( int i = 0; i < 16; i += 4 )
bs[inx][iny+i] = B[i*ldb];
__syncthreads();
#pragma unroll
for( int i = 0; i < 16; i++, A += lda )
saxpy( A[0], &bs[i][0], c );
B += 16;
__syncthreads();
} while( B < Blast );
for( int i = 0; i < 16; i++, C += ldc )
C[0] = alpha*c[i] + beta*C[0];
}
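// The kernels below depart from the GEMM routines above: each 16-thread block
// appears intended to accumulate a slice of the quadratic form A'*B*A (the same
// quantity the host loops in this file compute as `result`), using redux16sum to
// collapse 16-wide partial products.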
extern "C" __global__ void vec_mat_vec_mult(const float *A, int lda,
const float *B, int ldb,
float *C, int k,
float alpha, float beta )
{
// FOR 16 threads
const int inx = threadIdx.x;
//const int iny = threadIdx.y;
const int ibx = blockIdx.x * 16;
const int iby = blockIdx.y * 16;
const int id = inx;
const float* ARow = A;
A += ibx + id;
B += ibx;
const float *Blast = B + k;
// local multiplication vector
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// accumulator for the sum along the column
float r[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// temporary value
float res = 0;
// load this block's slice of the vector
__shared__ float bs[16];
bs[id] = A[0];
C[id] = -1;
__syncthreads();
do
{
saxpy3( &B[0], &bs[0], c );
redux16sum(c, &res);
r[id] += res*ARow[0];
__syncthreads();
B += 16;
ARow += 16;
} while( B < Blast );
redux16sum(r, &C[ibx]);
}
extern "C" __global__ void vmSymv(const float *A, int lda,
const float *B, int ldb,
float *C, int k, int length,
float alpha, float beta )
{
// FOR 16 threads
const int inx = threadIdx.x;
const int ibx = blockIdx.x * 16;
const int iby = blockIdx.y * 16;
const int id = threadIdx.x;
const float* ARow = A;
A += ibx + id;
B += ibx + __mul24(iby+inx,ldb);
const float *Blast = ARow + k;
// local multiplication vector
float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// accumulator for the sum along the column
__shared__ float r[16];
// temporary value
float res = 0;
// load this block's slice of the vector
__shared__ float bs[16];
bs[id] = A[0];
r[id] = 0;
__syncthreads();
int steps = ldb/16;
int counter = 0;
do
{
saxpy3( &B[0], &bs[0], c );
redux16sum(c, &res);
r[id] += res*ARow[0];
__syncthreads();
B += 16 * ldb;
ARow += 16;
counter++;
} while( counter < steps );
if(id == 0)
redux16sum(r, &C[ibx]);
}
extern "C" __global__ void vmv(const float *A, int lda,
const float *B, int ldb,
float *C, int k,
float alpha, float beta )
{
// FOR 64 threads
const int id = threadIdx.x;
const int ibx = blockIdx.x * BLOCK_SIZE;
B += ibx + id*ldb;
// local multiplication vector
float c[BLOCK_SIZE] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// load this block's slice of the vector
__shared__ float bs[BLOCK_SIZE];
float r[BLOCK_SIZE] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
// temporary value
float res;
bs[id] = A[ibx + id];
__syncthreads();
const int BStep = ldb*BLOCK_SIZE;
const float* blast = A + lda;
do
{
saxpy32( &B[0], &bs[0], c, &A[id]);
redux32sum(c, &res);
r[id] += res;
B += BStep;
A += BLOCK_SIZE;
} while( A < blast );
C[ibx+id] = r[id];
}
extern "C" void ourSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc )
{
dim3 grid( m/64, n/16 ), threads( 16, 4 );
if( transb == 'N' || transb == 'n' )
sgemmNN<<<grid, threads>>>( A, lda, B, ldb, C, ldc, k, alpha, beta );
else
sgemmNT<<<grid, threads>>>( A, lda, B, ldb, C, ldc, k, alpha, beta );
}
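// Note that the launch above divides m by 64 and n by 16, so callers are expected
// to pass dimensions that are multiples of the 64x16 tile, as the (commented-out)
// benchmark path below does by deriving dim from 64-element blocks.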
//
// auxiliary routines
//
void fill( float *A, int n, int maxi )
{
for( int j = 0; j < n; j++ )
A[j] = float( (rand()%(maxi*2+1)) - maxi ) / ( maxi + 1.f );
}
float diff( int m, int n, float *A, int lda, float *B, int ldb )
{
float err = 0;
for( int j = 0; j < n; j++ )
for( int i = 0; i < m; i++ )
err = max( err, fabs( A[i+j*lda] - B[i+j*ldb] ) );
return err;
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define MSize 6
#define NSize 512*32
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
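// IDX2C maps (row i, column j) of a column-major matrix with leading dimension ld
// to a linear offset, e.g. IDX2C(1,2,4) = 2*4 + 1 = 9.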
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int
n, int p, int q, float alpha, float beta)
{
cublasSscal (handle, n-p, &alpha, &m[IDX2C(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int cublas_example()
{
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
int i, j;
// Symmetric matrix, column-major
float *B = (float*)malloc( (NSize*(NSize+1))/2*sizeof( float ) );
float *BComplete = (float*)malloc( NSize*NSize*sizeof( float ) );
// Vector
float *AVec = (float*)malloc( NSize*sizeof( float ) );
fill( BComplete, NSize*NSize, 31 );
fill( AVec, NSize, 31 );
int count = 0;
for(int i = 0; i< NSize; i++)
{
for(int j = 0; j < NSize; j++)
{
if(j <= i)
{
BComplete[i*NSize + j] = BComplete[j * NSize + i];
B[count] = BComplete[i*NSize + j];
count++;
}
}
}
// results
float *cuda_result = (float*)malloc( NSize*sizeof( float ) );
for(int i = 0; i< NSize; i++)
cuda_result[i]= 0;
//float *cpu_result= (float*)malloc( NSize*sizeof( float ) );
float* devPtrB, *devPtrAVec, *devPtrRes, *devPtrBSym;
cudaStat = cudaMalloc ((void**)&devPtrB, NSize*NSize*sizeof(float));
cudaStat = cudaMalloc ((void**)&devPtrAVec, NSize*sizeof(float));
cudaStat = cudaMalloc ((void**)&devPtrRes, NSize*sizeof(float));
cudaStat = cudaMalloc ((void**)&devPtrBSym, ((NSize+1)*NSize)/2*sizeof(float));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
cublasSetAtomicsMode(handle, CUBLAS_ATOMICS_ALLOWED);
stat = cublasSetMatrix (NSize, NSize, sizeof(float), BComplete, NSize, devPtrB, NSize);
//cudaStat = cudaMemcpy(&devPtrBSym, &B, ((NSize+1)*NSize)/2*sizeof(float), cudaMemcpyHostToDevice);
//cudaStat = cudaMemcpy(&devPtrB, &BComplete, NSize*NSize*sizeof(float), cudaMemcpyHostToDevice);
stat = cublasSetVector (NSize, sizeof(float), AVec, 1, devPtrAVec, 1);
stat = cublasSetVector (NSize, sizeof(float), cuda_result, 1, devPtrRes, 1);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrAVec);
cublasDestroy(handle);
return EXIT_FAILURE;
}
float alpha = 1;
float beta = 0;
cudaEvent_t start, end;
Q( cudaEventCreate( &start ) );
Q( cudaEventCreate( &end ) );
cudaEventRecord(start, 0);
float* sum = (float*)malloc(sizeof(float));
stat = cublasSgemv( handle, CUBLAS_OP_T, NSize, NSize, &alpha, devPtrB, NSize, devPtrAVec, 1, &beta, devPtrRes, 1 );
//stat = cublasSsymv( handle, CUBLAS_FILL_MODE_LOWER, NSize, &alpha, devPtrB, NSize, devPtrAVec, 1, &beta, devPtrRes, 1 );
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("cublasSgemv failed");
cudaFree (devPtrAVec);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float time;
cudaEventElapsedTime(&time, start, end);
cudaEventRecord(start, 0);
stat = cublasSdot( handle, NSize, devPtrAVec, 1, devPtrRes, 1, sum );
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float time2;
cudaEventElapsedTime(&time2, start, end);
stat = cublasGetVector (NSize, sizeof(float), devPtrRes, 1, cuda_result, 1);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrAVec);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree (devPtrAVec);
cublasDestroy(handle);
/*float sum = 0;
for (j = 0; j < NSize; j++) {
sum += cuda_result[j]*AVec[j];
}*/
float sumCPU = 0;
clock_t ini = clock();
for(int i = 0; i< NSize; i++)
{
float sumCPUTemp = 0;
for(int j = 0; j< NSize; j++)
{
sumCPUTemp += BComplete[i*NSize+j]*AVec[j];
}
sumCPU+= sumCPUTemp*AVec[i];
}
clock_t fin = clock();
float sumDot = 0;
clock_t ini2 = clock();
for(int ii = 0; ii< 1000; ii++)
{
for(int jj = 0; jj< NSize; jj++)
{
sumDot += BComplete[jj*NSize+ii]*AVec[jj];
}
}
clock_t fin2 = clock();
printf("Multiplicacion mxv de %d elems.\n", NSize );
printf("Sumatorio completo en CUDA: %f en %f + %f ms.\n", *sum, time, time2);
printf("Sumatorio completo en CPU: %f en %f y %f ms.\n", sumCPU, ((double)(fin-ini))/CLOCKS_PER_SEC*1000, ((double)(fin2-ini2))/CLOCKS_PER_SEC*1000);
return EXIT_SUCCESS;
}
//
// main()
//
int ejecutar_sgemmNN(int items)//( int argc, char **argv )
{
/*
int N = items;
if(N < 16)
N = 16;
//FILE* cout;
//cout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\multmatrix.txt", "a");
//
// startup
//
int idevice = 0;
Q( cudaSetDevice( idevice ) );
struct cudaDeviceProp prop;
Q( cudaGetDeviceProperties( &prop, idevice ) );
printf( "\nDevice: %s, %.0f MHz clock, %.0f MB memory.\n", prop.name, prop.clockRate/1000.f, prop.totalGlobalMem/1024.f/1024.f );
cudaEvent_t start, end;
Q( cudaEventCreate( &start ) );
Q( cudaEventCreate( &end ) );
//Q( cublasInit( ) );
//
// allocate memory
//
//float *A = (float*)malloc( N*N*sizeof( float ) );
float *B = (float*)malloc( N*N*sizeof( float ) );
float *AVec = (float*)malloc( N*sizeof( float ) );
//float *C = (float*)malloc( N*N*sizeof( float ) );
//float *cublas_result = (float*)malloc( N*N*sizeof( float ) );
//float *our_result = (float*)malloc( N*N*sizeof( float ) );
float *our_result_for_sum = (float*)malloc( N*sizeof( float ) );
float *our_result= (float*)malloc( N*sizeof( float ) );
//fill( A, N*N, 31 );
fill( B, N*N, 31 );
//fill( C, N*N, 31 );
fill( AVec, N, 31 );
for(int i = 0; i< N; i++)
our_result_for_sum[i] = 0;
float *dA, *dB, *dC, *dAVec, *dCVec;
//Q( cublasAlloc( N*N, sizeof(float), (void**)&dA ) );
Q( cublasAlloc( N*N, sizeof(float), (void**)&dB ) );
//Q( cublasAlloc( N*N, sizeof(float), (void**)&dC ) );
Q( cublasAlloc( N, sizeof(float), (void**)&dAVec ) );
Q( cublasAlloc( N, sizeof(float), (void**)&dCVec ) );
//Q( cudaMemcpy( dA, A, N*N*sizeof(float), cudaMemcpyHostToDevice ) );
Q( cudaMemcpy( dB, B, N*N*sizeof(float), cudaMemcpyHostToDevice ) );
Q( cudaMemcpy( dAVec, AVec, N*sizeof(float), cudaMemcpyHostToDevice ) );
Q( cudaMemcpy( dCVec, our_result_for_sum, N*sizeof(float), cudaMemcpyHostToDevice ) );
//
// bench square matrices
//
int i = 0;
//for( int i = 0; i < 2; i++ )
{
const char transa = 'N';
const char transb = i ? 'T' : 'N';
//printf( "\ntesting sgemm( '%c', '%c', n, n, n, ... )\n\n", transa, transb );
const int nb = 64;
//printf( " n CUBLAS,Gflop/s we,Gflop/s \"error\"\n" );
int idim = 1;
//for(idim = 1; idim <= N/nb; idim = int((idim+1)*1.25) )
//{
idim = N/nb;
int dim = idim*nb;
//
// set up the parameters
//
const int m = dim;
const int n = dim;
const int k = dim;
const int lda = dim;
const int ldb = dim;
const int ldc = dim;
const float alpha = 1;
const float beta = -1;
//
// compute with CUBLAS
//
/*
Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini1 = clock();
cublasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
cudaError_t cudaStatus = cudaDeviceSynchronize();
clock_t fin1 = clock();
Q( cublasGetError( ) );
Q( cublasGetMatrix( m, n, sizeof( float ), dC, ldc, cublas_result, ldc ) );
//
// compute with our routine
//
Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini2 = clock();
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
cudaError_t cudaStatus2 = cudaDeviceSynchronize();
clock_t fin2 = clock();
Q( cublasGetMatrix( m, n, sizeof( float ), dC, ldc, our_result, ldc ) );
*/
/*
dim3 grid( m/16 , 0, 0), threads( 16 , 0, 0);
//Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini3 = clock();
vec_mat_vec_mult<<<grid, threads>>>( AVec, lda, B, ldb, dCVec, k, alpha, beta );
clock_t fin3 = clock();
cudaError_t cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", cudaGetErrorString(cudaStatus));
}
Q( cudaMemcpy( our_result_for_sum, dCVec, (m/16)*sizeof(float), cudaMemcpyDeviceToHost ) );
float resFinal = 0;
for(int i = 0; i< (m/16); i++)
{
resFinal += our_result_for_sum[i];
}
printf("Resultado Final en cuda: %f en %f ms\n", resFinal, ((double)(fin3-ini3))/CLOCKS_PER_SEC*1000);
//
// check the difference in results
//
//float difference = diff( m, n, cublas_result, ldc, our_result, ldc );
//
// bench cublas
//
/*
double cublas_time;
cublasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
cublasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( cublas_time );
double cublas_gflops = 2.*m*n*k/cublas_time/1e9;
//
// bench our routine
//
double our_time;
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( our_time );
double our_gflops = 2.*m*n*k/our_time/1e9;
*/
//
// report the results
//
/*
clock_t ini = clock();
float result = 0;
for(int i = 0; i< n; i++)
{
double tempSum = 0.0;
for(int k = 0; k< n; k++)
{
tempSum += AVec[k]*B[i*n+k];
}
result += tempSum*AVec[i];
}
clock_t fin = clock();
printf("Resultado Final en cpu: %f en %f ms\n", result, ((double)(fin-ini))/CLOCKS_PER_SEC*1000);
//printf( "TIME: %5d %11.2f %14.2f\n", n, cublas_time, our_time);
//double time1 = ((double)(fin1-ini1))/CLOCKS_PER_SEC * 1000.0;
//double time2 = ((double)(fin2-ini2))/CLOCKS_PER_SEC * 1000.0;
//printf( "TIME MINE: %d, %f, %f, CPU: %f \n", n, time1, time2, ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//printf( "%5d %11.2f %14.2f %8g\n", n, cublas_gflops, our_gflops, difference );
//fprintf(cout, "%d, %f, %f, %f \n", n, time1, time2 , ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//fflush(cout);
//}
}
//fclose(cout);
//
// shutdown
//
//cublasFree( dAVec );
//cublasFree( dB );
//cublasFree( dCVec );
free( AVec );
free( B );
free( our_result_for_sum );
//free( cublas_result );
free( our_result );
//Q( cublasShutdown( ) );
return 0;
*/
return 0;
}
int ejecutar_matrixVector(int items)//( int argc, char **argv )
{
int N = items;
if(N < 16)
N = 16;
//FILE* cout;
//cout = fopen("C:\\Users\\chus\\Documents\\dev\\Data\\models\\multmatrix.txt", "a");
int idevice = 0;
Q( cudaSetDevice( idevice ) );
struct cudaDeviceProp prop;
Q( cudaGetDeviceProperties( &prop, idevice ) );
printf( "\nDevice: %s, %.0f MHz clock, %.0f MB memory.\n", prop.name, prop.clockRate/1000.f, prop.totalGlobalMem/1024.f/1024.f );
cudaEvent_t start, end;
Q( cudaEventCreate( &start ) );
Q( cudaEventCreate( &end ) );
//
// allocate memory
//
printf("%d Elementos.\n", N);
// Symmetric matrix, row-major
float *B = (float*)malloc( (N*(N+1))/2*sizeof( float ) );
float *BComplete = (float*)malloc( N*N*sizeof( float ) );
// Vector
float *AVec = (float*)malloc( N*sizeof( float ) );
// results
float *cuda_result = (float*)malloc( N*sizeof( float ) );
float *cpu_result= (float*)malloc( N*sizeof( float ) );
// Initialization
fill( BComplete, N*N, 31 );
fill( AVec, N, 31 );
int count = 0;
for(int i = 0; i< N; i++)
{
for(int j= i; j< N; j++)
{
B[count] = BComplete[i*N + j ];
count++;
}
}
for(int i = 0; i< N; i++) cuda_result[i] = 0;
float *dA, *dB, *dAVec, *dCVec, *cuda_final_result, *cudaFinalResFloat;
cudaError_t cudaStat;
//cudaStat = cudaMalloc( (void**)&dB, (N*(N+1))/2 * sizeof(float) );
cudaStat = cudaMalloc( (void**)&dB, N*N * sizeof(float) );
cudaStat = cudaMalloc( (void**)&dAVec, N* sizeof(float) );
cudaStat = cudaMalloc( (void**)&dCVec, N* sizeof(float) );
cudaStat = cudaMalloc( (void**)&cudaFinalResFloat, sizeof(float) );
//cudaMemcpy( dB, B, (N*(N+1))/2*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dB, BComplete, N*N*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dAVec, AVec, N*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dCVec, cuda_result, N*sizeof(float), cudaMemcpyHostToDevice );
// set up the parameters
const int dim = N;
const int m = dim;
const int n = dim;
const int k = dim;
const int lda = dim;
const int ldb = dim;
const int ldc = dim;
const float alpha = 1;
const float beta = 0;
// compute with CUBLAS
clock_t ini1 = clock();
//for(int tempI = 0; tempI< 1000; tempI++)
{
/*
cublasSsymv( h, CUBLAS_FILL_MODE_UPPER, N, &alpha, dB, lda, dAVec, 1, &beta, dCVec, 1 );
cudaError_t cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", cudaGetErrorString(cudaStatus));
}
Q( cudaMemcpy( cuda_result, dCVec, N*sizeof(float), cudaMemcpyDeviceToHost ) );
printf("Que pasa:\n %f %f %f %f %f \n", cuda_result[0], cuda_result[1], cuda_result[2], cuda_result[3], cuda_result[4]);
cublasSdot( h, N, dAVec, 1, dCVec, 1 , cudaFinalResFloat);
cudaMemcpy( &hostRes, cudaFinalResFloat, sizeof(float), cudaMemcpyDeviceToHost );
*/
dim3 grid(N/32), threads(32);
vmv<<<grid, threads>>>( dAVec, lda, dB, ldb, dCVec, k, alpha, beta );
}
cudaError_t cudaStatus = cudaDeviceSynchronize();
Q( cudaMemcpy( cuda_result, dCVec, N*sizeof(float), cudaMemcpyDeviceToHost ) );
float res001 = 0;
for(int f = 0; f < N; f++)
res001 += cuda_result[f];
clock_t fin1 = clock();
printf("Resultado Final en cuda: %f en %f ms\n", res001, ((double)(fin1-ini1))/CLOCKS_PER_SEC*1000);
float masHostRes = 0, masHostRes2 = 0;
/*
clock_t ini2 = clock();
for(int tempI = 0; tempI< 1000; tempI++)
cublasSdot( h, N, dAVec, 1, dAVec, 1, cudaFinalResFloat);
clock_t fin2 = clock();
printf("masHostRes:%f en %fms\n", masHostRes, ((double)(fin2-ini2))/CLOCKS_PER_SEC);
clock_t ini3 = clock();
for(int tempI = 0; tempI< 1000; tempI++)
{
masHostRes2 = 0;
for(int i = 0; i<N; i++)
masHostRes2 += AVec[i]*AVec[i];
}
clock_t fin3 = clock();
printf("masHostResCPU:%f en %fms\n", masHostRes2, ((double)(fin3-ini3))/CLOCKS_PER_SEC);
*/
/*
Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
Q( cublasGetError( ) );
Q( cublasGetMatrix( m, n, sizeof( float ), dC, ldc, cublas_result, ldc ) );
//
// compute with our routine
//
Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini2 = clock();
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
cudaError_t cudaStatus2 = cudaDeviceSynchronize();
clock_t fin2 = clock();
Q( cublasGetMatrix( m, n, sizeof( float ), dC, ldc, our_result, ldc ) );
*/
/*
dim3 grid( m/16 , 0, 0), threads( 16 , 0, 0);
//Q( cublasSetMatrix( m, n, sizeof( float ), C, ldc, dC, ldc ) );
clock_t ini3 = clock();
vec_mat_vec_mult<<<grid, threads>>>( AVec, lda, B, ldb, dCVec, k, alpha, beta );
clock_t fin3 = clock();
cudaError_t cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
printf("Error String: %s\n", cudaGetErrorString(cudaStatus));
}
Q( cudaMemcpy( our_result_for_sum, dCVec, (m/16)*sizeof(float), cudaMemcpyDeviceToHost ) );
*/
//printf("Resultado Final en cuda: %f en %f ms\n", hostRes, ((double)(fin1-ini1))/CLOCKS_PER_SEC);
//
// check the difference in results
//
//float difference = diff( m, n, cublas_result, ldc, our_result, ldc );
//
// bench cublas
//
/*
double cublas_time;
cublasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
cublasSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( cublas_time );
double cublas_gflops = 2.*m*n*k/cublas_time/1e9;
//
// bench our routine
//
double our_time;
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
BEGIN_TIMING( );
ourSgemm( transa, transb, m, n, k, alpha, dA, lda, dB, ldb, beta, dC, ldc );
END_TIMING( our_time );
double our_gflops = 2.*m*n*k/our_time/1e9;
*/
//
// report the results
//
clock_t ini = clock();
float result = 0;
for(int i = 0; i< N; i++)
{
float tempSum = 0.0;
for(int k = 0; k< N; k++)
{
float AvecValue = AVec[k];
float BValue = BComplete[i*N+k];
float tempValue = AVec[k]*BComplete[i*N+k];
tempSum += tempValue;
}
result += tempSum*AVec[i];
}
clock_t fin = clock();
printf("Resultado Final en cpu: %f en %f ms\n", result, ((double)(fin-ini))/CLOCKS_PER_SEC*1000);
//printf( "TIME: %5d %11.2f %14.2f\n", n, cublas_time, our_time);
//double time1 = ((double)(fin1-ini1))/CLOCKS_PER_SEC * 1000.0;
//double time2 = ((double)(fin2-ini2))/CLOCKS_PER_SEC * 1000.0;
//printf( "TIME MINE: %d, %f, %f, CPU: %f \n", n, time1, time2, ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//printf( "%5d %11.2f %14.2f %8g\n", n, cublas_gflops, our_gflops, difference );
//fprintf(cout, "%d, %f, %f, %f \n", n, time1, time2 , ((double)(fin-ini))/CLOCKS_PER_SEC * 1000.0);
//fflush(cout);
//}
//}
//fclose(cout);
//
// shutdown
//
//cublasDestroy(h);
cudaFree( dAVec );
cudaFree( dB );
cudaFree( dCVec );
free( AVec );
free( B );
free( cuda_result );
//free( cublas_result );
free( cpu_result );
return 0;
}
//////////////////////
|
e9a68d1f4900e1beb4dfef6476e4c7a9804750ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
DataStructure.cu
*
*/
#include "../include/DataStructure.cuh"
__global__ void setupMonteCarloMPCVectors(MonteCarloMPC *Out)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
Out[id].L = 0.0f;
Out[id].W = 0.0f;
for(int i = 0; i < HORIZON; i++){
for(int k = 0; k < DIM_OF_U; k++){
Out[id].InputSeq[k][i] = 0.0f;
}
}
__syncthreads();
} | e9a68d1f4900e1beb4dfef6476e4c7a9804750ac.cu | /*
DataStructure.cu
* Initialize the structure
*/
#include "../include/DataStructure.cuh"
__global__ void setupMonteCarloMPCVectors(MonteCarloMPC *Out)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
Out[id].L = 0.0f;
Out[id].W = 0.0f;
for(int i = 0; i < HORIZON; i++){
for(int k = 0; k < DIM_OF_U; k++){
Out[id].InputSeq[k][i] = 0.0f;
}
}
__syncthreads();
} |
95cd65764f0eb7510a975c154258c0167edb9c32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_pack_density.hpp"
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace integrator {
namespace cuda {
using namespace GauXC::cuda;
template <typename T>
__global__ void submat_set_combined_kernel( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* A,
size_t LDA ) {
const int batch_id = blockIdx.z;
if( batch_id < ntasks ) {
auto& task = device_tasks[ batch_id ];
const auto ncut = task.ncut;
const auto* submat_cut_device = task.submat_cut;
const auto LDAS = task.nbe;
auto* ASmall_device = task.nbe_scr;
//if( LDAS == LDAB ) return;
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
int64_t i(0);
for( size_t i_cut = 0; i_cut < ncut; ++i_cut ) {
const int64_t i_cut_first = submat_cut_device[ 2*i_cut ];
const int64_t i_cut_second = submat_cut_device[ 2*i_cut + 1 ];
const int64_t delta_i = i_cut_second - i_cut_first;
int64_t j(0);
for( size_t j_cut = 0; j_cut < ncut; ++j_cut ) {
const int64_t j_cut_first = submat_cut_device[ 2*j_cut ];
const int64_t j_cut_second = submat_cut_device[ 2*j_cut + 1 ];
const int64_t delta_j = j_cut_second - j_cut_first;
auto* ASmall_begin = ASmall_device + i + j *LDAS;
auto* ABig_begin = A + i_cut_first + j_cut_first*LDA ;
for( size_t J = tid_y; J < delta_j; J += blockDim.y )
for( size_t I = tid_x; I < delta_i; I += blockDim.x )
ASmall_begin[I + J*LDAS] = ABig_begin[I + J*LDA];
j += delta_j;
}
i += delta_i;
}
} // batch_id check
}
template <typename T>
void task_pack_density_matrix( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* P_device,
size_t LDP,
hipStream_t stream ) {
dim3 threads(warp_size,max_warps_per_thread_block,1), blocks(1,1,ntasks);
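// One thread block per task along the grid's z dimension: the kernel selects its
// task through blockIdx.z, while the x/y threads stride over the submatrix cuts.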
hipLaunchKernelGGL(( submat_set_combined_kernel), dim3(blocks), dim3(threads), 0, stream ,
ntasks, device_tasks, P_device, LDP
);
}
template
void task_pack_density_matrix( size_t ntasks,
XCTaskDevice<double>* device_tasks,
double* P_device,
size_t LDP,
hipStream_t stream );
}
}
}
| 95cd65764f0eb7510a975c154258c0167edb9c32.cu | #include "cuda_pack_density.hpp"
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace integrator {
namespace cuda {
using namespace GauXC::cuda;
template <typename T>
__global__ void submat_set_combined_kernel( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* A,
size_t LDA ) {
const int batch_id = blockIdx.z;
if( batch_id < ntasks ) {
auto& task = device_tasks[ batch_id ];
const auto ncut = task.ncut;
const auto* submat_cut_device = task.submat_cut;
const auto LDAS = task.nbe;
auto* ASmall_device = task.nbe_scr;
//if( LDAS == LDAB ) return;
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
int64_t i(0);
for( size_t i_cut = 0; i_cut < ncut; ++i_cut ) {
const int64_t i_cut_first = submat_cut_device[ 2*i_cut ];
const int64_t i_cut_second = submat_cut_device[ 2*i_cut + 1 ];
const int64_t delta_i = i_cut_second - i_cut_first;
int64_t j(0);
for( size_t j_cut = 0; j_cut < ncut; ++j_cut ) {
const int64_t j_cut_first = submat_cut_device[ 2*j_cut ];
const int64_t j_cut_second = submat_cut_device[ 2*j_cut + 1 ];
const int64_t delta_j = j_cut_second - j_cut_first;
auto* ASmall_begin = ASmall_device + i + j *LDAS;
auto* ABig_begin = A + i_cut_first + j_cut_first*LDA ;
for( size_t J = tid_y; J < delta_j; J += blockDim.y )
for( size_t I = tid_x; I < delta_i; I += blockDim.x )
ASmall_begin[I + J*LDAS] = ABig_begin[I + J*LDA];
j += delta_j;
}
i += delta_i;
}
} // batch_id check
}
template <typename T>
void task_pack_density_matrix( size_t ntasks,
XCTaskDevice<T>* device_tasks,
T* P_device,
size_t LDP,
cudaStream_t stream ) {
dim3 threads(warp_size,max_warps_per_thread_block,1), blocks(1,1,ntasks);
submat_set_combined_kernel<<< blocks, threads, 0, stream >>>(
ntasks, device_tasks, P_device, LDP
);
}
template
void task_pack_density_matrix( size_t ntasks,
XCTaskDevice<double>* device_tasks,
double* P_device,
size_t LDP,
cudaStream_t stream );
}
}
}
|
d448c5b13e0bd69cc9c6d6282dd379bec60e5edc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>
#include <time.h>
using namespace std;
using namespace cv;
#define RED 2
#define GREEN 1
#define BLUE 0
#define TILE_SIZE 32
#define MAX_MASK_WIDTH 9
#define MASK_WIDTH 3
__constant__ char M[MASK_WIDTH*MASK_WIDTH];
__device__
unsigned char clamp(int value){
if (value < 0) value = 0;
if (value > 255) value = 255;
return (unsigned char)value;
}
//--------->
__global__ void sobelSharedMem(unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput){
int size = TILE_SIZE + MASK_WIDTH - 1;
__shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1];
int n = maskWidth/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / size, destX = dest % size,
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
// Second batch load
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest / size, destX = dest % size;
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < size) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= height || y >= width)
return;
int cur = 0, nx, ny;
for (int i = 0; i < maskWidth; ++i) {
for (int j = 0; j < maskWidth; ++j) {
nx = threadIdx.y + i;
ny = threadIdx.x + j;
if (nx >= 0 && nx < size && ny >= 0 && ny < size) {
cur += N_ds[nx][ny] * M[i * maskWidth + j];
}
}
}
imageOutput[x * width + y] = clamp(cur);
__syncthreads();
}
//--------->
__global__
void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
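// The 0.299/0.587/0.114 weights above are the BT.601 luma coefficients commonly used
// for RGB-to-grayscale conversion; OpenCV stores pixels as BGR, hence the RED/GREEN/BLUE
// channel offsets defined at the top of the file.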
__global__
void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
if (i < rows && j < cols){
imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
}
}
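// UnionCU combines the two Sobel responses into the per-pixel gradient magnitude
// sqrt(Gx^2 + Gy^2); the result is written back directly as unsigned char.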
int main(int argc, char **argv){
hipError_t error = hipSuccess;
clock_t start, end;
double time_used;
unsigned char *h_imageInput, *d_imageInput, *d_imageGray;
unsigned char *d_Gx, *d_Gy, *h_G, *d_G; // Sobel operators
char* imageName = argv[1];
Mat image;
if (argc != 2) {
printf("Usage: Image path\n");
return 1;
}
image = imread(imageName, 1);
if (!image.data) {
printf("No image Data\n");
return 1;
}
//---------> Gray
Size s = image.size();
int width = s.width;
int height = s.height;
int sz = sizeof(unsigned char) * width * height * image.channels();
int size = sizeof(unsigned char) * width * height;
h_imageInput = (unsigned char*)malloc(sz);
error = hipMalloc((void**)&d_imageInput,sz);
if (error != hipSuccess) {
printf("Error allocating memory for d_imageInput\n");
exit(-1);
}
//--------->
start = clock();
h_imageInput = image.data;
error = hipMemcpy(d_imageInput, h_imageInput, sz, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Error copying data from h_imageInput to d_imageInput\n");
exit(-1);
}
end = clock();
time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
error = hipMalloc((void**)&d_imageGray, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_imageGray\n");
exit(-1);
}
//--------->
start = clock();
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
hipLaunchKernelGGL(( img2grayCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageInput, width, height, d_imageGray);
hipDeviceSynchronize();
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
//---------> Masks
char h_XMask[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
char h_YMask[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
//---------> Copying to constant memory
start = clock();
error = hipMemcpyToSymbol(M, h_XMask, sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != hipSuccess){
printf("Error copying mask h_M to M\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
//---------> Sobel
error = hipMalloc((void**)&d_Gx, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_Gx\n");
exit(-1);
}
error = hipMalloc((void**)&d_Gy, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_Gy\n");
exit(-1);
}
h_G = (unsigned char*)malloc(size);
error = hipMalloc((void**)&d_G, size);
if (error != hipSuccess) {
printf("Error allocating memory for d_G\n");
exit(-1);
}
//--------->
//--------->
start = clock();
// Convolution
hipLaunchKernelGGL(( sobelSharedMem), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, width, height, MASK_WIDTH, d_Gx);
hipDeviceSynchronize();
error = hipMemcpyToSymbol(M, h_YMask, sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != hipSuccess){
printf("Error copying mask h_YMask to M\n");
exit(-1);
}
// Convolution in Gy
hipLaunchKernelGGL(( sobelSharedMem), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, width, height, MASK_WIDTH, d_Gy);
hipDeviceSynchronize();
// Union of Gx and Gy results
hipLaunchKernelGGL(( UnionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_G, d_Gx, d_Gy, height, width);
hipDeviceSynchronize();
error = hipMemcpy(h_G, d_G, size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("Error copying data from d_G to h_G\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
printf ("%lf \n",time_used);
free(h_imageInput);
hipFree(d_imageInput);
hipFree(d_imageGray);
free(h_G);
hipFree(d_Gx);
hipFree(d_Gy);
hipFree(d_G);
return 0;
}
| d448c5b13e0bd69cc9c6d6282dd379bec60e5edc.cu | #include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>
#include <time.h>
using namespace std;
using namespace cv;
#define RED 2
#define GREEN 1
#define BLUE 0
#define TILE_SIZE 32
#define MAX_MASK_WIDTH 9
#define MASK_WIDTH 3
__constant__ char M[MASK_WIDTH*MASK_WIDTH];
__device__
unsigned char clamp(int value){
if (value < 0) value = 0;
if (value > 255) value = 255;
return (unsigned char)value;
}
//--------->
__global__ void sobelSharedMem(unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput){
int size = TILE_SIZE + MASK_WIDTH - 1;
__shared__ float N_ds[TILE_SIZE + MASK_WIDTH - 1][TILE_SIZE+ MASK_WIDTH - 1];
int n = maskWidth/2;
int dest = threadIdx.y*TILE_SIZE+threadIdx.x, destY = dest / size, destX = dest % size,
srcY = blockIdx.y * TILE_SIZE + destY - n, srcX = blockIdx.x * TILE_SIZE + destX - n,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
// Second batch load
dest = threadIdx.y * TILE_SIZE + threadIdx.x + TILE_SIZE * TILE_SIZE;
destY = dest / size, destX = dest % size;
srcY = blockIdx.y * TILE_SIZE + destY - n;
srcX = blockIdx.x * TILE_SIZE + destX - n;
src = (srcY * width + srcX);
if (destY < size) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = imageInput[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= height || y >= width)
return;
int cur = 0, nx, ny;
for (int i = 0; i < maskWidth; ++i) {
for (int j = 0; j < maskWidth; ++j) {
nx = threadIdx.y + i;
ny = threadIdx.x + j;
if (nx >= 0 && nx < size && ny >= 0 && ny < size) {
cur += N_ds[nx][ny] * M[i * maskWidth + j];
}
}
}
imageOutput[x * width + y] = clamp(cur);
__syncthreads();
}
//--------->
__global__
void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587
+ imageInput[(row*width+col)*3+BLUE]*0.114;
}
}
__global__
void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
if (i < rows && j < cols){
imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
}
}
int main(int argc, char **argv){
cudaError_t error = cudaSuccess;
clock_t start, end;
double time_used;
unsigned char *h_imageInput, *d_imageInput, *d_imageGray;
unsigned char *d_Gx, *d_Gy, *h_G, *d_G; // Sobel operators
char* imageName = argv[1];
Mat image;
if (argc != 2) {
printf("Usage: Image path\n");
return 1;
}
image = imread(imageName, 1);
if (!image.data) {
printf("No image Data\n");
return 1;
}
//---------> Gray
Size s = image.size();
int width = s.width;
int height = s.height;
int sz = sizeof(unsigned char) * width * height * image.channels();
int size = sizeof(unsigned char) * width * height;
h_imageInput = (unsigned char*)malloc(sz);
error = cudaMalloc((void**)&d_imageInput,sz);
if (error != cudaSuccess) {
printf("Error allocating memory for d_imageInput\n");
exit(-1);
}
//--------->
start = clock();
h_imageInput = image.data;
error = cudaMemcpy(d_imageInput, h_imageInput, sz, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Error copying data from h_imageInput to d_imageInput\n");
exit(-1);
}
end = clock();
time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
error = cudaMalloc((void**)&d_imageGray, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_imageGray\n");
exit(-1);
}
//--------->
start = clock();
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
img2grayCU<<<dimGrid,dimBlock>>>(d_imageInput, width, height, d_imageGray);
cudaDeviceSynchronize();
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
//---------> Masks
char h_XMask[] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
char h_YMask[] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
//---------> Copying to constant memory
start = clock();
error = cudaMemcpyToSymbol(M, h_XMask, sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != cudaSuccess){
printf("Error copying mask h_M to M\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
//---------> Sobel
error = cudaMalloc((void**)&d_Gx, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_Gx\n");
exit(-1);
}
error = cudaMalloc((void**)&d_Gy, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_Gy\n");
exit(-1);
}
h_G = (unsigned char*)malloc(size);
error = cudaMalloc((void**)&d_G, size);
if (error != cudaSuccess) {
printf("Error allocating memory for d_G\n");
exit(-1);
}
//--------->
//--------->
start = clock();
// Convolution
sobelSharedMem<<<dimGrid,dimBlock>>>(d_imageGray, width, height, MASK_WIDTH, d_Gx);
cudaDeviceSynchronize();
error = cudaMemcpyToSymbol(M, h_YMask, sizeof(char)*MASK_WIDTH*MASK_WIDTH);
if(error != cudaSuccess){
printf("Error copying mask h_YMask to M\n");
exit(-1);
}
// Convolution in Gy
sobelSharedMem<<<dimGrid,dimBlock>>>(d_imageGray, width, height, MASK_WIDTH, d_Gy);
cudaDeviceSynchronize();
// Union of Gx and Gy results
UnionCU<<<dimGrid,dimBlock>>>(d_G, d_Gx, d_Gy, height, width);
cudaDeviceSynchronize();
error = cudaMemcpy(h_G, d_G, size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("Error copying data from d_G to h_G\n");
exit(-1);
}
end = clock();
time_used += ((double) (end - start)) /CLOCKS_PER_SEC;
//--------->
printf ("%lf \n",time_used);
free(h_imageInput);
cudaFree(d_imageInput);
cudaFree(d_imageGray);
free(h_G);
cudaFree(d_Gx);
cudaFree(d_Gy);
cudaFree(d_G);
return 0;
}
|
dc5a40719cf78d81254ffa5311bbcbf433b263cd.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
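// This translation unit pins down a single generated configuration: an int8
// NCxHWx<4> forward convolution with a 32x64x32 threadblock tile and a bias +
// HSwish epilogue. The explicit instantiation above keeps the heavy CUTLASS
// template expansion local to this file, which is presumably why the generator
// emits one such file per tile/epilogue combination.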
#pragma GCC diagnostic pop
#endif
| dc5a40719cf78d81254ffa5311bbcbf433b263cd.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
0e509fb201cbe709468f8fd78ac001921558c320.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/utils/cuda_util.h"
#include <hip/hip_runtime_api.h>
#include <boost/thread.hpp>
#include "boost/thread.hpp"
#include "cyber/common/log.h"
namespace apollo {
namespace perception {
namespace inference {
static boost::thread_specific_ptr<CudaUtil> thread_instance_;
#define CUBLAS_CHECK(condition) \
do { \
hipblasStatus_t status = condition; \
CHECK_EQ(status, HIPBLAS_STATUS_SUCCESS) << " " << status; \
} while (0)
CudaUtil &CudaUtil::get() {
if (!thread_instance_.get()) {
thread_instance_.reset(new CudaUtil);
}
return *(thread_instance_.get());
}
CudaUtil::CudaUtil() { CUBLAS_CHECK(hipblasCreate(&cublas_handle_)); }
bool CudaUtil::set_device_id(int device_id) {
int now_device = -1;
auto cuda_error = hipGetDevice(&now_device);
CHECK_EQ(cuda_error, hipSuccess) << " " << hipGetErrorString(cuda_error);
if (now_device == device_id) {
return true;
} else {
cuda_error = hipSetDevice(device_id);
CHECK_EQ(cuda_error, hipSuccess) << " " << hipGetErrorString(cuda_error);
if (get().cublas_handle_ != nullptr) {
CUBLAS_CHECK(hipblasDestroy(get().cublas_handle_));
}
CUBLAS_CHECK(hipblasCreate(&get().cublas_handle_));
}
return true;
}
hipblasHandle_t &CudaUtil::get_handler() { return get().cublas_handle_; }
CudaUtil::~CudaUtil() {
if (get().cublas_handle_) {
CUBLAS_CHECK(hipblasDestroy(get().cublas_handle_));
}
}
} // namespace inference
} // namespace perception
} // namespace apollo
| 0e509fb201cbe709468f8fd78ac001921558c320.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/utils/cuda_util.h"
#include <cuda_runtime_api.h>
#include <boost/thread.hpp>
#include "boost/thread.hpp"
#include "cyber/common/log.h"
namespace apollo {
namespace perception {
namespace inference {
static boost::thread_specific_ptr<CudaUtil> thread_instance_;
#define CUBLAS_CHECK(condition) \
do { \
cublasStatus_t status = condition; \
CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " << status; \
} while (0)
CudaUtil &CudaUtil::get() {
if (!thread_instance_.get()) {
thread_instance_.reset(new CudaUtil);
}
return *(thread_instance_.get());
}
CudaUtil::CudaUtil() { CUBLAS_CHECK(cublasCreate(&cublas_handle_)); }
bool CudaUtil::set_device_id(int device_id) {
int now_device = -1;
auto cuda_error = cudaGetDevice(&now_device);
CHECK_EQ(cuda_error, cudaSuccess) << " " << cudaGetErrorString(cuda_error);
if (now_device == device_id) {
return true;
} else {
cuda_error = cudaSetDevice(device_id);
CHECK_EQ(cuda_error, cudaSuccess) << " " << cudaGetErrorString(cuda_error);
if (get().cublas_handle_ != nullptr) {
CUBLAS_CHECK(cublasDestroy(get().cublas_handle_));
}
CUBLAS_CHECK(cublasCreate(&get().cublas_handle_));
}
return true;
}
cublasHandle_t &CudaUtil::get_handler() { return get().cublas_handle_; }
CudaUtil::~CudaUtil() {
if (get().cublas_handle_) {
CUBLAS_CHECK(cublasDestroy(get().cublas_handle_));
}
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
ed43e81e9e845d3a7f32a0306c23db2b613a2292.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_front*(y)+xdim0_update_halo_kernel2_yvel_plus_4_front*ydim0_update_halo_kernel2_yvel_plus_4_front*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_front*(y)+xdim1_update_halo_kernel2_yvel_plus_4_front*ydim1_update_halo_kernel2_yvel_plus_4_front*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(0,0,-4)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(0,0,-4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front * ydim0_update_halo_kernel2_yvel_plus_4_front;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front * ydim1_update_halo_kernel2_yvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(75,"update_halo_kernel2_yvel_plus_4_front");
OPS_kernels[75].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[75].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[75].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[75].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[75].transfer += ops_compute_transfer(dim, range, &arg1);
}
| ed43e81e9e845d3a7f32a0306c23db2b613a2292.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_front*(y)+xdim0_update_halo_kernel2_yvel_plus_4_front*ydim0_update_halo_kernel2_yvel_plus_4_front*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_front*(y)+xdim1_update_halo_kernel2_yvel_plus_4_front*ydim1_update_halo_kernel2_yvel_plus_4_front*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(0,0,-4)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(0,0,-4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front * ydim0_update_halo_kernel2_yvel_plus_4_front;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front * ydim1_update_halo_kernel2_yvel_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(75,"update_halo_kernel2_yvel_plus_4_front");
OPS_kernels[75].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[75].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_plus_4_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[75].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[75].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[75].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
e472af953a49356899cf7e3fd1f8d55d204bbace.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <random>
#include <ctime>
#include <vector>
// HANDLE_ERROR was used below but never defined; minimal error checker:
#define HANDLE_ERROR(call) do { hipError_t e_ = (call); if (e_ != hipSuccess) { printf("%s\n", hipGetErrorString(e_)); exit(1); } } while (0)
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(0.0, 5.0);
v_y = fRand(0.0, 5.0);
r = 1.0;
}
};
//one thread per obstacle: compute the collision window and write both times into list
__global__ void intersectTime_g(int n, const Obstacle* points, double* list)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
const Obstacle &a = points[i];
//distance between obstacle and scooter
double distance = sqrt(a._x * a._x + a._y * a._y);
//path length for start and end collision
double d_start = distance - 2.0;
double d_end = distance + 2.0;
//velocity of obstacle
double velocity = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time for start and end collision
double t_start = d_start / velocity;
double t_end = d_end / velocity;
//store start/end time into the output array (two slots per obstacle)
list[2 * i] = t_start;
list[2 * i + 1] = t_end;
//for test output
//printf("(%.2lf, %.2lf), v = %.2lf\n", a._x, a._y, velocity);
}
std::vector<double> intersectTime_c(Obstacle a)
{
//distance between obstacle and scooter
double distance = sqrt(a._x * a._x + a._y * a._y);
//path length for start and end collision
double d_start = distance - 2.0;
double d_end = distance + 2.0;
//velocity of obstacle
double velocity = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time for start and end collision
double t_start = d_start / velocity;
double t_end = d_end / velocity;
//store start/end time into vector
std::vector<double> result;
result.push_back(t_start);
result.push_back(t_end);
//for test output
//printf("(%.2lf, %.2lf), v = %.2lf\n", a._x, a._y, velocity);
return result;
}
void print(std::vector<std::vector<double> > &list)
{
for(int i = 0; i < list.size(); i++)
{
printf("start_time: %.2lf | end_time: %.2lf\n", list.at(i).at(0), list.at(i).at(1));
}
}
int main()
{
//no of obstacles = n * 10
for(int n = 0; n < 100; n++)
{
/**
* GPU
*/
//start timing
float elapsed_g = 0;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
//sample size: 1000
for (int r = 0; r < 1000; r++)
{
srand(time(0));
/* Allocate Unified memeory - accessible from CPU or GPU */
//points: store n * 10 obstacle object.
Obstacle *points;
hipMallocManaged(&points, n * 10 * sizeof(Obstacle));
//list : store start_time, end_time for all obstacles
//n * 10 obstacles has 2 double time values
double *list;
hipMallocManaged(&list, n * 10 * 2 * sizeof(double));
//initialize obstacle array on the host
for(int i = 0; i < (n * 10); i++)
{
Obstacle obs;
points[i] = obs;
}
// run kernel on GPU: one thread per obstacle
/*Streaming Multiprocessors*/
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
if (numBlocks > 0)
hipLaunchKernelGGL(( intersectTime_g), dim3(numBlocks), dim3(blockSize), 0, 0, n * 10, points, list);
hipDeviceSynchronize();
hipFree(points);
hipFree(list);
}
//print time for gpu
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize (stop) );
HANDLE_ERROR(hipEventElapsedTime(&elapsed_g, start, stop) );
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
printf("%d GPU: %.8lf s ", (n*10), elapsed_g);
/**
* CPU
*/
//start timing
long double total_time = 0.0;
clock_t e = clock();
//sample size : 1000
for (int r = 0; r < 1000; r++)
{
srand(time(0));
std::vector<std::vector<double> > list;
for(int i = 0; i < (n*10); i++)
{
Obstacle obs;
std::vector<double> result = intersectTime_c(obs);
list.push_back(result);
}
}
//print time for cpu
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
// calculate time used for each sample
printf("CPU: %.8lf s ", elapsed_c / 1000.0);
//print CPU / GPU : increase rate
printf("%.2lf \n", elapsed_c / elapsed_g);
}
}
| e472af953a49356899cf7e3fd1f8d55d204bbace.cu | #include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <random>
#include <ctime>
#include <vector>
// HANDLE_ERROR was used below but never defined; minimal error checker:
#define HANDLE_ERROR(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { printf("%s\n", cudaGetErrorString(e_)); exit(1); } } while (0)
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(0.0, 5.0);
v_y = fRand(0.0, 5.0);
r = 1.0;
}
};
//one thread per obstacle: compute the collision window and write both times into list
__global__ void intersectTime_g(int n, const Obstacle* points, double* list)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
const Obstacle &a = points[i];
//distance between obstacle and scooter
double distance = sqrt(a._x * a._x + a._y * a._y);
//path length for start and end collision
double d_start = distance - 2.0;
double d_end = distance + 2.0;
//velocity of obstacle
double velocity = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time for start and end collision
double t_start = d_start / velocity;
double t_end = d_end / velocity;
//store start/end time into the output array (two slots per obstacle)
list[2 * i] = t_start;
list[2 * i + 1] = t_end;
//for test output
//printf("(%.2lf, %.2lf), v = %.2lf\n", a._x, a._y, velocity);
}
std::vector<double> intersectTime_c(Obstacle a)
{
//distance between obstacle and scooter
double distance = sqrt(a._x * a._x + a._y * a._y);
//path length for start and end collision
double d_start = distance - 2.0;
double d_end = distance + 2.0;
//velocity of obstacle
double velocity = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time for start and end collision
double t_start = d_start / velocity;
double t_end = d_end / velocity;
//store start/end time into vector
std::vector<double> result;
result.push_back(t_start);
result.push_back(t_end);
//for test output
//printf("(%.2lf, %.2lf), v = %.2lf\n", a._x, a._y, velocity);
return result;
}
void print(std::vector<std::vector<double> > &list)
{
for(int i = 0; i < list.size(); i++)
{
printf("start_time: %.2lf | end_time: %.2lf\n", list.at(i).at(0), list.at(i).at(1));
}
}
int main()
{
//no of obstacles = n * 10
for(int n = 0; n < 100; n++)
{
/**
* GPU
*/
//start timing
float elapsed_g = 0;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
//sample size: 1000
for (int r = 0; r < 1000; r++)
{
srand(time(0));
/* Allocate Unified memeory - accessible from CPU or GPU */
//points: store n * 10 obstacle object.
Obstacle *points;
cudaMallocManaged(&points, n * 10 * sizeof(Obstacle));
//list : store start_time, end_time for all obstacles
//n * 10 obstacles has 2 double time values
double *list;
cudaMallocManaged(&list, n * 10 * 2 * sizeof(double));
//initialize obstacle array on the host
for(int i = 0; i < (n * 10); i++)
{
Obstacle obs;
points[i] = obs;
}
// run kernel on GPU: one thread per obstacle
/*Streaming Multiprocessors*/
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
if (numBlocks > 0)
intersectTime_g<<<numBlocks, blockSize>>>(n * 10, points, list);
cudaDeviceSynchronize();
cudaFree(points);
cudaFree(list);
}
//print time for gpu
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize (stop) );
HANDLE_ERROR(cudaEventElapsedTime(&elapsed_g, start, stop) );
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("%d GPU: %.8lf s ", (n*10), elapsed_g);
/**
* CPU
*/
//start timing
long double total_time = 0.0;
clock_t e = clock();
//sample size : 1000
for (int r = 0; r < 1000; r++)
{
srand(time(0));
std::vector<std::vector<double> > list;
for(int i = 0; i < (n*10); i++)
{
Obstacle obs;
std::vector<double> result = intersectTime_c(obs);
list.push_back(result);
}
}
//print time for cpu
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
// calculate time used for each sample
printf("CPU: %.8lf s ", elapsed_c / 1000.0);
//print CPU / GPU : increase rate
printf("%.2lf \n", elapsed_c / elapsed_g);
}
}
|