hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
60ffc4b4ca9dc427a68caa8530586a78e0f3d1b2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <deque>
#include <functional>
#include <mutex> // NOLINT
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/nvtx.h"
namespace k2 {
namespace {
struct BlockSize {
size_t size; // size of this memory block in bytes
void *ptr; // pointer to the beginning of this memory block
explicit BlockSize(size_t size, void *ptr = nullptr) : size(size), ptr(ptr) {}
};
struct Block : public BlockSize {
bool allocated; // true if the block is currently allocated
// false if the block is available for allocation
int event_count; // number of outstanding cuda events
std::unordered_set<hipStream_t> streams;
Block(size_t size, void *ptr, bool allocated)
: BlockSize(size, ptr), allocated(allocated), event_count(0), streams() {}
};
static bool BlockComparator(const BlockSize &a, const BlockSize &b) {
NVTX_RANGE(K2_FUNC);
// sort by size, break ties with pointer
if (a.size != b.size) return a.size < b.size;
return std::less<void *>()(a.ptr, b.ptr);
}
/* Allocate pinned memory using hipHostMalloc with caching.
WARNING: Once memory is allocated, it is not returned to the system.
Use it with care!
*/
class PinnedAllocator {
public:
PinnedAllocator() : available_(&BlockComparator) {}
/* Allocate a block of memory.
If we can find a free block that is large enough (first fit or best fit
as free blocks are sorted by size) for the requested size, the free block
is marked as allocated and returned to the user.
If no free blocks are available, a new block is allocated by
using `hipHostMalloc`.
@param [in] size Number of bytes to be allocated.
@param [out] ptr On return, it contains the starting address of
the allocated memory.
@return Return hipSuccess on success. Return a CUDA error code on failure.
*/
hipError_t Malloc(size_t size, void **ptr) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_NE(ptr, nullptr);
std::lock_guard<std::mutex> lock(mutex_);
// If the number of outstanding cuda events is larger
// than 100, we will invoke `ProcessEvents()`, which
// may make some blocks available for reuse.
//
// If the number of cuda_events_ is small,
// we first try to find a block from the pool. If it
// cannot find one, we then invoke `ProcessEvents`.
//
// The purpose is to reduce the time of waiting for
// the pending events.
for (int32_t iter = 0; iter < 2; ++iter) {
if (cuda_events_.size() > 100 || iter > 0) {
// ProcessEvents may free blocks
hipError_t err = ProcessEvents();
if (err != hipSuccess) return err;
}
// search for the smallest block which can hold this allocation
BlockSize search_key(size);
auto it = available_.lower_bound(search_key);
if (it != available_.end()) {
// we find an unused block
Block &block = blocks_.at(it->ptr);
K2_CHECK(!block.allocated && block.event_count == 0);
block.allocated = true;
*ptr = block.ptr;
available_.erase(it);
return hipSuccess;
}
}
// we need to allocate a new block.
// note that hipHostMalloc may not touch pointer if size is 0
*ptr = 0;
hipError_t err = hipHostMalloc(ptr, size);
if (err != hipSuccess) return err;
blocks_.insert({*ptr, Block(size, *ptr, true)});
return hipSuccess;
}
/* Free memory allocated by `Malloc`.
@param [in] ptr Pointer to the starting address of a block
allocated by `Malloc`.
@return Return hipSuccess on success. Return a CUDA error code on failure.
*/
hipError_t Free(void *ptr) {
NVTX_RANGE(K2_FUNC);
if (ptr == nullptr) return hipSuccess;
std::lock_guard<std::mutex> lock(mutex_);
// process outstanding cuda events which may have occurred
hipError_t err = ProcessEvents();
if (err != hipSuccess) return err;
auto it = blocks_.find(ptr);
K2_CHECK(it != blocks_.end())
<< "The passed pointer is not allocated by Malloc!";
Block &block = it->second;
K2_CHECK(block.allocated);
block.allocated = false;
// insert CUDA events for each stream on which this block was used.
err = InsertEvents(block);
if (err != hipSuccess) return err;
if (block.event_count == 0) {
// the block can be re-used if there are no outstanding cuda events
available_.insert(block);
}
return hipSuccess;
}
/* Record an event of a ptr with a stream.
@param [in] stream A CUDA stream.
@param [in] ptr It is a pointer returned by `Malloc`.
*/
void RecordEvent(hipStream_t stream, void *ptr) {
NVTX_RANGE(K2_FUNC);
std::lock_guard<std::mutex> lock(mutex_);
auto it = blocks_.find(ptr);
if (it == blocks_.end()) {
// this pointer is not returned by `Malloc`, ignore it.
return;
}
Block &block = it->second;
K2_CHECK(block.allocated)
<< "RecordEvent is called with a block that has not been allocated!";
block.streams.insert(stream);
}
private:
hipError_t InsertEvents(Block &block) {
NVTX_RANGE(K2_FUNC);
// InsertEvents is called from `Free`, which already holds the mutex.
std::unordered_set<hipStream_t> streams(std::move(block.streams));
for (auto it = streams.begin(); it != streams.end(); ++it) {
hipEvent_t event;
hipError_t err =
hipEventCreateWithFlags(&event, hipEventDisableTiming);
if (err != hipSuccess) return err;
err = hipEventRecord(event, *it);
if (err != hipSuccess) return err;
++block.event_count;
cuda_events_.emplace_back(event, block.ptr);
}
return hipSuccess;
}
/* Process events in `cuda_events_`.
If the events of a block have all been processed, this block
is put into `available_` and is ready for reuse.
If `hipEventQuery()` returns `hipErrorNotReady`, it
returns immediately.
@return `hipSuccess` on success; on error, it returns a
cuda error code.
*/
hipError_t ProcessEvents() {
NVTX_RANGE(K2_FUNC);
// ProcessEvents is called from `Malloc` and `Free`,
// which already hold the mutex.
while (!cuda_events_.empty()) {
auto &e = cuda_events_.front();
hipEvent_t event = e.first;
hipError_t err = hipEventQuery(event);
if (err == hipErrorNotReady) break;
if (err != hipSuccess) return err;
err = hipEventDestroy(event);
if (err != hipSuccess) return err;
Block &block = blocks_.at(e.second);
--block.event_count;
if (block.event_count == 0 && !block.allocated) available_.insert(block);
cuda_events_.pop_front();
}
return hipSuccess;
}
private:
// It contains all blocks allocated by Malloc.
std::unordered_map<void *, Block> blocks_;
using Compare = bool (*)(const BlockSize &, const BlockSize &);
// It contains all free blocks **sorted** by block size in increasing order.
std::set<BlockSize, Compare> available_;
// outstanding cuda events
std::deque<std::pair<hipEvent_t, void *>> cuda_events_;
// to protect `blocks_`, `available_` and `cuda_events_` being accessed
// from multiple threads
std::mutex mutex_;
};
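// Illustrative usage sketch for PinnedAllocator (a hypothetical caller; the
// stream/buffer names below are assumptions, not part of this file):
//
//   PinnedAllocator *alloc = GetPinnedAllocator();
//   void *host_buf = nullptr;
//   if (alloc->Malloc(num_bytes, &host_buf) == hipSuccess) {
//     hipMemcpyAsync(device_buf, host_buf, num_bytes,
//                    hipMemcpyHostToDevice, stream);
//     alloc->RecordEvent(stream, host_buf);  // remember the stream using it
//     alloc->Free(host_buf);  // block is recycled once the events complete
//   }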
static PinnedAllocator *GetPinnedAllocator() {
static std::once_flag init_flag;
static PinnedAllocator *allocator = nullptr;
std::call_once(init_flag, []() {
// it is never freed.
allocator = new PinnedAllocator;
});
return allocator;
}
} // namespace
class PinnedContext : public Context {
public:
PinnedContext() { allocator_ = GetPinnedAllocator(); }
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = nullptr;
hipError_t err = allocator_->Malloc(bytes, &p);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
(void)deleter_context;
allocator_->Free(data);
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
// we assume that src and dst do not overlap
memcpy(dst, src, num_bytes);
break;
case kCuda: {
hipStream_t stream = dst_context->GetCudaStream();
hipError_t ret = hipMemcpyAsync(dst, src, num_bytes,
hipMemcpyHostToDevice, stream);
K2_CHECK_CUDA_ERROR(ret);
allocator_->RecordEvent(stream, const_cast<void *>(src));
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
PinnedAllocator *allocator_; // NOT owned here
};
ContextPtr GetPinnedContext() {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
std::call_once(has_cuda_init_flag, []() {
int32_t count = 0;
hipError_t err = hipGetDeviceCount(&count);
if (err != hipSuccess) {
K2_LOG(WARNING) << "hipGetDeviceCount() failed: "
<< hipGetErrorString(err) << "\n."
<< "Return a CPU context";
} else if (count == 0) {
K2_LOG(WARNING)
<< "No CUDA capable devices are found. Return a CPU context.";
} else {
has_cuda = true;
}
});
if (has_cuda) return std::make_shared<PinnedContext>();
return GetCpuContext();
}
ContextPtr GetContextForTransfer(DeviceType device_type) {
switch (device_type) {
case kCpu:
return GetCpuContext();
case kCuda:
return GetPinnedContext();
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
return nullptr;
}
}
} // namespace k2
| 60ffc4b4ca9dc427a68caa8530586a78e0f3d1b2.cu | /**
* Copyright (c) 2020 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <deque>
#include <functional>
#include <mutex> // NOLINT
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/nvtx.h"
namespace k2 {
namespace {
struct BlockSize {
size_t size; // size of this memory block in bytes
void *ptr; // pointer to the beginning of this memory block
explicit BlockSize(size_t size, void *ptr = nullptr) : size(size), ptr(ptr) {}
};
struct Block : public BlockSize {
bool allocated; // true if the block is currently allocated
// false if the block is available for allocation
int event_count; // number of outstanding cuda events
std::unordered_set<cudaStream_t> streams;
Block(size_t size, void *ptr, bool allocated)
: BlockSize(size, ptr), allocated(allocated), event_count(0), streams() {}
};
static bool BlockComparator(const BlockSize &a, const BlockSize &b) {
NVTX_RANGE(K2_FUNC);
// sort by size, break ties with pointer
if (a.size != b.size) return a.size < b.size;
return std::less<void *>()(a.ptr, b.ptr);
}
/* Allocate pinned memory using cudaMallocHost with caching.
WARNING: Once memory is allocated, it is not returned to the system.
Use it with care!
*/
class PinnedAllocator {
public:
PinnedAllocator() : available_(&BlockComparator) {}
/* Allocate a block of memory.
If we can find a free block that is large enough (first fit or best fit
as free blocks are sorted by size) for the requested size, the free block
is marked as allocated and returned to the user.
If no free blocks are available, a new block is allocated by
using `cudaMallocHost`.
@param [in] size Number of bytes to be allocated.
@param [out] ptr On return, it contains the starting address of
the allocated memory.
@return Return cudaSuccess on success. Return a CUDA error code on failure.
*/
cudaError_t Malloc(size_t size, void **ptr) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_NE(ptr, nullptr);
std::lock_guard<std::mutex> lock(mutex_);
// If the number of outstanding cuda events is larger
// than 100, we will invoke `ProcessEvents()`, which
// may make some blocks available for reuse.
//
// If the number of cuda_events_ is small,
// we first try to find a block from the pool. If it
// cannot find one, we then invoke `ProcessEvents`.
//
// The purpose is to reduce the time of waiting for
// the pending events.
for (int32_t iter = 0; iter < 2; ++iter) {
if (cuda_events_.size() > 100 || iter > 0) {
// ProcessEvents may free blocks
cudaError_t err = ProcessEvents();
if (err != cudaSuccess) return err;
}
// search for the smallest block which can hold this allocation
BlockSize search_key(size);
auto it = available_.lower_bound(search_key);
if (it != available_.end()) {
// we find an unused block
Block &block = blocks_.at(it->ptr);
K2_CHECK(!block.allocated && block.event_count == 0);
block.allocated = true;
*ptr = block.ptr;
available_.erase(it);
return cudaSuccess;
}
}
// we need to allocate a new block.
// note that cudaMallocHost may not touch pointer if size is 0
*ptr = 0;
cudaError_t err = cudaMallocHost(ptr, size);
if (err != cudaSuccess) return err;
blocks_.insert({*ptr, Block(size, *ptr, true)});
return cudaSuccess;
}
/* Free memory allocated by `Malloc`.
@param [in] ptr Pointer to the starting address of a block
allocated by `Malloc`.
@return Return cudaSuccess on success. Return a CUDA error code on failure.
*/
cudaError_t Free(void *ptr) {
NVTX_RANGE(K2_FUNC);
if (ptr == nullptr) return cudaSuccess;
std::lock_guard<std::mutex> lock(mutex_);
// process outstanding cuda events which may have occurred
cudaError_t err = ProcessEvents();
if (err != cudaSuccess) return err;
auto it = blocks_.find(ptr);
K2_CHECK(it != blocks_.end())
<< "The passed pointer is not allocated by Malloc!";
Block &block = it->second;
K2_CHECK(block.allocated);
block.allocated = false;
// insert CUDA events for each stream on which this block was used.
err = InsertEvents(block);
if (err != cudaSuccess) return err;
if (block.event_count == 0) {
// the block can be re-used if there are no outstanding cuda events
available_.insert(block);
}
return cudaSuccess;
}
/* Record an event of a ptr with a stream.
@param [in] stream A CUDA stream.
@param [in] ptr It is a pointer returned by `Malloc`.
*/
void RecordEvent(cudaStream_t stream, void *ptr) {
NVTX_RANGE(K2_FUNC);
std::lock_guard<std::mutex> lock(mutex_);
auto it = blocks_.find(ptr);
if (it == blocks_.end()) {
// this pointer is not returned by `Malloc`, ignore it.
return;
}
Block &block = it->second;
K2_CHECK(block.allocated)
<< "RecordEvent is called with a block that has not been allocated!";
block.streams.insert(stream);
}
private:
cudaError_t InsertEvents(Block &block) {
NVTX_RANGE(K2_FUNC);
// InsertEvents is called from `Free`, which already holds the mutex.
std::unordered_set<cudaStream_t> streams(std::move(block.streams));
for (auto it = streams.begin(); it != streams.end(); ++it) {
cudaEvent_t event;
cudaError_t err =
cudaEventCreateWithFlags(&event, cudaEventDisableTiming);
if (err != cudaSuccess) return err;
err = cudaEventRecord(event, *it);
if (err != cudaSuccess) return err;
++block.event_count;
cuda_events_.emplace_back(event, block.ptr);
}
return cudaSuccess;
}
/* Process events in `cuda_events_`.
If the events of a block have all been processed, this block
is put into `available_` and is ready for reuse.
If `cudaEventQuery()` returns `cudaErrorNotReady`, it
returns immediately.
@return `cudaSuccess` on success; on error, it returns a
cuda error code.
*/
cudaError_t ProcessEvents() {
NVTX_RANGE(K2_FUNC);
// ProcessEvents is called from `Malloc` and `Free`,
// which already hold the mutex.
while (!cuda_events_.empty()) {
auto &e = cuda_events_.front();
cudaEvent_t event = e.first;
cudaError_t err = cudaEventQuery(event);
if (err == cudaErrorNotReady) break;
if (err != cudaSuccess) return err;
err = cudaEventDestroy(event);
if (err != cudaSuccess) return err;
Block &block = blocks_.at(e.second);
--block.event_count;
if (block.event_count == 0 && !block.allocated) available_.insert(block);
cuda_events_.pop_front();
}
return cudaSuccess;
}
private:
// It contains all blocks allocated by Malloc.
std::unordered_map<void *, Block> blocks_;
using Compare = bool (*)(const BlockSize &, const BlockSize &);
// It contains all free blocks **sorted** by block size in increasing order.
std::set<BlockSize, Compare> available_;
// outstanding cuda events
std::deque<std::pair<cudaEvent_t, void *>> cuda_events_;
// to protect `blocks_`, `available_` and `cuda_events_` being accessed
// from multiple threads
std::mutex mutex_;
};
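// Illustrative usage sketch for PinnedAllocator (a hypothetical caller; the
// stream/buffer names below are assumptions, not part of this file):
//
//   PinnedAllocator *alloc = GetPinnedAllocator();
//   void *host_buf = nullptr;
//   if (alloc->Malloc(num_bytes, &host_buf) == cudaSuccess) {
//     cudaMemcpyAsync(device_buf, host_buf, num_bytes,
//                     cudaMemcpyHostToDevice, stream);
//     alloc->RecordEvent(stream, host_buf);  // remember the stream using it
//     alloc->Free(host_buf);  // block is recycled once the events complete
//   }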
static PinnedAllocator *GetPinnedAllocator() {
static std::once_flag init_flag;
static PinnedAllocator *allocator = nullptr;
std::call_once(init_flag, []() {
// it is never freed.
allocator = new PinnedAllocator;
});
return allocator;
}
} // namespace
class PinnedContext : public Context {
public:
PinnedContext() { allocator_ = GetPinnedAllocator(); }
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = nullptr;
cudaError_t err = allocator_->Malloc(bytes, &p);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
(void)deleter_context;
allocator_->Free(data);
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
// we assume that src and dst do not overlap
memcpy(dst, src, num_bytes);
break;
case kCuda: {
cudaStream_t stream = dst_context->GetCudaStream();
cudaError_t ret = cudaMemcpyAsync(dst, src, num_bytes,
cudaMemcpyHostToDevice, stream);
K2_CHECK_CUDA_ERROR(ret);
allocator_->RecordEvent(stream, const_cast<void *>(src));
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
PinnedAllocator *allocator_; // NOT owned here
};
ContextPtr GetPinnedContext() {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
std::call_once(has_cuda_init_flag, []() {
int32_t count = 0;
cudaError_t err = cudaGetDeviceCount(&count);
if (err != cudaSuccess) {
K2_LOG(WARNING) << "cudaGetDeviceCount() failed: "
<< cudaGetErrorString(err) << "\n."
<< "Return a CPU context";
} else if (count == 0) {
K2_LOG(WARNING)
<< "No CUDA capable devices are found. Return a CPU context.";
} else {
has_cuda = true;
}
});
if (has_cuda) return std::make_shared<PinnedContext>();
return GetCpuContext();
}
ContextPtr GetContextForTransfer(DeviceType device_type) {
switch (device_type) {
case kCpu:
return GetCpuContext();
case kCuda:
return GetPinnedContext();
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
return nullptr;
}
}
} // namespace k2
|
961e8c9db75929ffc97f77d82cfcab75dc7a5e28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "astar_gpu.h"
#include "heap.h"
#include "list.h"
#include "sliding_puzzle.h"
#include "pathfinding.h"
#include "cuda_utils.h"
#include "assert.h"
#include <vector>
#include <string>
#include <sstream>
#include <algorithm>
#include <fstream>
#include <chrono>
#define STATES (32 * 1024ll * 1024)
#define HASH_SIZE (1024 * 1024)
#define HASH_FUNS 128
__global__ void init_heap(const char *s, heap **Q, state *states_pool, char *nodes_pool, int state_len);
__global__ void clear_list(list *S);
__global__ void fill_list(const char *t, int k, int state_len,
heap **Q, list *S, state *states_pool, char *nodes_pool,
char ***expand_buf, expand_fun expand, heur_fun h, states_delta_fun states_delta);
__global__ void deduplicate(state **H, list *T, const char *t, heur_fun h);
__global__ void push_to_queues(const char *t, int k, heap **Q, list *S, heur_fun h, int off);
__device__ int f(const state *x, const char *t, heur_fun h);
__device__ int calculate_id();
__device__ state *state_create(const char *node, int f, int g, state *prev,
state *states_pool, char *nodes_pool, int state_len);
char ***expand_bufs_create(int bufs, int elements, int element_size);
char **expand_buf_create(int elements, int element_size);
void states_pool_create(state **states, char **nodes, int node_size);
void states_pool_destroy(state *states_pool, char *nodes_pool);
#define THREADS_PER_BLOCK 1024
#define BLOCKS 16
#define RESULT_LEN (1024 * 1024)
__device__ int total_Q_size = 0;
__device__ int found = 0;
__device__ int out_of_memory = 0;
__device__ char result_path[RESULT_LEN];
// Rewrite A* to take in a MAPF object
void astar_gpu_mapf(mapf m, std::vector<std::pair<int, int> > starts, std::fstream &output)
{
// need to do preprocessing work for mapf
// modifying pathfinding_read_input
rows_cpu = m.get_y();
cols_cpu = m.get_x();
// Start and end positions
// I'm assuming this has something to do with mapf goals
std::string s_tmp = "";
std::string t_tmp = "";
std::vector<std::pair<int, int>> goals = m.get_goals();
for(std::vector<std::pair<int, int>>::const_iterator it = goals.begin(); it != goals.end(); ++it) {
t_tmp += to_string(it->first) + ',' + to_string(it->second);
}
for(std::vector<std::pair<int, int>>::const_iterator it = starts.begin(); it != starts.end(); ++it) {
s_tmp += to_string(it->first) + ',' + to_string(it->second);
}
// Wants them as C-strings
const char* s_out = s_tmp.c_str();
const char* t_out = t_tmp.c_str();
// Add obstacles to board
std::vector<std::pair<int, int>> obstacles = m.get_obstacles();
for(std::vector<std::pair<int, int>>::const_iterator it = obstacles.begin(); it != obstacles.end(); ++it) {
board_cpu[it->first][it->second] = -1;
}
// Cells that have connections with weights greater than 1,2
// Not sure if this is applicable to us
char *s_gpu, *t_gpu;
int k = THREADS_PER_BLOCK * BLOCKS;
expand_fun expand_fun_cpu;
heur_fun h_cpu;
states_delta_fun states_delta_cpu;
int expand_elements;
int expand_element_size;
auto start = std::chrono::high_resolution_clock::now();
pathfinding_preprocessing(s_out, t_out, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
state **H;
char ***expand_buf = expand_bufs_create(THREADS_PER_BLOCK * BLOCKS, expand_elements, expand_element_size);
HANDLE_RESULT(hipMalloc(&H, HASH_SIZE * sizeof(state*)));
HANDLE_RESULT(hipMemset(H, 0, HASH_SIZE * sizeof(state*)));
heap **Q = heaps_create(k);
list **Ss = lists_create(BLOCKS, 1000000);
list *S = list_create(1024 * 1024);
state *states_pool;
char *nodes_pool;
states_pool_create(&states_pool, &nodes_pool, expand_element_size);
int total_Q_size_cpu;
int found_cpu;
int out_of_memory_cpu;
hipLaunchKernelGGL(( init_heap), dim3(1), dim3(1), 0, 0, s_gpu, Q, states_pool, nodes_pool, expand_element_size);
int step = 0;
do {
hipLaunchKernelGGL(( clear_list), dim3(1), dim3(1), 0, 0, S);
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( fill_list), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, t_gpu, k, expand_element_size, Q, S, states_pool, nodes_pool,
expand_buf, expand_fun_cpu, h_cpu, states_delta_cpu);
HANDLE_RESULT(hipMemcpyFromSymbol(&found_cpu, found, sizeof(int)));
HANDLE_RESULT(hipMemcpyFromSymbol(&out_of_memory_cpu, out_of_memory, sizeof(int)));
if (found_cpu) break;
if (out_of_memory_cpu) break;
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( deduplicate), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, H, S, t_gpu, h_cpu);
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( push_to_queues), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, t_gpu, k, Q, S, h_cpu, step) ;
HANDLE_RESULT(hipDeviceSynchronize());
HANDLE_RESULT(hipMemcpyFromSymbol(&total_Q_size_cpu, total_Q_size, sizeof(int)));
step++;
} while (total_Q_size_cpu > 0);
auto end = std::chrono::high_resolution_clock::now();
auto duration = end - start;
output << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() << "\n";
if (found_cpu) {
char result_path_cpu[RESULT_LEN];
HANDLE_RESULT(hipMemcpyFromSymbol(result_path_cpu, result_path, RESULT_LEN));
std::string path_str = std::string(result_path_cpu);
std::istringstream path_stream;
path_stream.str(result_path_cpu);
std::vector<std::string> v;
for (std::string line; std::getline(path_stream, line); ) {
v.push_back(line);
}
std::reverse(v.begin(), v.end());
if (version == SLIDING) {
output << sliding_puzzle_postprocessing(v);
} else if (version == PATHFINDING) {
for (std::string path_el: v) {
output << path_el << "\n";
}
}
}
states_pool_destroy(states_pool, nodes_pool);
lists_destroy(Ss, BLOCKS);
heaps_destroy(Q, k);
HANDLE_RESULT(hipFree(H));
HANDLE_RESULT(hipDeviceSynchronize());
}
void astar_gpu(const char *s_in, const char *t_in, version_value version, std::fstream &output) {
char *s_gpu, *t_gpu;
int k = THREADS_PER_BLOCK * BLOCKS;
expand_fun expand_fun_cpu;
heur_fun h_cpu;
states_delta_fun states_delta_cpu;
int expand_elements;
int expand_element_size;
auto start = std::chrono::high_resolution_clock::now();
if (version == SLIDING) {
sliding_puzzle_preprocessing(s_in, t_in, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
} else if (version == PATHFINDING) {
pathfinding_preprocessing(s_in, t_in, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
}
state **H;
char ***expand_buf = expand_bufs_create(THREADS_PER_BLOCK * BLOCKS, expand_elements, expand_element_size);
HANDLE_RESULT(hipMalloc(&H, HASH_SIZE * sizeof(state*)));
HANDLE_RESULT(hipMemset(H, 0, HASH_SIZE * sizeof(state*)));
heap **Q = heaps_create(k);
list **Ss = lists_create(BLOCKS, 1000000);
list *S = list_create(1024 * 1024);
state *states_pool;
char *nodes_pool;
states_pool_create(&states_pool, &nodes_pool, expand_element_size);
int total_Q_size_cpu;
int found_cpu;
int out_of_memory_cpu;
hipLaunchKernelGGL(( init_heap), dim3(1), dim3(1), 0, 0, s_gpu, Q, states_pool, nodes_pool, expand_element_size);
int step = 0;
do {
hipLaunchKernelGGL(( clear_list), dim3(1), dim3(1), 0, 0, S);
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( fill_list), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, t_gpu, k, expand_element_size, Q, S, states_pool, nodes_pool,
expand_buf, expand_fun_cpu, h_cpu, states_delta_cpu);
HANDLE_RESULT(hipMemcpyFromSymbol(&found_cpu, found, sizeof(int)));
HANDLE_RESULT(hipMemcpyFromSymbol(&out_of_memory_cpu, out_of_memory, sizeof(int)));
if (found_cpu) break;
if (out_of_memory_cpu) break;
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( deduplicate), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, H, S, t_gpu, h_cpu);
HANDLE_RESULT(hipDeviceSynchronize());
hipLaunchKernelGGL(( push_to_queues), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, t_gpu, k, Q, S, h_cpu, step) ;
HANDLE_RESULT(hipDeviceSynchronize());
HANDLE_RESULT(hipMemcpyFromSymbol(&total_Q_size_cpu, total_Q_size, sizeof(int)));
step++;
} while (total_Q_size_cpu > 0);
auto end = std::chrono::high_resolution_clock::now();
auto duration = end - start;
output << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() << "\n";
if (found_cpu) {
char result_path_cpu[RESULT_LEN];
HANDLE_RESULT(hipMemcpyFromSymbol(result_path_cpu, result_path, RESULT_LEN));
std::string path_str = std::string(result_path_cpu);
std::istringstream path_stream;
path_stream.str(result_path_cpu);
std::vector<std::string> v;
for (std::string line; std::getline(path_stream, line); ) {
v.push_back(line);
}
std::reverse(v.begin(), v.end());
if (version == SLIDING) {
output << sliding_puzzle_postprocessing(v);
} else if (version == PATHFINDING) {
for (std::string path_el: v) {
output << path_el << "\n";
}
}
}
states_pool_destroy(states_pool, nodes_pool);
lists_destroy(Ss, BLOCKS);
heaps_destroy(Q, k);
HANDLE_RESULT(hipFree(H));
HANDLE_RESULT(hipDeviceSynchronize());
}
__global__ void init_heap(const char *s, heap **Q, state *states_pool, char *nodes_pool, int state_len) {
heap_insert(Q[0], state_create(s, 0, 0, NULL, states_pool, nodes_pool, state_len));
atomicAdd(&total_Q_size, 1);
}
__device__ int processed = 0;
__device__ int steps = 0;
__device__ int heaps_min_before;
__global__ void clear_list(list *S) {
list_clear(S);
}
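// fill_list: one expansion step of the parallel A* search. Each thread walks a
// strided subset of the k priority queues, pops the best state from each, and
// either keeps it as a goal candidate (if its node equals the target t) or
// expands it, appending the successors to the shared list S. When a goal
// candidate's f-value is no worse than the minimum f across all queues, the
// winning thread (selected with atomicCAS on `found`) reconstructs the path
// backwards through the prev pointers into result_path.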
__global__ void fill_list(const char *t, int k, int state_len,
heap **Q, list *S, state *states_pool, char *nodes_pool,
char ***expand_buf, expand_fun expand, heur_fun h, states_delta_fun states_delta) {
state *m = NULL;
int id = calculate_id();
char **my_expand_buf = expand_buf[id];
if (id == 0)steps++;
for (int i = id; i < k; i += blockDim.x * gridDim.x) {
if (Q[i]->size == 0) continue;
state *q = heap_extract(Q[i]);
atomicSub(&total_Q_size, 1);
if (cuda_str_eq(q->node, t)) {
if (m == NULL || f(q, t, h) < f(m, t, h)) {
m = q;
}
continue;
}
expand(q->node, my_expand_buf);
for (int j = 0; my_expand_buf[j][0] != '\0'; j++) {
int delta = states_delta(q->node, my_expand_buf[j]);
state *new_state = state_create(my_expand_buf[j], -1, q->g + delta, q, states_pool, nodes_pool, state_len);
if (new_state == NULL) return;
list_insert(S, new_state);
}
}
if (m != NULL && f(m, t, h) <= heaps_min(Q, k)) {
int found_before = atomicCAS(&found, 0, 1);
if (found_before == 1) return;
state *cur = m;
int result_len = 0;
while (cur != NULL) {
int len = cuda_strlen(cur->node) + 1;
memcpy(result_path + result_len, cur->node, len);
result_len += len;
result_path[result_len-1] = '\n';
cur = cur->prev;
}
result_path[result_len-1] = '\0';
return;
}
}
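// deduplicate: detects duplicate states among the newly expanded nodes in T
// using HASH_FUNS independent Jenkins hash functions over a shared table H.
// Each state is swapped into its slot with atomicExch; if an equal node with
// an equal-or-better f-value is found in the evicted entry or under any other
// hash function, the new state is removed from T.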
__global__ void deduplicate(state **H, list *T, const char *t, heur_fun h) {
int id = calculate_id();
for (int i = id; i < T->length; i += blockDim.x * gridDim.x) {
int z = 0;
state *t1 = list_get(T, i);
for (int j = 0; j < HASH_FUNS; j++) {
assert(t1->node != NULL);
state *el = H[jenkins_hash(j, t1->node) % HASH_SIZE];
if (el == NULL || cuda_str_eq(t1->node, el->node)) {
z = j;
break;
}
}
int index = jenkins_hash(z, t1->node) % HASH_SIZE;
t1 = (state*)atomicExch((unsigned long long*)&(H[index]), (unsigned long long)t1);
if (t1 != NULL && cuda_str_eq(t1->node, list_get(T, i)->node) &&
f(list_get(T, i), t, h) >= f(t1, t, h)) {
list_remove(T, i);
continue;
}
t1 = list_get(T, i);
for (int j = 0; j < HASH_FUNS; j++) {
if (j != z) {
state *el = H[jenkins_hash(j, t1->node) % HASH_SIZE];
if (el != NULL && cuda_str_eq(el->node, t1->node) &&
f(list_get(T, i), t, h) >= f(el, t, h)) {
list_remove(T, i);
break;
}
}
}
}
}
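// push_to_queues: computes the f-value of every surviving state in S and
// distributes the states round-robin over the k priority queues, rotating the
// queue assignment by `off` (the iteration counter) to keep the queues balanced.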
__global__ void push_to_queues(const char *t, int k, heap **Q, list *S, heur_fun h, int off) {
for (int i = threadIdx.x; i < S->length; i += blockDim.x) {
state *t1 = list_get(S, i);
if (t1 != NULL) {
t1->f = f(t1, t, h);
heap_insert(Q[(i + off) % k], t1);
atomicAdd(&processed, 1);
atomicAdd(&total_Q_size, 1);
}
__syncthreads();
}
}
__device__ int f(const state *x, const char *t, heur_fun h) {
return x->g + h(x->node, t);
}
void states_pool_create(state **states, char **nodes, int node_size) {
HANDLE_RESULT(hipMalloc(states, STATES * sizeof(state)));
HANDLE_RESULT(hipMalloc(nodes, 3 * STATES * node_size * sizeof(char)));
HANDLE_RESULT(hipMemset(*states, 0, STATES * sizeof(state)));
HANDLE_RESULT(hipMemset(*nodes, 0, 3 * STATES * node_size * sizeof(char)));
}
void states_pool_destroy(state *states_pool, char *nodes_pool) {
HANDLE_RESULT(hipFree(states_pool));
HANDLE_RESULT(hipFree(nodes_pool));
}
char ***expand_bufs_create(int bufs, int elements, int element_size) {
int bufs_size = bufs * sizeof(char**);
char ***bufs_cpu = (char***)malloc(bufs_size);
for (int i = 0; i < bufs; i++) {
bufs_cpu[i] = expand_buf_create(elements, element_size);
}
char ***bufs_gpu;
HANDLE_RESULT(hipMalloc(&bufs_gpu, bufs_size));
HANDLE_RESULT(hipMemcpy(bufs_gpu, bufs_cpu, bufs_size, hipMemcpyDefault));
free(bufs_cpu);
return bufs_gpu;
}
char **expand_buf_create(int elements, int element_size) {
char **buf_cpu = (char**)malloc(elements * sizeof(char*));
for (int i = 0; i < elements; i++) {
HANDLE_RESULT(hipMalloc(&(buf_cpu[i]), element_size));
}
char **buf_gpu;
HANDLE_RESULT(hipMalloc(&buf_gpu, elements * sizeof(char*)));
HANDLE_RESULT(hipMemcpy(buf_gpu, buf_cpu, elements * sizeof(char*),
hipMemcpyDefault));
free(buf_cpu);
return buf_gpu;
}
__device__ int used_states = 0;
__device__ state *state_create(const char *node, int f, int g, state *prev,
state *states_pool, char *nodes_pool, int state_len) {
int index = atomicAdd(&used_states, 1);
if (index >= STATES || (long long)state_len * index >= (1<<30)) {
out_of_memory = 1;
return NULL;
}
state *result = &(states_pool[index]);
memcpy(&(nodes_pool[(unsigned long long)state_len * index]), node, state_len);
result->node = &(nodes_pool[state_len * index]);
result->f = f;
result->g = g;
result->prev = prev;
return result;
}
__device__ int calculate_id() {
return threadIdx.x + blockIdx.x * blockDim.x;
}
| 961e8c9db75929ffc97f77d82cfcab75dc7a5e28.cu | #include <stdio.h>
#include "astar_gpu.h"
#include "heap.h"
#include "list.h"
#include "sliding_puzzle.h"
#include "pathfinding.h"
#include "cuda_utils.h"
#include "assert.h"
#include <vector>
#include <string>
#include <sstream>
#include <algorithm>
#include <fstream>
#include <chrono>
#define STATES (32 * 1024ll * 1024)
#define HASH_SIZE (1024 * 1024)
#define HASH_FUNS 128
__global__ void init_heap(const char *s, heap **Q, state *states_pool, char *nodes_pool, int state_len);
__global__ void clear_list(list *S);
__global__ void fill_list(const char *t, int k, int state_len,
heap **Q, list *S, state *states_pool, char *nodes_pool,
char ***expand_buf, expand_fun expand, heur_fun h, states_delta_fun states_delta);
__global__ void deduplicate(state **H, list *T, const char *t, heur_fun h);
__global__ void push_to_queues(const char *t, int k, heap **Q, list *S, heur_fun h, int off);
__device__ int f(const state *x, const char *t, heur_fun h);
__device__ int calculate_id();
__device__ state *state_create(const char *node, int f, int g, state *prev,
state *states_pool, char *nodes_pool, int state_len);
char ***expand_bufs_create(int bufs, int elements, int element_size);
char **expand_buf_create(int elements, int element_size);
void states_pool_create(state **states, char **nodes, int node_size);
void states_pool_destroy(state *states_pool, char *nodes_pool);
#define THREADS_PER_BLOCK 1024
#define BLOCKS 16
#define RESULT_LEN (1024 * 1024)
__device__ int total_Q_size = 0;
__device__ int found = 0;
__device__ int out_of_memory = 0;
__device__ char result_path[RESULT_LEN];
// Rewrite A* to take in a MAPF object
void astar_gpu_mapf(mapf m, std::vector<std::pair<int, int> > starts, std::fstream &output)
{
// need to do preprocessing work for mapf
// modifying pathfinding_read_input
rows_cpu = m.get_y();
cols_cpu = m.get_x();
// Start and end positions
// I'm assuming this has something to do with mapf goals
std::string s_tmp = "";
std::string t_tmp = "";
std::vector<std::pair<int, int>> goals = m.get_goals();
for(std::vector<std::pair<int, int>>::const_iterator it = goals.begin(); it != goals.end(); ++it) {
t_tmp += to_string(it->first) + ',' + to_string(it->second);
}
for(std::vector<std::pair<int, int>>::const_iterator it = starts.begin(); it != starts.end(); ++it) {
s_tmp += to_string(it->first) + ',' + to_string(it->second);
}
// Wants them as C-strings
const char* s_out = s_tmp.c_str();
const char* t_out = t_tmp.c_str();
// Add obstacles to board
std::vector<std::pair<int, int>> obstacles = m.get_obstacles();
for(std::vector<std::pair<int, int>>::const_iterator it = obstacles.begin(); it != obstacles.end(); ++it) {
board_cpu[it->first][it->second] = -1;
}
// Cells that have connections with weights greater than 1,2
// Not sure if this is applicable to us
char *s_gpu, *t_gpu;
int k = THREADS_PER_BLOCK * BLOCKS;
expand_fun expand_fun_cpu;
heur_fun h_cpu;
states_delta_fun states_delta_cpu;
int expand_elements;
int expand_element_size;
auto start = std::chrono::high_resolution_clock::now();
pathfinding_preprocessing(s_out, t_out, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
state **H;
char ***expand_buf = expand_bufs_create(THREADS_PER_BLOCK * BLOCKS, expand_elements, expand_element_size);
HANDLE_RESULT(cudaMalloc(&H, HASH_SIZE * sizeof(state*)));
HANDLE_RESULT(cudaMemset(H, 0, HASH_SIZE * sizeof(state*)));
heap **Q = heaps_create(k);
list **Ss = lists_create(BLOCKS, 1000000);
list *S = list_create(1024 * 1024);
state *states_pool;
char *nodes_pool;
states_pool_create(&states_pool, &nodes_pool, expand_element_size);
int total_Q_size_cpu;
int found_cpu;
int out_of_memory_cpu;
init_heap<<<1, 1>>>(s_gpu, Q, states_pool, nodes_pool, expand_element_size);
int step = 0;
do {
clear_list<<<1, 1>>>(S);
HANDLE_RESULT(cudaDeviceSynchronize());
fill_list<<<BLOCKS, THREADS_PER_BLOCK>>>(t_gpu, k, expand_element_size, Q, S, states_pool, nodes_pool,
expand_buf, expand_fun_cpu, h_cpu, states_delta_cpu);
HANDLE_RESULT(cudaMemcpyFromSymbol(&found_cpu, found, sizeof(int)));
HANDLE_RESULT(cudaMemcpyFromSymbol(&out_of_memory_cpu, out_of_memory, sizeof(int)));
if (found_cpu) break;
if (out_of_memory_cpu) break;
HANDLE_RESULT(cudaDeviceSynchronize());
deduplicate<<<BLOCKS, THREADS_PER_BLOCK>>>(H, S, t_gpu, h_cpu);
HANDLE_RESULT(cudaDeviceSynchronize());
push_to_queues<<<1, THREADS_PER_BLOCK>>>(t_gpu, k, Q, S, h_cpu, step) ;
HANDLE_RESULT(cudaDeviceSynchronize());
HANDLE_RESULT(cudaMemcpyFromSymbol(&total_Q_size_cpu, total_Q_size, sizeof(int)));
step++;
} while (total_Q_size_cpu > 0);
auto end = std::chrono::high_resolution_clock::now();
auto duration = end - start;
output << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() << "\n";
if (found_cpu) {
char result_path_cpu[RESULT_LEN];
HANDLE_RESULT(cudaMemcpyFromSymbol(result_path_cpu, result_path, RESULT_LEN));
std::string path_str = std::string(result_path_cpu);
std::istringstream path_stream;
path_stream.str(result_path_cpu);
std::vector<std::string> v;
for (std::string line; std::getline(path_stream, line); ) {
v.push_back(line);
}
std::reverse(v.begin(), v.end());
if (version == SLIDING) {
output << sliding_puzzle_postprocessing(v);
} else if (version == PATHFINDING) {
for (std::string path_el: v) {
output << path_el << "\n";
}
}
}
states_pool_destroy(states_pool, nodes_pool);
lists_destroy(Ss, BLOCKS);
heaps_destroy(Q, k);
HANDLE_RESULT(cudaFree(H));
HANDLE_RESULT(cudaDeviceSynchronize());
}
void astar_gpu(const char *s_in, const char *t_in, version_value version, std::fstream &output) {
char *s_gpu, *t_gpu;
int k = THREADS_PER_BLOCK * BLOCKS;
expand_fun expand_fun_cpu;
heur_fun h_cpu;
states_delta_fun states_delta_cpu;
int expand_elements;
int expand_element_size;
auto start = std::chrono::high_resolution_clock::now();
if (version == SLIDING) {
sliding_puzzle_preprocessing(s_in, t_in, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
} else if (version == PATHFINDING) {
pathfinding_preprocessing(s_in, t_in, &s_gpu, &t_gpu, &expand_fun_cpu, &h_cpu, &states_delta_cpu,
&expand_elements, &expand_element_size);
}
state **H;
char ***expand_buf = expand_bufs_create(THREADS_PER_BLOCK * BLOCKS, expand_elements, expand_element_size);
HANDLE_RESULT(cudaMalloc(&H, HASH_SIZE * sizeof(state*)));
HANDLE_RESULT(cudaMemset(H, 0, HASH_SIZE * sizeof(state*)));
heap **Q = heaps_create(k);
list **Ss = lists_create(BLOCKS, 1000000);
list *S = list_create(1024 * 1024);
state *states_pool;
char *nodes_pool;
states_pool_create(&states_pool, &nodes_pool, expand_element_size);
int total_Q_size_cpu;
int found_cpu;
int out_of_memory_cpu;
init_heap<<<1, 1>>>(s_gpu, Q, states_pool, nodes_pool, expand_element_size);
int step = 0;
do {
clear_list<<<1, 1>>>(S);
HANDLE_RESULT(cudaDeviceSynchronize());
fill_list<<<BLOCKS, THREADS_PER_BLOCK>>>(t_gpu, k, expand_element_size, Q, S, states_pool, nodes_pool,
expand_buf, expand_fun_cpu, h_cpu, states_delta_cpu);
HANDLE_RESULT(cudaMemcpyFromSymbol(&found_cpu, found, sizeof(int)));
HANDLE_RESULT(cudaMemcpyFromSymbol(&out_of_memory_cpu, out_of_memory, sizeof(int)));
if (found_cpu) break;
if (out_of_memory_cpu) break;
HANDLE_RESULT(cudaDeviceSynchronize());
deduplicate<<<BLOCKS, THREADS_PER_BLOCK>>>(H, S, t_gpu, h_cpu);
HANDLE_RESULT(cudaDeviceSynchronize());
push_to_queues<<<1, THREADS_PER_BLOCK>>>(t_gpu, k, Q, S, h_cpu, step) ;
HANDLE_RESULT(cudaDeviceSynchronize());
HANDLE_RESULT(cudaMemcpyFromSymbol(&total_Q_size_cpu, total_Q_size, sizeof(int)));
step++;
} while (total_Q_size_cpu > 0);
auto end = std::chrono::high_resolution_clock::now();
auto duration = end - start;
output << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() << "\n";
if (found_cpu) {
char result_path_cpu[RESULT_LEN];
HANDLE_RESULT(cudaMemcpyFromSymbol(result_path_cpu, result_path, RESULT_LEN));
std::string path_str = std::string(result_path_cpu);
std::istringstream path_stream;
path_stream.str(result_path_cpu);
std::vector<std::string> v;
for (std::string line; std::getline(path_stream, line); ) {
v.push_back(line);
}
std::reverse(v.begin(), v.end());
if (version == SLIDING) {
output << sliding_puzzle_postprocessing(v);
} else if (version == PATHFINDING) {
for (std::string path_el: v) {
output << path_el << "\n";
}
}
}
states_pool_destroy(states_pool, nodes_pool);
lists_destroy(Ss, BLOCKS);
heaps_destroy(Q, k);
HANDLE_RESULT(cudaFree(H));
HANDLE_RESULT(cudaDeviceSynchronize());
}
__global__ void init_heap(const char *s, heap **Q, state *states_pool, char *nodes_pool, int state_len) {
heap_insert(Q[0], state_create(s, 0, 0, NULL, states_pool, nodes_pool, state_len));
atomicAdd(&total_Q_size, 1);
}
__device__ int processed = 0;
__device__ int steps = 0;
__device__ int heaps_min_before;
__global__ void clear_list(list *S) {
list_clear(S);
}
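// fill_list: one expansion step of the parallel A* search. Each thread walks a
// strided subset of the k priority queues, pops the best state from each, and
// either keeps it as a goal candidate (if its node equals the target t) or
// expands it, appending the successors to the shared list S. When a goal
// candidate's f-value is no worse than the minimum f across all queues, the
// winning thread (selected with atomicCAS on `found`) reconstructs the path
// backwards through the prev pointers into result_path.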
__global__ void fill_list(const char *t, int k, int state_len,
heap **Q, list *S, state *states_pool, char *nodes_pool,
char ***expand_buf, expand_fun expand, heur_fun h, states_delta_fun states_delta) {
state *m = NULL;
int id = calculate_id();
char **my_expand_buf = expand_buf[id];
if (id == 0)steps++;
for (int i = id; i < k; i += blockDim.x * gridDim.x) {
if (Q[i]->size == 0) continue;
state *q = heap_extract(Q[i]);
atomicSub(&total_Q_size, 1);
if (cuda_str_eq(q->node, t)) {
if (m == NULL || f(q, t, h) < f(m, t, h)) {
m = q;
}
continue;
}
expand(q->node, my_expand_buf);
for (int j = 0; my_expand_buf[j][0] != '\0'; j++) {
int delta = states_delta(q->node, my_expand_buf[j]);
state *new_state = state_create(my_expand_buf[j], -1, q->g + delta, q, states_pool, nodes_pool, state_len);
if (new_state == NULL) return;
list_insert(S, new_state);
}
}
if (m != NULL && f(m, t, h) <= heaps_min(Q, k)) {
int found_before = atomicCAS(&found, 0, 1);
if (found_before == 1) return;
state *cur = m;
int result_len = 0;
while (cur != NULL) {
int len = cuda_strlen(cur->node) + 1;
memcpy(result_path + result_len, cur->node, len);
result_len += len;
result_path[result_len-1] = '\n';
cur = cur->prev;
}
result_path[result_len-1] = '\0';
return;
}
}
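// deduplicate: detects duplicate states among the newly expanded nodes in T
// using HASH_FUNS independent Jenkins hash functions over a shared table H.
// Each state is swapped into its slot with atomicExch; if an equal node with
// an equal-or-better f-value is found in the evicted entry or under any other
// hash function, the new state is removed from T.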
__global__ void deduplicate(state **H, list *T, const char *t, heur_fun h) {
int id = calculate_id();
for (int i = id; i < T->length; i += blockDim.x * gridDim.x) {
int z = 0;
state *t1 = list_get(T, i);
for (int j = 0; j < HASH_FUNS; j++) {
assert(t1->node != NULL);
state *el = H[jenkins_hash(j, t1->node) % HASH_SIZE];
if (el == NULL || cuda_str_eq(t1->node, el->node)) {
z = j;
break;
}
}
int index = jenkins_hash(z, t1->node) % HASH_SIZE;
t1 = (state*)atomicExch((unsigned long long*)&(H[index]), (unsigned long long)t1);
if (t1 != NULL && cuda_str_eq(t1->node, list_get(T, i)->node) &&
f(list_get(T, i), t, h) >= f(t1, t, h)) {
list_remove(T, i);
continue;
}
t1 = list_get(T, i);
for (int j = 0; j < HASH_FUNS; j++) {
if (j != z) {
state *el = H[jenkins_hash(j, t1->node) % HASH_SIZE];
if (el != NULL && cuda_str_eq(el->node, t1->node) &&
f(list_get(T, i), t, h) >= f(el, t, h)) {
list_remove(T, i);
break;
}
}
}
}
}
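// push_to_queues: computes the f-value of every surviving state in S and
// distributes the states round-robin over the k priority queues, rotating the
// queue assignment by `off` (the iteration counter) to keep the queues balanced.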
__global__ void push_to_queues(const char *t, int k, heap **Q, list *S, heur_fun h, int off) {
for (int i = threadIdx.x; i < S->length; i += blockDim.x) {
state *t1 = list_get(S, i);
if (t1 != NULL) {
t1->f = f(t1, t, h);
heap_insert(Q[(i + off) % k], t1);
atomicAdd(&processed, 1);
atomicAdd(&total_Q_size, 1);
}
__syncthreads();
}
}
__device__ int f(const state *x, const char *t, heur_fun h) {
return x->g + h(x->node, t);
}
void states_pool_create(state **states, char **nodes, int node_size) {
HANDLE_RESULT(cudaMalloc(states, STATES * sizeof(state)));
HANDLE_RESULT(cudaMalloc(nodes, 3 * STATES * node_size * sizeof(char)));
HANDLE_RESULT(cudaMemset(*states, 0, STATES * sizeof(state)));
HANDLE_RESULT(cudaMemset(*nodes, 0, 3 * STATES * node_size * sizeof(char)));
}
void states_pool_destroy(state *states_pool, char *nodes_pool) {
HANDLE_RESULT(cudaFree(states_pool));
HANDLE_RESULT(cudaFree(nodes_pool));
}
char ***expand_bufs_create(int bufs, int elements, int element_size) {
int bufs_size = bufs * sizeof(char**);
char ***bufs_cpu = (char***)malloc(bufs_size);
for (int i = 0; i < bufs; i++) {
bufs_cpu[i] = expand_buf_create(elements, element_size);
}
char ***bufs_gpu;
HANDLE_RESULT(cudaMalloc(&bufs_gpu, bufs_size));
HANDLE_RESULT(cudaMemcpy(bufs_gpu, bufs_cpu, bufs_size, cudaMemcpyDefault));
free(bufs_cpu);
return bufs_gpu;
}
char **expand_buf_create(int elements, int element_size) {
char **buf_cpu = (char**)malloc(elements * sizeof(char*));
for (int i = 0; i < elements; i++) {
HANDLE_RESULT(cudaMalloc(&(buf_cpu[i]), element_size));
}
char **buf_gpu;
HANDLE_RESULT(cudaMalloc(&buf_gpu, elements * sizeof(char*)));
HANDLE_RESULT(cudaMemcpy(buf_gpu, buf_cpu, elements * sizeof(char*),
cudaMemcpyDefault));
free(buf_cpu);
return buf_gpu;
}
__device__ int used_states = 0;
__device__ state *state_create(const char *node, int f, int g, state *prev,
state *states_pool, char *nodes_pool, int state_len) {
int index = atomicAdd(&used_states, 1);
if (index >= STATES || (long long)state_len * index >= (1<<30)) {
out_of_memory = 1;
return NULL;
}
state *result = &(states_pool[index]);
memcpy(&(nodes_pool[(unsigned long long)state_len * index]), node, state_len);
result->node = &(nodes_pool[state_len * index]);
result->f = f;
result->g = g;
result->prev = prev;
return result;
}
__device__ int calculate_id() {
return threadIdx.x + blockIdx.x * blockDim.x;
}
|
4e374637338272c8e0ffdcdf46e1c2d01664aeff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <vector>
#include <algorithm>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <hipsparse.h>
#include "timer.h"
#include "grid.h"
#include "device.h"
using namespace rfgpu;
#define cusparse_check_rv(func) \
if (rv!=HIPSPARSE_STATUS_SUCCESS) { \
char msg[1024]; \
sprintf(msg, "cusparse error: %s returned %d", func, rv); \
throw std::runtime_error(msg); \
}
#define array_dim_check(func,array,expected) { \
if (array.dims() != expected) { \
char msg[1024]; \
sprintf(msg, "%s: array dimension error", func); \
throw std::invalid_argument(msg); \
} \
if (array.has_device(_device)==false) { \
char msg[1024]; \
sprintf(msg, "%s: array not defined on device %d", func, _device); \
throw std::invalid_argument(msg); \
}\
}
Grid::Grid(int _nbl, int _nchan, int _ntime, int _upix, int _vpix, int device)
: G_cols(false), shift(false), OnDevice(device) // Any GPU-only arrays go here
{
nbl = _nbl;
nchan = _nchan;
ntime = _ntime;
upix = _upix;
vpix = _vpix;
h_one = make_float2(1.0,0.0);
h_zero = make_float2(0.0,0.0);
hipsparseStatus_t rv;
rv = hipsparseCreate(&sparse);
cusparse_check_rv("hipsparseCreate");
rv = hipsparseCreateMatDescr(&descr);
cusparse_check_rv("hipsparseCreateMatDescr");
cell = 80.0; // 80 wavelengths == ~42' FoV
maxshift = 0;
#ifdef USETIMER
timers["grid"] = new Timer();
timers["cols"] = new Timer();
timers["ds"] = new Timer();
timers["compute"] = new Timer();
timers["conj"] = new Timer();
#endif
allocate();
reset_device();
}
void Grid::allocate() {
u.resize(nbl);
v.resize(nbl);
freq.resize(nchan);
G_vals.resize(ncol());
G_rows.resize(nrow()+1);
G_cols.resize(ncol());
G_cols0.resize(ncol());
G_chan.resize(ncol());
G_pix.resize(ncol());
shift.resize(nchan);
conj.resize(nbl);
}
void Grid::set_uv(const std::vector<float> &_u, const std::vector<float> &_v) {
if (_u.size()!=nbl || _v.size()!=nbl) {
char msg[1024];
sprintf(msg, "Grid::set_uv array size error (u=%d v=%d nbl=%d)",
_u.size(), _v.size(), nbl);
throw std::invalid_argument(msg);
}
for (int i=0; i<nbl; i++) {
u[i] = _u[i];
v[i] = _v[i];
}
}
void Grid::set_freq(const std::vector<float> &_freq) {
if (_freq.size()!=nchan) {
char msg[1024];
sprintf(msg, "Grid::set_freq array size error (freq=%d nchan=%d)",
_freq.size(), nchan);
throw std::invalid_argument(msg);
}
for (int i=0; i<nchan; i++) { freq[i] = _freq[i]; }
}
void Grid::set_shift(const std::vector<int> &_shift) {
if (_shift.size()!=nchan) {
char msg[1024];
sprintf(msg, "Grid::set_shift array size error (shift=%d nchan=%d)",
_shift.size(), nchan);
throw std::invalid_argument(msg);
}
int _maxshift=0;
for (int i=0; i<nchan; i++) {
if (_shift[i]>_maxshift) { _maxshift=_shift[i]; }
if (_shift[i]<0) {
char msg[1024];
sprintf(msg, "Grid::set_shift negative shift not allowed "
"(ichan=%d shift=%d)", i, _shift[i]);
throw std::invalid_argument(msg);
}
}
if (_maxshift>ntime) {
char msg[1024];
sprintf(msg,
"Grid::set_shift max shift out of range (maxshift=%d ntime=%d)",
_maxshift, ntime);
throw std::invalid_argument(msg);
}
maxshift = _maxshift;
CheckDevice cd(this);
hipMemcpy(shift.d, _shift.data(), shift.size(), hipMemcpyHostToDevice);
}
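// compute: builds the gridding operator as a sparse matrix. On the host, each
// (baseline, channel) visibility is mapped to a UV-plane pixel (baselines with
// negative v are flagged for conjugation); on the GPU the resulting COO entries
// are sorted by row and compressed to CSR via the hipSPARSE calls below, and
// the matrix values are filled with a natural-weighting factor of 1/(2*nnz).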
void Grid::compute() {
CheckDevice cd(this);
//printf("nrow=%d ncol=%d\n", nrow(), ncol());
IFTIMER( timers["compute"]->start(); )
// compute grid pix location for each input vis
nnz = 0;
for (int ibl=0; ibl<nbl; ibl++) {
for (int ichan=0; ichan<nchan; ichan++) {
int x = round((u[ibl]*freq[ichan])/cell);
int y = round((v[ibl]*freq[ichan])/cell);
if (y<0) { y*=-1; x*=-1; conj.h[ibl]=1; }
else { conj.h[ibl]=0; }
if (x<=upix/2 && x>=-upix/2 && y<vpix && y>=0) {
if (x<0) x += upix;
G_pix.h[nnz] = x*vpix + y;
G_cols0.h[nnz] = ibl*nchan + ichan;
nnz++;
}
}
}
G_pix.h2d();
G_cols0.h2d();
conj.h2d();
hipsparseStatus_t rv;
// on GPU, sort and compress into CSR matrix format
size_t pbuf_size;
rv = hipsparseXcoosort_bufferSizeExt(sparse, nrow(), ncol(), nnz,
G_pix.d, G_cols0.d, &pbuf_size);
cusparse_check_rv("hipsparseXcoosort_bufferSizeExt");
Array<char> pbuf(pbuf_size,false);
Array<int> perm(nnz,false);
rv = hipsparseCreateIdentityPermutation(sparse, nnz, perm.d);
cusparse_check_rv("hipsparseCreateIdentityPermutation");
rv = hipsparseXcoosortByRow(sparse, nrow(), ncol(), nnz,
G_pix.d, G_cols0.d, perm.d, (void *)pbuf.d);
cusparse_check_rv("hipsparseXcoosortByRow");
rv = hipsparseXcoo2csr(sparse, G_pix.d, nnz, nrow(), G_rows.d,
HIPSPARSE_INDEX_BASE_ZERO);
cusparse_check_rv("hipsparseXcoo2csr");
// Fill in normalization factors
G_rows.d2h();
for (int i=0; i<nrow(); i++) {
for (int j=G_rows.h[i]; j<G_rows.h[i+1]; j++) {
// This is something like uniform weighting:
//G_vals.h[j].x = 1.0/((float)G_rows.h[i+1] - (float)G_rows.h[i]);
// This is natural weighting:
G_vals.h[j].x = 1.0/(2.0*nnz);
G_vals.h[j].y = 0.0;
}
}
G_vals.h2d();
// retrieve channel idx of each data point
G_cols0.d2h();
for (int i=0; i<nnz; i++) { G_chan.h[i] = G_cols0.h[i] % nchan; }
G_chan.h2d();
IFTIMER( timers["compute"]->stop(); )
}
// Call with nbl thread blocks
__global__ void conjugate_data(cdata *dat, int *conj, int nchan, int ntime) {
const int ibl = blockIdx.x;
const int offs = ibl*nchan*ntime;
if (conj[ibl]) {
for (int i=threadIdx.x; i<nchan*ntime; i+=blockDim.x) {
dat[offs+i].y *= -1.0;
}
}
}
void Grid::conjugate(Array<cdata> &data) {
array_dim_check("Grid::conjugate", data, indim());
CheckDevice cd(this);
IFTIMER( timers["conj"]->start(); )
hipLaunchKernelGGL(( conjugate_data), dim3(nbl),dim3(512), 0, 0, data.dd[_device], conj.d, nchan, ntime);
IFTIMER( timers["conj"]->stop(); )
}
// Call with nbl thread blocks
// TODO there may be problems if ntime is not divisible by 2
__global__ void downsample_data(cdata *dat, int nchan, int ntime) {
const int ibl = blockIdx.x;
const int offs = ibl*nchan*ntime;
for (int ichan=0; ichan<nchan; ichan++) {
for (int itime=2*threadIdx.x; itime<ntime; itime+=2*blockDim.x) {
const int ii = offs + ichan*ntime + itime;
float2 x0 = dat[ii];
float2 x1 = dat[ii+1];
const int oo = offs + ichan*ntime + itime/2;
__syncthreads();
dat[oo].x = 0.5*(x0.x + x1.x);
dat[oo].y = 0.5*(x0.y + x1.y);
}
}
}
void Grid::downsample(Array<cdata> &data) {
array_dim_check("Grid::downsample", data, indim());
CheckDevice cd(this);
IFTIMER( timers["ds"]->start(); )
hipLaunchKernelGGL(( downsample_data), dim3(nbl),dim3(512), 0, 0, data.dd[_device], nchan, ntime);
IFTIMER( timers["ds"]->stop(); )
}
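// adjust_cols: rewrites the sparse-matrix column indices for one output time
// sample. Each entry's column becomes its (baseline, channel) index * ntime
// plus the per-channel time shift plus itime, so the following csrmv gathers
// every channel from its shifted time slice. The shift table is staged in
// shared memory (hard-capped at 2048 channels, as noted in the TODO).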
__global__ void adjust_cols(int *ocol, int *icol, int *chan,
int *shift, int itime, int nchan, int nnz, int ntime) {
const int ii = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int lshift[2048]; // max nchan=2048 TODO
for (int i=threadIdx.x; i<nchan; i+=blockDim.x) {
lshift[i] = shift[i];
}
__syncthreads();
if (ii<nnz) { ocol[ii] = icol[ii]*ntime + lshift[chan[ii]] + itime; }
}
void Grid::operate(Array<cdata> &in, Array<cdata> &out, int itime) {
array_dim_check("Grid::operate(in)", in, indim());
array_dim_check("Grid::operate(out)", out, outdim());
operate(in.dd[_device], out.dd[_device], itime);
}
void Grid::operate(cdata *in, cdata *out, int itime) {
if ((itime+maxshift)>=ntime) {
char msg[1024];
sprintf(msg,
"Grid::operate itime(%d)+maxshift(%d) >= ntime(%d)",
itime, maxshift, ntime);
throw std::invalid_argument(msg);
}
CheckDevice cd(this);
// Need to keep n threads per block less than 1024
// Can we automatically get max thread per block?
int nthread = 512;
int nblock = (nbl*nchan)/nthread;
if ((nbl*nchan)%nthread) { nblock++; }
IFTIMER( timers["cols"]->start(); )
hipLaunchKernelGGL(( adjust_cols), dim3(nblock), dim3(nthread), 0, 0, G_cols.d, G_cols0.d, G_chan.d, shift.d,
itime, nchan, nnz, ntime);
IFTIMER( timers["cols"]->stop(); )
hipsparseStatus_t rv;
IFTIMER( timers["grid"]->start(); )
rv = hipsparseCcsrmv(sparse, HIPSPARSE_OPERATION_NON_TRANSPOSE,
nrow(), ncol()*ntime, nnz, &h_one, descr,
G_vals.d, G_rows.d, G_cols.d,
in, &h_zero, out);
IFTIMER (timers["grid"]->stop(); )
cusparse_check_rv("hipsparseCcsrmv");
}
| 4e374637338272c8e0ffdcdf46e1c2d01664aeff.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <vector>
#include <algorithm>
#include <stdexcept>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
#include <cusparse.h>
#include "timer.h"
#include "grid.h"
#include "device.h"
using namespace rfgpu;
#define cusparse_check_rv(func) \
if (rv!=CUSPARSE_STATUS_SUCCESS) { \
char msg[1024]; \
sprintf(msg, "cusparse error: %s returned %d", func, rv); \
throw std::runtime_error(msg); \
}
#define array_dim_check(func,array,expected) { \
if (array.dims() != expected) { \
char msg[1024]; \
sprintf(msg, "%s: array dimension error", func); \
throw std::invalid_argument(msg); \
} \
if (array.has_device(_device)==false) { \
char msg[1024]; \
sprintf(msg, "%s: array not defined on device %d", func, _device); \
throw std::invalid_argument(msg); \
}\
}
Grid::Grid(int _nbl, int _nchan, int _ntime, int _upix, int _vpix, int device)
: G_cols(false), shift(false), OnDevice(device) // Any GPU-only arrays go here
{
nbl = _nbl;
nchan = _nchan;
ntime = _ntime;
upix = _upix;
vpix = _vpix;
h_one = make_float2(1.0,0.0);
h_zero = make_float2(0.0,0.0);
cusparseStatus_t rv;
rv = cusparseCreate(&sparse);
cusparse_check_rv("cusparseCreate");
rv = cusparseCreateMatDescr(&descr);
cusparse_check_rv("cusparseCreateMatDescr");
cell = 80.0; // 80 wavelengths == ~42' FoV
maxshift = 0;
#ifdef USETIMER
timers["grid"] = new Timer();
timers["cols"] = new Timer();
timers["ds"] = new Timer();
timers["compute"] = new Timer();
timers["conj"] = new Timer();
#endif
allocate();
reset_device();
}
void Grid::allocate() {
u.resize(nbl);
v.resize(nbl);
freq.resize(nchan);
G_vals.resize(ncol());
G_rows.resize(nrow()+1);
G_cols.resize(ncol());
G_cols0.resize(ncol());
G_chan.resize(ncol());
G_pix.resize(ncol());
shift.resize(nchan);
conj.resize(nbl);
}
void Grid::set_uv(const std::vector<float> &_u, const std::vector<float> &_v) {
if (_u.size()!=nbl || _v.size()!=nbl) {
char msg[1024];
sprintf(msg, "Grid::set_uv array size error (u=%d v=%d nbl=%d)",
_u.size(), _v.size(), nbl);
throw std::invalid_argument(msg);
}
for (int i=0; i<nbl; i++) {
u[i] = _u[i];
v[i] = _v[i];
}
}
void Grid::set_freq(const std::vector<float> &_freq) {
if (_freq.size()!=nchan) {
char msg[1024];
sprintf(msg, "Grid::set_freq array size error (freq=%d nchan=%d)",
_freq.size(), nchan);
throw std::invalid_argument(msg);
}
for (int i=0; i<nchan; i++) { freq[i] = _freq[i]; }
}
void Grid::set_shift(const std::vector<int> &_shift) {
if (_shift.size()!=nchan) {
char msg[1024];
sprintf(msg, "Grid::set_shift array size error (shift=%d nchan=%d)",
_shift.size(), nchan);
throw std::invalid_argument(msg);
}
int _maxshift=0;
for (int i=0; i<nchan; i++) {
if (_shift[i]>_maxshift) { _maxshift=_shift[i]; }
if (_shift[i]<0) {
char msg[1024];
sprintf(msg, "Grid::set_shift negative shift not allowed "
"(ichan=%d shift=%d)", i, _shift[i]);
throw std::invalid_argument(msg);
}
}
if (_maxshift>ntime) {
char msg[1024];
sprintf(msg,
"Grid::set_shift max shift out of range (maxshift=%d ntime=%d)",
_maxshift, ntime);
throw std::invalid_argument(msg);
}
maxshift = _maxshift;
CheckDevice cd(this);
cudaMemcpy(shift.d, _shift.data(), shift.size(), cudaMemcpyHostToDevice);
}
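// Build the sparse gridding matrix: map each (baseline, channel) visibility to a grid
// pixel on the host, then sort and compress it to CSR on the GPU and fill in the
// normalization weights.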
void Grid::compute() {
CheckDevice cd(this);
//printf("nrow=%d ncol=%d\n", nrow(), ncol());
IFTIMER( timers["compute"]->start(); )
// compute grid pix location for each input vis
nnz = 0;
for (int ibl=0; ibl<nbl; ibl++) {
for (int ichan=0; ichan<nchan; ichan++) {
int x = round((u[ibl]*freq[ichan])/cell);
int y = round((v[ibl]*freq[ichan])/cell);
if (y<0) { y*=-1; x*=-1; conj.h[ibl]=1; }
else { conj.h[ibl]=0; }
if (x<=upix/2 && x>=-upix/2 && y<vpix && y>=0) {
if (x<0) x += upix;
G_pix.h[nnz] = x*vpix + y;
G_cols0.h[nnz] = ibl*nchan + ichan;
nnz++;
}
}
}
G_pix.h2d();
G_cols0.h2d();
conj.h2d();
cusparseStatus_t rv;
// on GPU, sort and compress into CSR matrix format
size_t pbuf_size;
rv = cusparseXcoosort_bufferSizeExt(sparse, nrow(), ncol(), nnz,
G_pix.d, G_cols0.d, &pbuf_size);
cusparse_check_rv("cusparseXcoosort_bufferSizeExt");
Array<char> pbuf(pbuf_size,false);
Array<int> perm(nnz,false);
rv = cusparseCreateIdentityPermutation(sparse, nnz, perm.d);
cusparse_check_rv("cusparseCreateIdentityPermutation");
rv = cusparseXcoosortByRow(sparse, nrow(), ncol(), nnz,
G_pix.d, G_cols0.d, perm.d, (void *)pbuf.d);
cusparse_check_rv("cusparseXcoosortByRow");
rv = cusparseXcoo2csr(sparse, G_pix.d, nnz, nrow(), G_rows.d,
CUSPARSE_INDEX_BASE_ZERO);
cusparse_check_rv("cusparseXcoo2csr");
// Fill in normalization factors
G_rows.d2h();
for (int i=0; i<nrow(); i++) {
for (int j=G_rows.h[i]; j<G_rows.h[i+1]; j++) {
// This is something like uniform weighting:
//G_vals.h[j].x = 1.0/((float)G_rows.h[i+1] - (float)G_rows.h[i]);
// This is natural weighting:
G_vals.h[j].x = 1.0/(2.0*nnz);
G_vals.h[j].y = 0.0;
}
}
G_vals.h2d();
// retrieve channel idx of each data point
G_cols0.d2h();
for (int i=0; i<nnz; i++) { G_chan.h[i] = G_cols0.h[i] % nchan; }
G_chan.h2d();
IFTIMER( timers["compute"]->stop(); )
}
// Call with nbl thread blocks
__global__ void conjugate_data(cdata *dat, int *conj, int nchan, int ntime) {
const int ibl = blockIdx.x;
const int offs = ibl*nchan*ntime;
if (conj[ibl]) {
for (int i=threadIdx.x; i<nchan*ntime; i+=blockDim.x) {
dat[offs+i].y *= -1.0;
}
}
}
void Grid::conjugate(Array<cdata> &data) {
array_dim_check("Grid::conjugate", data, indim());
CheckDevice cd(this);
IFTIMER( timers["conj"]->start(); )
conjugate_data<<<nbl,512>>>(data.dd[_device], conj.d, nchan, ntime);
IFTIMER( timers["conj"]->stop(); )
}
// Call with nbl thread blocks
// TODO there may be problems if ntime is not divisible by 2
__global__ void downsample_data(cdata *dat, int nchan, int ntime) {
const int ibl = blockIdx.x;
const int offs = ibl*nchan*ntime;
for (int ichan=0; ichan<nchan; ichan++) {
for (int itime=2*threadIdx.x; itime<ntime; itime+=2*blockDim.x) {
const int ii = offs + ichan*ntime + itime;
float2 x0 = dat[ii];
float2 x1 = dat[ii+1];
const int oo = offs + ichan*ntime + itime/2;
__syncthreads();
dat[oo].x = 0.5*(x0.x + x1.x);
dat[oo].y = 0.5*(x0.y + x1.y);
}
}
}
void Grid::downsample(Array<cdata> &data) {
array_dim_check("Grid::downsample", data, indim());
CheckDevice cd(this);
IFTIMER( timers["ds"]->start(); )
downsample_data<<<nbl,512>>>(data.dd[_device], nchan, ntime);
IFTIMER( timers["ds"]->stop(); )
}
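// Kernel: rewrite each nonzero's column index into the time-expanded input, adding the
// requested time sample plus that channel's shift.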
__global__ void adjust_cols(int *ocol, int *icol, int *chan,
int *shift, int itime, int nchan, int nnz, int ntime) {
const int ii = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ int lshift[2048]; // max nchan=2048 TODO
for (int i=threadIdx.x; i<nchan; i+=blockDim.x) {
lshift[i] = shift[i];
}
__syncthreads();
if (ii<nnz) { ocol[ii] = icol[ii]*ntime + lshift[chan[ii]] + itime; }
}
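// Apply the gridding matrix to time sample itime (out = G * in); this overload checks
// array dimensions, then calls the raw-pointer version below.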
void Grid::operate(Array<cdata> &in, Array<cdata> &out, int itime) {
array_dim_check("Grid::operate(in)", in, indim());
array_dim_check("Grid::operate(out)", out, outdim());
operate(in.dd[_device], out.dd[_device], itime);
}
void Grid::operate(cdata *in, cdata *out, int itime) {
if ((itime+maxshift)>=ntime) {
char msg[1024];
sprintf(msg,
"Grid::operate itime(%d)+maxshift(%d) >= ntime(%d)",
itime, maxshift, ntime);
throw std::invalid_argument(msg);
}
CheckDevice cd(this);
// Need to keep n threads per block less than 1024
// Can we automatically get max thread per block?
int nthread = 512;
int nblock = (nbl*nchan)/nthread;
if ((nbl*nchan)%nthread) { nblock++; }
IFTIMER( timers["cols"]->start(); )
adjust_cols<<<nblock, nthread>>>(G_cols.d, G_cols0.d, G_chan.d, shift.d,
itime, nchan, nnz, ntime);
IFTIMER( timers["cols"]->stop(); )
cusparseStatus_t rv;
IFTIMER( timers["grid"]->start(); )
rv = cusparseCcsrmv(sparse, CUSPARSE_OPERATION_NON_TRANSPOSE,
nrow(), ncol()*ntime, nnz, &h_one, descr,
G_vals.d, G_rows.d, G_cols.d,
in, &h_zero, out);
IFTIMER (timers["grid"]->stop(); )
cusparse_check_rv("cusparseCcsrmv");
}
|
0b7707350667aee7ede54b7915ac96487d62b452.hip | // !!! This is a file automatically generated by hipify!!!
#include "hipfft.h"
#include <iostream>
#define C2R 1
#define R2C 2
#define C2C 3
#define Z2D 5
#define D2Z 6
#define Z2Z 7
#define _FROMTO FROMTO
#if _FROMTO == Z2Z
#define TO_TYPE hipfftDoubleComplex
#define FROM_TYPE hipfftDoubleComplex
#define FROMTO_STR "double precision complex-to-complex"
#elif _FROMTO == D2Z
#define TO_TYPE hipfftDoubleComplex
#define FROM_TYPE hipfftDoubleReal
#define FROMTO_STR "double precision real-to-complex"
#elif _FROMTO == Z2D
#define TO_TYPE hipfftDoubleReal
#define FROM_TYPE hipfftDoubleComplex
#define FROMTO_STR "double precision complex-to-real"
#elif _FROMTO == C2C
#define TO_TYPE hipfftComplex
#define FROM_TYPE hipfftComplex
#define FROMTO_STR "single precision complex-to-complex"
#elif _FROMTO == R2C
#define TO_TYPE hipfftComplex
#define FROM_TYPE hipfftReal
#define FROMTO_STR "single precision real-to-complex"
#elif _FROMTO == C2R
#define TO_TYPE hipfftReal
#define FROM_TYPE hipfftComplex
#define FROMTO_STR "single precision complex-to-real"
#else
#error "FROMTO must be one of Z2Z, Z2D, D2Z, C2C, R2C and C2R"
#endif
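// Overload set: choose the 2-D FFT plan type (C2C, R2C, C2R, Z2Z, D2Z, Z2D) from the
// input/output element types so the benchmark body stays type-agnostic.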
template <class A, class B>
hipfftResult_t CUFFTPLAN2D(hipfftHandle *plan, int size_x, int size_y,
A* in, B* out);
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleComplex* in, hipfftDoubleComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_Z2Z);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleReal* in, hipfftDoubleComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_D2Z);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftDoubleComplex* in, hipfftDoubleReal* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_Z2D);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftComplex* in, hipfftComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_C2C);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftReal* in, hipfftComplex* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_R2C);
}
hipfftResult_t CUFFTPLAN2D( hipfftHandle *plan, int size_x, int size_y,
hipfftComplex* in, hipfftReal* out) {
return hipfftPlan2d(plan, size_x, size_y, HIPFFT_C2R);
}
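// Overload set: dispatch to the transform-specific execution call for each type pair.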
template <class A, class B>
hipfftResult_t CUFFTEXEC(hipfftHandle plan, A* in, B* out);
hipfftResult_t CUFFTEXEC ( hipfftHandle plan, hipfftDoubleComplex* in,
hipfftDoubleComplex* out) {
return hipfftExecZ2Z(plan, in, out, HIPFFT_FORWARD);
}
hipfftResult_t CUFFTEXEC( hipfftHandle plan, hipfftDoubleReal* in,
hipfftDoubleComplex* out) {
return hipfftExecD2Z(plan, in, out);
}
hipfftResult_t CUFFTEXEC( hipfftHandle plan, hipfftDoubleComplex* in,
hipfftDoubleReal* out) {
return hipfftExecZ2D(plan, in, out);
}
hipfftResult_t CUFFTEXEC( hipfftHandle plan, hipfftComplex* in,
hipfftComplex* out) {
return hipfftExecC2C(plan, in, out, HIPFFT_FORWARD);
}
hipfftResult_t CUFFTEXEC( hipfftHandle plan, hipfftReal* in,
hipfftComplex* out) {
return hipfftExecR2C(plan, in, out);
}
hipfftResult_t CUFFTEXEC( hipfftHandle plan, hipfftComplex* in,
hipfftReal* out) {
return hipfftExecC2R(plan, in, out);
}
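// Overload set: query the work-area size (in bytes) a plan of the given type requires.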
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftComplex* in, hipfftComplex* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_C2C, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftComplex* in, hipfftReal* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_C2R, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftReal* in, hipfftComplex* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_R2C, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftDoubleComplex* in, hipfftDoubleComplex* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_Z2Z, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftDoubleComplex* in, hipfftDoubleReal* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_Z2D, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( hipfftHandle plan, int size_x, int size_y, hipfftDoubleReal* in, hipfftDoubleComplex* out) {
size_t work_size;
hipfftGetSize2d( plan, size_x, size_y, HIPFFT_D2Z, &work_size);
return work_size;
}
int main(int argc, char** argv) {
int NX=10016, NY=10016;
int size = NX*NY;
float elapsed;
hipfftHandle plan;
FROM_TYPE *data1;
hipMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
#ifndef INPLACE
TO_TYPE *data2;
hipMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
#endif
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipError_t err;
hipfftResult_t r;
err = hipGetLastError();
if (err) std::cout << "Error in initial copy" << std::endl;
std::cin >> NX >> NY;
std::cout << "**** " << FROMTO_STR << " ****" << std::endl;
std::cout << "dx, dy, storage(MB), elapsed, Gcell/s, Gflps" << std::endl;
#ifdef INPLACE
#define TARGET data1
#else
#define TARGET data2
#endif
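// Benchmark loop: read NX NY pairs from stdin until 0 is entered; for each size, build a
// plan, run the transform 5 times, and report timing and throughput.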
while( NX != 0) {
if (NX*NY > size) {
std::cout << std::endl << "***Reallocating to " << NX << " x " << NY
<< " = " << sizeof(FROM_TYPE)*NX*NY/1024/1024 << " Mbytes "
<< std::endl;
hipFree(data1); data1=0;
err = hipMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
if(!data1 || err) std::cout << "Failed to allocate data1! : "
<< std::endl
<< hipGetErrorString(err) << std::endl;
#ifndef INPLACE
hipFree(data2); data2=0;
err = hipMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
if(!data2 || err) std::cout << "Failed to allocate data1! : "
<< std::endl << hipGetErrorString(err)
<< std::endl;
#endif
size = NX*NY;
}
r = CUFFTPLAN2D(&plan, NX, NY, data1, TARGET);
if (r != HIPFFT_SUCCESS) std::cout << "Failed to create cufft plan!"
<< std::endl
<< "Error message: " << r << std::endl;
hipEventRecord(start);
for (int z=0; z< 5; z++)
if (!r) r = CUFFTEXEC(plan, data1, TARGET);
if (r || hipGetLastError()) std::cout << "Failure in CUFFTEXEC"
<< std::endl;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
err = hipGetLastError();
if (err) std::cout << NX << ", " << NY << " - Error " << err <<" : " <<
hipGetErrorString(err) << std::endl;
else if (r) std::cout << NX << ", " << NY << " - CUFFT Error " << r <<
std::endl;
else std::cout << NX << ", " << NY << ", "
<< CUFFTGETSIZE2D(plan, NX, NY, data1, TARGET)/1000000<<", "
<< elapsed/5 << ", "
<< 5*NX*NY/elapsed/1000/1000 << ", "
<< 5/elapsed/1000/1000*NX*NY*(log2(NX+0.000)+log2(NY+0.000))
<< std::endl;
hipfftDestroy(plan);
std::cin >> NX >> NY;
}
std::cout << "0, 0" << std::endl;
//printf("(%d,%d) - Error %d: %s\n", NX, NY, err, hipGetErrorString(err));
hipFree(data1);
#ifndef INPLACE
hipFree(data2);
#endif
hipEventDestroy(start); hipEventDestroy(stop);
return 0;
}
| 0b7707350667aee7ede54b7915ac96487d62b452.cu | #include "cufft.h"
#include <iostream>
#define C2R 1
#define R2C 2
#define C2C 3
#define Z2D 5
#define D2Z 6
#define Z2Z 7
#define _FROMTO FROMTO
#if _FROMTO == Z2Z
#define TO_TYPE cufftDoubleComplex
#define FROM_TYPE cufftDoubleComplex
#define FROMTO_STR "double precision complex-to-complex"
#elif _FROMTO == D2Z
#define TO_TYPE cufftDoubleComplex
#define FROM_TYPE cufftDoubleReal
#define FROMTO_STR "double precision real-to-complex"
#elif _FROMTO == Z2D
#define TO_TYPE cufftDoubleReal
#define FROM_TYPE cufftDoubleComplex
#define FROMTO_STR "double precision complex-to-real"
#elif _FROMTO == C2C
#define TO_TYPE cufftComplex
#define FROM_TYPE cufftComplex
#define FROMTO_STR "single precision complex-to-complex"
#elif _FROMTO == R2C
#define TO_TYPE cufftComplex
#define FROM_TYPE cufftReal
#define FROMTO_STR "single precision real-to-complex"
#elif _FROMTO == C2R
#define TO_TYPE cufftReal
#define FROM_TYPE cufftComplex
#define FROMTO_STR "single precision complex-to-real"
#else
#error "FROMTO must be one of Z2Z, Z2D, D2Z, C2C, R2C and C2R"
#endif
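// Overload set: choose the 2-D FFT plan type (C2C, R2C, C2R, Z2Z, D2Z, Z2D) from the
// input/output element types so the benchmark body stays type-agnostic.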
template <class A, class B>
cufftResult_t CUFFTPLAN2D(cufftHandle *plan, int size_x, int size_y,
A* in, B* out);
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleComplex* in, cufftDoubleComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_Z2Z);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleReal* in, cufftDoubleComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_D2Z);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftDoubleComplex* in, cufftDoubleReal* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_Z2D);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftComplex* in, cufftComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_C2C);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftReal* in, cufftComplex* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_R2C);
}
cufftResult_t CUFFTPLAN2D( cufftHandle *plan, int size_x, int size_y,
cufftComplex* in, cufftReal* out) {
return cufftPlan2d(plan, size_x, size_y, CUFFT_C2R);
}
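// Overload set: dispatch to the transform-specific execution call for each type pair.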
template <class A, class B>
cufftResult_t CUFFTEXEC(cufftHandle plan, A* in, B* out);
cufftResult_t CUFFTEXEC ( cufftHandle plan, cufftDoubleComplex* in,
cufftDoubleComplex* out) {
return cufftExecZ2Z(plan, in, out, CUFFT_FORWARD);
}
cufftResult_t CUFFTEXEC( cufftHandle plan, cufftDoubleReal* in,
cufftDoubleComplex* out) {
return cufftExecD2Z(plan, in, out);
}
cufftResult_t CUFFTEXEC( cufftHandle plan, cufftDoubleComplex* in,
cufftDoubleReal* out) {
return cufftExecZ2D(plan, in, out);
}
cufftResult_t CUFFTEXEC( cufftHandle plan, cufftComplex* in,
cufftComplex* out) {
return cufftExecC2C(plan, in, out, CUFFT_FORWARD);
}
cufftResult_t CUFFTEXEC( cufftHandle plan, cufftReal* in,
cufftComplex* out) {
return cufftExecR2C(plan, in, out);
}
cufftResult_t CUFFTEXEC( cufftHandle plan, cufftComplex* in,
cufftReal* out) {
return cufftExecC2R(plan, in, out);
}
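// Overload set: query the work-area size (in bytes) a plan of the given type requires.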
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftComplex* in, cufftComplex* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_C2C, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftComplex* in, cufftReal* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_C2R, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftReal* in, cufftComplex* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_R2C, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftDoubleComplex* in, cufftDoubleComplex* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_Z2Z, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftDoubleComplex* in, cufftDoubleReal* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_Z2D, &work_size);
return work_size;
}
size_t CUFFTGETSIZE2D( cufftHandle plan, int size_x, int size_y, cufftDoubleReal* in, cufftDoubleComplex* out) {
size_t work_size;
cufftGetSize2d( plan, size_x, size_y, CUFFT_D2Z, &work_size);
return work_size;
}
int main(int argc, char** argv) {
int NX=10016, NY=10016;
int size = NX*NY;
float elapsed;
cufftHandle plan;
FROM_TYPE *data1;
cudaMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
#ifndef INPLACE
TO_TYPE *data2;
cudaMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
#endif
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaError_t err;
cufftResult_t r;
err = cudaGetLastError();
if (err) std::cout << "Error in initial copy" << std::endl;
std::cin >> NX >> NY;
std::cout << "**** " << FROMTO_STR << " ****" << std::endl;
std::cout << "dx, dy, storage(MB), elapsed, Gcell/s, Gflps" << std::endl;
#ifdef INPLACE
#define TARGET data1
#else
#define TARGET data2
#endif
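// Benchmark loop: read NX NY pairs from stdin until 0 is entered; for each size, build a
// plan, run the transform 5 times, and report timing and throughput.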
while( NX != 0) {
if (NX*NY > size) {
std::cout << std::endl << "***Reallocating to " << NX << " x " << NY
<< " = " << sizeof(FROM_TYPE)*NX*NY/1024/1024 << " Mbytes "
<< std::endl;
cudaFree(data1); data1=0;
err = cudaMalloc(&data1, sizeof(FROM_TYPE)*NX*NY);
if(!data1 || err) std::cout << "Failed to allocate data1! : "
<< std::endl
<< cudaGetErrorString(err) << std::endl;
#ifndef INPLACE
cudaFree(data2); data2=0;
err = cudaMalloc(&data2, sizeof(TO_TYPE)*NX*NY);
if(!data2 || err) std::cout << "Failed to allocate data1! : "
<< std::endl << cudaGetErrorString(err)
<< std::endl;
#endif
size = NX*NY;
}
r = CUFFTPLAN2D(&plan, NX, NY, data1, TARGET);
if (r != CUFFT_SUCCESS) std::cout << "Failed to create cufft plan!"
<< std::endl
<< "Error message: " << r << std::endl;
cudaEventRecord(start);
for (int z=0; z< 5; z++)
if (!r) r = CUFFTEXEC(plan, data1, TARGET);
if (r || cudaGetLastError()) std::cout << "Failure in CUFFTEXEC"
<< std::endl;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
err = cudaGetLastError();
if (err) std::cout << NX << ", " << NY << " - Error " << err <<" : " <<
cudaGetErrorString(err) << std::endl;
else if (r) std::cout << NX << ", " << NY << " - CUFFT Error " << r <<
std::endl;
else std::cout << NX << ", " << NY << ", "
<< CUFFTGETSIZE2D(plan, NX, NY, data1, TARGET)/1000000<<", "
<< elapsed/5 << ", "
<< 5*NX*NY/elapsed/1000/1000 << ", "
<< 5/elapsed/1000/1000*NX*NY*(log2(NX+0.000)+log2(NY+0.000))
<< std::endl;
cufftDestroy(plan);
std::cin >> NX >> NY;
}
std::cout << "0, 0" << std::endl;
//printf("(%d,%d) - Error %d: %s\n", NX, NY, err, cudaGetErrorString(err));
cudaFree(data1);
#ifndef INPLACE
cudaFree(data2);
#endif
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
}
|
904d8fb38a62ec5d61f476c4def9a37a3177d3f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "poisson2d.hpp"
#include "timer.hpp"
#include <algorithm>
#include <string>
#include <vector>
#include <iostream>
#include <stdio.h>
// Block and grid size defines.
// Separate defines are really just for future convenience...
#define BLOCK_SIZE 512
#define GRID_SIZE 512
#define SEP ";"
//#define DEBUG
// y = A * x
__global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets,
int *csr_colindices, double *csr_values,
double *x, double *y)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
double sum = 0;
for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) {
sum += csr_values[k] * x[csr_colindices[k]];
}
y[i] = sum;
}
}
// x <- x + alpha * y
__global__ void cuda_vecadd(int N, double *x, double *y, double alpha)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[i] += alpha * y[i];
}
// x <- y + alpha * x
__global__ void cuda_vecadd2(int N, double *x, double *y, double alpha)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[i] = y[i] + alpha * x[i];
}
// result = (x, y)
__global__ void cuda_dot_product(int N, double *x, double *y, double *result)
{
__shared__ double shared_mem[BLOCK_SIZE];
double dot = 0;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
dot += x[i] * y[i];
}
shared_mem[threadIdx.x] = dot;
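  // Tree reduction over shared memory; after the loop, thread 0 holds the block's
  // partial sum and adds it to the global result atomically.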
for (int k = blockDim.x / 2; k > 0; k /= 2) {
__syncthreads();
if (threadIdx.x < k) {
shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k];
}
}
if (threadIdx.x == 0) atomicAdd(result, shared_mem[0]);
}
/** Implementation of the conjugate gradient algorithm.
*
* The control flow is handled by the CPU.
* Only the individual operations (vector updates, dot products, sparse
* matrix-vector product) are transferred to CUDA kernels.
*
* The temporary arrays p, r, and Ap need to be allocated on the GPU for use
* with CUDA. Modify as you see fit.
*/
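// The "line N" comments inside the loop below appear to refer to the standard CG
// pseudocode, roughly:
//   line 4:     Ap = A * p
//   lines 5,6:  alpha = (r, r) / (p, Ap)
//   line 7:     x = x + alpha * p
//   line 8:     r = r - alpha * Ap
//   lines 9,10: recompute (r, r); stop when the relative residual drops below 1e-6
//   line 11:    beta = (r, r)_new / (r, r)_old
//   line 12:    p = r + beta * p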
int conjugate_gradient(int N, // number of unknowns
int *csr_rowoffsets, int *csr_colindices,
double *csr_values, double *rhs, double *solution)
//, double *init_guess) // feel free to add a nonzero initial guess as needed
{
// initialize timer
Timer timer;
// clear solution vector (it may contain garbage values):
std::fill(solution, solution + N, 0);
// initialize work vectors:
double alpha, beta;
double *cuda_solution, *cuda_p, *cuda_r, *cuda_Ap, *cuda_scalar;
hipMalloc(&cuda_p, sizeof(double) * N);
hipMalloc(&cuda_r, sizeof(double) * N);
hipMalloc(&cuda_Ap, sizeof(double) * N);
hipMalloc(&cuda_solution, sizeof(double) * N);
hipMalloc(&cuda_scalar, sizeof(double));
hipMemcpy(cuda_p, rhs, sizeof(double) * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_r, rhs, sizeof(double) * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_solution, solution, sizeof(double) * N, hipMemcpyHostToDevice);
const double zero = 0;
double residual_norm_squared = 0;
hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_r, cuda_scalar);
hipMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost);
double initial_residual_squared = residual_norm_squared;
int iters = 0;
hipDeviceSynchronize();
timer.reset();
while (1) {
// line 4: A*p:
hipLaunchKernelGGL(( cuda_csr_matvec_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap);
// lines 5,6:
hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_p, cuda_Ap, cuda_scalar);
hipMemcpy(&alpha, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost);
alpha = residual_norm_squared / alpha;
// line 7:
hipLaunchKernelGGL(( cuda_vecadd), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_solution, cuda_p, alpha);
// line 8:
hipLaunchKernelGGL(( cuda_vecadd), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_Ap, -alpha);
// line 9:
beta = residual_norm_squared;
hipMemcpy(cuda_scalar, &zero, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_dot_product), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_r, cuda_r, cuda_scalar);
hipMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost);
// line 10:
if (std::sqrt(residual_norm_squared / initial_residual_squared) < 1e-6) {
break;
}
// line 11:
beta = residual_norm_squared / beta;
// line 12:
hipLaunchKernelGGL(( cuda_vecadd2), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_p, cuda_r, beta);
if (iters > 10000)
break; // solver didn't converge
++iters;
}
hipMemcpy(solution, cuda_solution, sizeof(double) * N, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#ifdef DEBUG
std::cout << "Time elapsed: " << timer.get() << " (" << timer.get() / iters << " per iteration)" << std::endl;
if (iters > 10000)
std::cout << "Conjugate Gradient did NOT converge within 10000 iterations"
<< std::endl;
else
std::cout << "Conjugate Gradient converged in " << iters << " iterations."
<< std::endl;
#endif
hipFree(cuda_p);
hipFree(cuda_r);
hipFree(cuda_Ap);
hipFree(cuda_solution);
hipFree(cuda_scalar);
return iters;
}
/** Solve a system with `points_per_direction * points_per_direction` unknowns
*/
void solve_system(int points_per_direction) {
Timer timer;
int N = points_per_direction *
points_per_direction; // number of unknowns to solve for
#ifdef DEBUG
std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl;
#endif
//
// Allocate CSR arrays.
//
// Note: Usually one does not know the number of nonzeros in the system matrix
// a-priori.
// For this exercise, however, we know that there are at most 5 nonzeros
// per row in the system matrix, so we can allocate accordingly.
//
int *csr_rowoffsets = (int *)malloc(sizeof(double) * (N + 1));
int *csr_colindices = (int *)malloc(sizeof(double) * 5 * N);
double *csr_values = (double *)malloc(sizeof(double) * 5 * N);
int *cuda_csr_rowoffsets, *cuda_csr_colindices;
double *cuda_csr_values;
//
// fill CSR matrix with values
//
generate_fdm_laplace(points_per_direction, csr_rowoffsets, csr_colindices,
csr_values);
//
// Allocate solution vector and right hand side:
//
double *solution = (double *)malloc(sizeof(double) * N);
double *rhs = (double *)malloc(sizeof(double) * N);
std::fill(rhs, rhs + N, 1);
//
// Allocate CUDA-arrays //
//
hipMalloc(&cuda_csr_rowoffsets, sizeof(double) * (N + 1));
hipMalloc(&cuda_csr_colindices, sizeof(double) * 5 * N);
hipMalloc(&cuda_csr_values, sizeof(double) * 5 * N);
hipMemcpy(cuda_csr_rowoffsets, csr_rowoffsets, sizeof(double) * (N + 1), hipMemcpyHostToDevice);
hipMemcpy(cuda_csr_colindices, csr_colindices, sizeof(double) * 5 * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_csr_values, csr_values, sizeof(double) * 5 * N, hipMemcpyHostToDevice);
//
// Call Conjugate Gradient implementation with GPU arrays
//
timer.reset();
int iters = conjugate_gradient(N, cuda_csr_rowoffsets, cuda_csr_colindices, cuda_csr_values, rhs, solution);
double runtime = timer.get();
//
// Check for convergence:
//
double residual_norm = relative_residual(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution);
#ifdef DEBUG
std::cout << "Relative residual norm: " << residual_norm
<< " (should be smaller than 1e-6)" << std::endl;
#endif
#ifndef DEBUG
std::cout << points_per_direction << SEP
<< N << SEP
<< runtime << SEP
<< iters << SEP
<< residual_norm << std::endl;
#endif
hipFree(cuda_csr_rowoffsets);
hipFree(cuda_csr_colindices);
hipFree(cuda_csr_values);
free(solution);
free(rhs);
free(csr_rowoffsets);
free(csr_colindices);
free(csr_values);
}
int main() {
std::vector<size_t> p_per_dir{ (size_t)sqrt(1e3), (size_t)sqrt(1e4), (size_t)sqrt(1e5), (size_t)sqrt(1e6), (size_t)sqrt(4e6)};
#ifndef DEBUG
std::cout << "p" << SEP "N" << SEP << "time" << SEP << "iters" << SEP << "norm_after" << std::endl;
#endif
for (auto& points: p_per_dir)
solve_system(points); // solves a system with points*points unknowns
return EXIT_SUCCESS;
}
| 904d8fb38a62ec5d61f476c4def9a37a3177d3f6.cu | #include "poisson2d.hpp"
#include "timer.hpp"
#include <algorithm>
#include <string>
#include <vector>
#include <iostream>
#include <stdio.h>
// Block and grid size defines.
// Separate defines are really just for future convenience...
#define BLOCK_SIZE 512
#define GRID_SIZE 512
#define SEP ";"
//#define DEBUG
// y = A * x
__global__ void cuda_csr_matvec_product(int N, int *csr_rowoffsets,
int *csr_colindices, double *csr_values,
double *x, double *y)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
double sum = 0;
for (int k = csr_rowoffsets[i]; k < csr_rowoffsets[i + 1]; k++) {
sum += csr_values[k] * x[csr_colindices[k]];
}
y[i] = sum;
}
}
// x <- x + alpha * y
__global__ void cuda_vecadd(int N, double *x, double *y, double alpha)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[i] += alpha * y[i];
}
// x <- y + alpha * x
__global__ void cuda_vecadd2(int N, double *x, double *y, double alpha)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[i] = y[i] + alpha * x[i];
}
// result = (x, y)
__global__ void cuda_dot_product(int N, double *x, double *y, double *result)
{
__shared__ double shared_mem[BLOCK_SIZE];
double dot = 0;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
dot += x[i] * y[i];
}
shared_mem[threadIdx.x] = dot;
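  // Tree reduction over shared memory; after the loop, thread 0 holds the block's
  // partial sum and adds it to the global result atomically.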
for (int k = blockDim.x / 2; k > 0; k /= 2) {
__syncthreads();
if (threadIdx.x < k) {
shared_mem[threadIdx.x] += shared_mem[threadIdx.x + k];
}
}
if (threadIdx.x == 0) atomicAdd(result, shared_mem[0]);
}
/** Implementation of the conjugate gradient algorithm.
*
* The control flow is handled by the CPU.
* Only the individual operations (vector updates, dot products, sparse
* matrix-vector product) are transferred to CUDA kernels.
*
* The temporary arrays p, r, and Ap need to be allocated on the GPU for use
* with CUDA. Modify as you see fit.
*/
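// The "line N" comments inside the loop below appear to refer to the standard CG
// pseudocode, roughly:
//   line 4:     Ap = A * p
//   lines 5,6:  alpha = (r, r) / (p, Ap)
//   line 7:     x = x + alpha * p
//   line 8:     r = r - alpha * Ap
//   lines 9,10: recompute (r, r); stop when the relative residual drops below 1e-6
//   line 11:    beta = (r, r)_new / (r, r)_old
//   line 12:    p = r + beta * p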
int conjugate_gradient(int N, // number of unknowns
int *csr_rowoffsets, int *csr_colindices,
double *csr_values, double *rhs, double *solution)
//, double *init_guess) // feel free to add a nonzero initial guess as needed
{
// initialize timer
Timer timer;
// clear solution vector (it may contain garbage values):
std::fill(solution, solution + N, 0);
// initialize work vectors:
double alpha, beta;
double *cuda_solution, *cuda_p, *cuda_r, *cuda_Ap, *cuda_scalar;
cudaMalloc(&cuda_p, sizeof(double) * N);
cudaMalloc(&cuda_r, sizeof(double) * N);
cudaMalloc(&cuda_Ap, sizeof(double) * N);
cudaMalloc(&cuda_solution, sizeof(double) * N);
cudaMalloc(&cuda_scalar, sizeof(double));
cudaMemcpy(cuda_p, rhs, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_r, rhs, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_solution, solution, sizeof(double) * N, cudaMemcpyHostToDevice);
const double zero = 0;
double residual_norm_squared = 0;
cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice);
cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_r, cuda_scalar);
cudaMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost);
double initial_residual_squared = residual_norm_squared;
int iters = 0;
cudaDeviceSynchronize();
timer.reset();
while (1) {
// line 4: A*p:
cuda_csr_matvec_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, csr_rowoffsets, csr_colindices, csr_values, cuda_p, cuda_Ap);
// lines 5,6:
cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice);
cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_p, cuda_Ap, cuda_scalar);
cudaMemcpy(&alpha, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost);
alpha = residual_norm_squared / alpha;
// line 7:
cuda_vecadd<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_solution, cuda_p, alpha);
// line 8:
cuda_vecadd<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_Ap, -alpha);
// line 9:
beta = residual_norm_squared;
cudaMemcpy(cuda_scalar, &zero, sizeof(double), cudaMemcpyHostToDevice);
cuda_dot_product<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_r, cuda_r, cuda_scalar);
cudaMemcpy(&residual_norm_squared, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost);
// line 10:
if (std::sqrt(residual_norm_squared / initial_residual_squared) < 1e-6) {
break;
}
// line 11:
beta = residual_norm_squared / beta;
// line 12:
cuda_vecadd2<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_p, cuda_r, beta);
if (iters > 10000)
break; // solver didn't converge
++iters;
}
cudaMemcpy(solution, cuda_solution, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#ifdef DEBUG
std::cout << "Time elapsed: " << timer.get() << " (" << timer.get() / iters << " per iteration)" << std::endl;
if (iters > 10000)
std::cout << "Conjugate Gradient did NOT converge within 10000 iterations"
<< std::endl;
else
std::cout << "Conjugate Gradient converged in " << iters << " iterations."
<< std::endl;
#endif
cudaFree(cuda_p);
cudaFree(cuda_r);
cudaFree(cuda_Ap);
cudaFree(cuda_solution);
cudaFree(cuda_scalar);
return iters;
}
/** Solve a system with `points_per_direction * points_per_direction` unknowns
*/
void solve_system(int points_per_direction) {
Timer timer;
int N = points_per_direction *
points_per_direction; // number of unknowns to solve for
#ifdef DEBUG
std::cout << "Solving Ax=b with " << N << " unknowns." << std::endl;
#endif
//
// Allocate CSR arrays.
//
// Note: Usually one does not know the number of nonzeros in the system matrix
// a-priori.
// For this exercise, however, we know that there are at most 5 nonzeros
// per row in the system matrix, so we can allocate accordingly.
//
int *csr_rowoffsets = (int *)malloc(sizeof(double) * (N + 1));
int *csr_colindices = (int *)malloc(sizeof(double) * 5 * N);
double *csr_values = (double *)malloc(sizeof(double) * 5 * N);
int *cuda_csr_rowoffsets, *cuda_csr_colindices;
double *cuda_csr_values;
//
// fill CSR matrix with values
//
generate_fdm_laplace(points_per_direction, csr_rowoffsets, csr_colindices,
csr_values);
//
// Allocate solution vector and right hand side:
//
double *solution = (double *)malloc(sizeof(double) * N);
double *rhs = (double *)malloc(sizeof(double) * N);
std::fill(rhs, rhs + N, 1);
//
// Allocate CUDA-arrays //
//
cudaMalloc(&cuda_csr_rowoffsets, sizeof(double) * (N + 1));
cudaMalloc(&cuda_csr_colindices, sizeof(double) * 5 * N);
cudaMalloc(&cuda_csr_values, sizeof(double) * 5 * N);
cudaMemcpy(cuda_csr_rowoffsets, csr_rowoffsets, sizeof(double) * (N + 1), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_csr_colindices, csr_colindices, sizeof(double) * 5 * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_csr_values, csr_values, sizeof(double) * 5 * N, cudaMemcpyHostToDevice);
//
// Call Conjugate Gradient implementation with GPU arrays
//
timer.reset();
int iters = conjugate_gradient(N, cuda_csr_rowoffsets, cuda_csr_colindices, cuda_csr_values, rhs, solution);
double runtime = timer.get();
//
// Check for convergence:
//
double residual_norm = relative_residual(N, csr_rowoffsets, csr_colindices, csr_values, rhs, solution);
#ifdef DEBUG
std::cout << "Relative residual norm: " << residual_norm
<< " (should be smaller than 1e-6)" << std::endl;
#endif
#ifndef DEBUG
std::cout << points_per_direction << SEP
<< N << SEP
<< runtime << SEP
<< iters << SEP
<< residual_norm << std::endl;
#endif
cudaFree(cuda_csr_rowoffsets);
cudaFree(cuda_csr_colindices);
cudaFree(cuda_csr_values);
free(solution);
free(rhs);
free(csr_rowoffsets);
free(csr_colindices);
free(csr_values);
}
int main() {
std::vector<size_t> p_per_dir{ (size_t)sqrt(1e3), (size_t)sqrt(1e4), (size_t)sqrt(1e5), (size_t)sqrt(1e6), (size_t)sqrt(4e6)};
#ifndef DEBUG
std::cout << "p" << SEP "N" << SEP << "time" << SEP << "iters" << SEP << "norm_after" << std::endl;
#endif
for (auto& points: p_per_dir)
solve_system(points); // solves a system with points*points unknowns
return EXIT_SUCCESS;
}
|
b00e85cee5bdc5f1aa01f17f79dab57b6b4247d5.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Chain_loc v 1.0 is the source code for a mex file that takes image data and parameter estimators as input and outputs localized data
 * Calling this function in MATLAB looks like
* [xf_all, yf_all, N, off_all, sigx, sigy, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv, framenum_all] = chain_loc( i1, numthreads, angle(in rads))
* written by Andrew Nelson Version v 1.1 on 5/2/15
*/
/*
Version 1.2 has had substantial debugging and careful comparison against the CPU version. Removed extra diagnostic code that had been commented out and has no use in regular code
At this point we can be assured that the algorithms for calculating position, total area, and offset are working properly on the GPU
This version is usable with the localization code Quhzx_01_3.m
Fixed
Error codes should not be given off when inputs and outputs don't match expected arguments
Fixed a problem considerably prolonging computation time by removing gpu device invocation early in the main loop ( i.e. reset the gpu, synchronize threads, initialize gpu ....)
Added redundant void __global__ loops to handle multiple sizes of input images
*/
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#define PI 3.14159265358979323846
/*
* Device code
*
 * To facilitate coding (for me) I have copied the localization algorithm so it can be used with multiple sizes of areas
*/
/*
Device Functions
*/
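// Determinant of the 6x6 Fisher information matrix (36 entries) via cofactor expansion,
// needed when inverting the matrix for the CRLB estimates.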
__device__ double device_det(double Fisher[36])
{
double det;
det = Fisher[0] * (Fisher[7] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) + Fisher[19] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[25] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) + Fisher[31] * (Fisher[8] * 
(Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])))) - Fisher[6] * (Fisher[1] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * 
Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])))) + Fisher[12] * (Fisher[1] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) 
+ Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[18] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * 
(Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) + Fisher[24] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * 
Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) 
+ Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[30] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - 
Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))));
return det;
}
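/*
 (added note) device_det above is a fully unrolled cofactor expansion of the 6x6
 Fisher-matrix determinant. The sketch below shows an equivalent, more compact
 determinant via Gaussian elimination with partial pivoting. The helper name
 device_det_lu is hypothetical and is not referenced anywhere else in this file;
 it is included only to clarify what the expanded expression computes.
*/
__device__ double device_det_lu(double A[36])
{
	// NOTE: A is reduced in place, so pass a scratch copy if the matrix is still needed.
	double det = 1.0;
	for (int k = 0; k < 6; k++){
		int piv = k;                                   // partial pivoting: pick the largest entry in column k
		for (int r = k + 1; r < 6; r++){
			if (fabs(A[r * 6 + k]) > fabs(A[piv * 6 + k])){ piv = r; }
		}
		if (piv != k){                                 // swap rows k and piv; each swap flips the sign
			for (int c = 0; c < 6; c++){
				double tmp = A[k * 6 + c];
				A[k * 6 + c] = A[piv * 6 + c];
				A[piv * 6 + c] = tmp;
			}
			det = -det;
		}
		if (A[k * 6 + k] == 0.0){ return 0.0; }        // singular matrix
		det *= A[k * 6 + k];                           // determinant is the product of the pivots
		for (int r = k + 1; r < 6; r++){               // eliminate the entries below the pivot
			double f = A[r * 6 + k] / A[k * 6 + k];
			for (int c = k; c < 6; c++){
				A[r * 6 + c] -= f * A[k * 6 + c];
			}
		}
	}
	return det;
}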
/*
Global Functions
*/
// localize
__global__ void localize(double *d_iall,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_llv,
double ang,
int lpcnt,
int numi)
{
// Declare variables
	const int pix = 5;					// number of pixels along one edge of the square fitting window (5 x 5 region)
	__shared__ double xgrid[pix*pix];			// keep xgrid and ygrid in the shared memory of each block
	__shared__ double ygrid[pix*pix];			// this reduces calls to global device memory
double dudx, dudy, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2, dudn, dudo, Ex, Ey, u;
double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_sx, dd_sy, dd_n, dd_o, x, y;
// these variables will exist on the register of each thread
double d_beta1[6] = {0, 0, 0, 0, 0, 0};
int tx = threadIdx.x;
int index = blockIdx.x*blockDim.x + tx; // calculate thread index
double d_i2[pix*pix]; // initialize data for image
double llv;
double fisher[36] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
double det_fish = 0.0;
	// create xgrid and ygrid; the grid is built regardless of whether this thread's index maps to an image
if (tx == 0){
for (int i = 0; i <pix; i++){
for(int j = 0; j <pix; j++){
x = (double)j - ((double)pix-1)/2;
y = (double)i - ((double)pix-1)/2;
xgrid[j*pix + i] = x*cos(ang) - y*sin(ang);
ygrid[j*pix + i] = x*sin(ang) + y*cos(ang);
}
}
	}
	__syncthreads();	// (added) make sure the shared xgrid/ygrid written by thread 0 are visible to every thread before they are read below
if (index < numi){ // check to see that threads only work if an image exists
// buffer all the variables into shared memory and registers and build guesses
d_beta1[0] = 0.0;
d_beta1[1] = 0.0;
d_beta1[2] = 0.0;
d_beta1[3] = 1; // guess on sigma widths
d_beta1[4] = 1; // guess on sigma widths
d_beta1[5] = 100000;
for (int i = 0; i <pix*pix; i++) {
			d_i2[i] = d_iall[i + index*pix*pix];		// this buffers the 25 pixels of image 'index' into d_i2; the thread index determines which image is analyzed
d_beta1[0] +=xgrid[i]*d_i2[i]; // sum of x and image weight
d_beta1[1] +=ygrid[i]*d_i2[i]; // sum of y and image weight
d_beta1[2] +=d_i2[i]; // image sum
if (d_beta1[5] > d_i2[i]){d_beta1[5] = d_i2[i];} // find minimum of image
}
d_beta1[0] = d_beta1[0] / d_beta1[2];
d_beta1[1] = d_beta1[1] / d_beta1[2];
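		// (added note) The two divisions above complete an intensity-weighted centroid:
		//     x0 = sum_i( xgrid[i] * I[i] ) / sum_i( I[i] ),   y0 likewise,
		// while d_beta1[2] holds the summed intensity (the guess for N) and d_beta1[5]
		// holds the minimum pixel value (the guess for the offset).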
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < lpcnt; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
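			// (added note) The model being fitted is a pixel-integrated 2D Gaussian:
			//     Ex = 0.5*( erf((x - x0 + 0.5)/(sqrt(2)*sigx)) - erf((x - x0 - 0.5)/(sqrt(2)*sigx)) )
			//     Ey = 0.5*( erf((y - y0 + 0.5)/(sqrt(2)*sigy)) - erf((y - y0 - 0.5)/(sqrt(2)*sigy)) )
			//     u  = N*Ex*Ey + offset
			// with (x0, y0, N, sigx, sigy, offset) stored in d_beta1[0..5].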
for (int rowcount = 0; rowcount < pix; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < pix; colcount++){ // FOR 3 loops over all columns
					// xgrid and ygrid are column major (they come from MATLAB) and the image buffer d_i2 is column major as well
					// these three lines define the fitting Gaussian as given by the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
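					// (added note) u = N*Ex*Ey + offset is linear in N and in the offset,
					// so du/dN collapses to Ex*Ey and du/doffset is identically 1.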
// second derivatives
					// these are calculated in a similar manner to the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*pix] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*pix] / powf(u, 2.0);
if (counttry == lpcnt-1){ // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
							fisher[10] += dudy*dudsx / u;
							fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount+colcount*pix] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*pix]*log(d_i2[rowcount + colcount*pix] + 0.0000000000000001) + d_i2[rowcount + colcount*pix];
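						// (added note) llv accumulates the Poisson log-likelihood ratio per pixel,
						//     llv += i*ln(u) - u - i*ln(i) + i,
						// with 1e-16 added inside each log to guard against ln(0).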
}
} // END FOR 3
} // END FOR2
			// apply the Newton-Raphson corrections to the parameter estimates
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
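			// (added note) The six updates above are Newton-Raphson steps: each parameter is
			// corrected by -(dL/dtheta)/(d2L/dtheta2), where the d_* sums are the first
			// derivatives and the dd_* sums are the second derivatives of the log-likelihood
			// accumulated over the 25 pixels.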
} // end FOR 1
		if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // NaN check: a value compares unequal to itself only if it is NaN
if (d_beta1[2] > 0 && d_beta1[0] >= -(double)pix/2 && d_beta1[0] <= (double)pix/2 && d_beta1[1] <= (double)pix/2 && d_beta1[1] >= -(double)pix/2 ){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] ;
d_yf_all[index] = d_beta1[1] ;
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_llv[index] = llv;
/*d_xf_all[index] = testi ; */
/*d_N[index] = testi;
d_sigx[index] = testi;
d_sigy[index] = testi;
d_off[index] =testi;
d_llv[index] =testi;*/
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
				det_fish = device_det(fisher);		// the expanded cofactor expressions below were generated with homemade Python scripts (cofacs.py and text_det.py) and checked against lower-rank matrices
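				// (added note) Each CRLB below is a diagonal element of the inverse Fisher
				// information matrix, evaluated as cofactor(i,i)/det(F); the cofactors are
				// written out explicitly instead of inverting F numerically on the device.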
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0]*(fisher[7]*(fisher[14]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])+fisher[32]*(fisher[15]*fisher[23]-fisher[21]*fisher[17]))-fisher[13]*(fisher[8]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[23]-fisher[21]*fisher[11]))+fisher[19]*(fisher[8]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[31]*(fisher[8]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])+fisher[20]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])))-fisher[6]*(fisher[1]*(fisher[14]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])+fisher[32]*(fisher[15]*fisher[23]-fisher[21]*fisher[17]))-fisher[13]*(fisher[2]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[23]-fisher[21]*fisher[5]))+fisher[19]*(fisher[2]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])))+fisher[12]*(fisher[1]*(fisher[8]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[23]-fisher[21]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[23]-fisher[21]*fisher[5]))+fisher[19]*(fisher[2]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])-fisher[8]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])-fisher[8]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[11]-fisher[9]*fisher[5])))-fisher[18]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])-fisher[8]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])-fisher[8]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])+fisher[14]*(fisher[3]*fisher[11]-fisher[9]*fisher[5])))+fisher[30]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])+fisher[20]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])-fisher[8]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[19]*(fisher[2]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])-fish
er[8]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])+fisher[14]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))))/det_fish;
d_sigy_crlb[index] = -(-(fisher[0]*(fisher[7]*(fisher[14]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])+fisher[26]*(fisher[15]*fisher[22]-fisher[21]*fisher[16]))-fisher[13]*(fisher[8]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[22]-fisher[21]*fisher[10]))+fisher[19]*(fisher[8]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[25]*(fisher[8]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])+fisher[20]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])))-fisher[6]*(fisher[1]*(fisher[14]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])+fisher[26]*(fisher[15]*fisher[22]-fisher[21]*fisher[16]))-fisher[13]*(fisher[2]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[22]-fisher[21]*fisher[4]))+fisher[19]*(fisher[2]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])))+fisher[12]*(fisher[1]*(fisher[8]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[22]-fisher[21]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[22]-fisher[21]*fisher[4]))+fisher[19]*(fisher[2]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])-fisher[8]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])-fisher[8]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[10]-fisher[9]*fisher[4])))-fisher[18]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])-fisher[8]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])-fisher[8]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])+fisher[14]*(fisher[3]*fisher[10]-fisher[9]*fisher[4])))+fisher[24]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])+fisher[20]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])-fisher[8]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[19]*(fisher[2]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])-f
isher[8]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])+fisher[14]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))))/det_fish);
/*d_xf_crlb[index] = testi;
d_yf_crlb[index] = testi;
d_N_crlb[index] = testi;
d_off_crlb[index] = testi;
d_sigx_crlb[index] = testi;
d_sigy_crlb[index] = testi;*/
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = llv;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = llv;
} // end else fail statement
}
} // end localize 7
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
double *iall; // the pointer to the array of all images to be analyzed
double *d_iall; // Pointer to image array on gpu
double angle;
double *d_xf_all;
double *d_yf_all;
double *d_sigx_all;
double *d_sigy_all;
double *d_N;
double *d_off;
double *d_llv;
double *d_xf_crlb;
double *d_yf_crlb;
double *d_N_crlb;
double *d_off_crlb;
double *d_sigx_crlb;
double * d_sigy_crlb;
double *xf, *xfc, *yf, *yfc, *n, *nc, *sigx, *sigxc, *sigy, *sigyc, *off, *offc, *llv;
size_t threadsperblock;
	int irow;				// number of rows in the image array, i.e. pixels per vectorized image (expected to be 25 = 5 x 5)
int numi; // number of images imported
int lpcnt; // number of loops to perform the MLE calculation over
const size_t *idims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 4) {
printf("Must have 4 inputs ( i1, numthreads, angle(in rads), MLE loop count)\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])){
printf("i1 must be a nxm double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])){
printf("angle must be a DOUBLE\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
	iall = (double *)mxGetPr(prhs[0]);		// MATLAB linearizes arrays in column-major order, which affects indexing (Writing MATLAB C/MEX Code - ResearchGate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
irow = (int)idims[0];
numi = (int)idims[1];
angle = (double)mxGetScalar(prhs[2]);
lpcnt = (int)mxGetScalar(prhs[3]);
if (numi > 1000000 || numi < 1){
numi = 1;
}
int imem = irow*numi*sizeof(double);
int vmem = numi*sizeof(double);
// verify that the input variables are what was expected
if (irow != 25){
printf("Images are of incorrect size. There must be a square number of rows in the entry.\n");
mexErrMsgTxt("See Error above!\n");
}
if (nlhs != 13){
printf("You must have 13 output variables [xf_all, xf_crlb, yf_all, yf_crlb, N, N_crlb, sigx_all, sigx_crlb, sigy_all, sigy_crlb, off_all, off_crlb, llv_all]\n");
mexErrMsgTxt("See Error above!\n");
}
// allocate memory and copy it onto the gpu device
// iall
hipMalloc((void**)&d_iall, imem); // allocate image memory
	hipMemcpy(d_iall, iall, imem, hipMemcpyHostToDevice);	// copy images from host to device
/*
checkCudaErrors(hipMalloc((void**)&d_iall, imem)); // allocate image memory
	checkCudaErrors(hipMemcpy(d_iall, iall, imem, hipMemcpyHostToDevice)); // copy images from host to device
*/
// allocate memory for fitted variables that will be returned from device
hipMalloc((void**)&d_xf_all , vmem); // allocate xf_all memory
hipMalloc((void**)&d_xf_crlb , vmem); // allocate xf_crlb memory
hipMalloc((void**)&d_yf_all , vmem); // allocate yf_all memory
hipMalloc((void**)&d_yf_crlb , vmem); // allocate yf_crlb memory
hipMalloc((void**)&d_sigx_all , vmem); // allocate sigx memory
hipMalloc((void**)&d_sigx_crlb, vmem); // allocate sigx_crlb memory
hipMalloc((void**)&d_sigy_all , vmem); // allocate sigy memory
hipMalloc((void**)&d_sigy_crlb, vmem); // allocate sigy_crlb memory
hipMalloc((void**)&d_N , vmem); // allocate N memory
hipMalloc((void**)&d_N_crlb , vmem); // allocate N_crlb memory
hipMalloc((void**)&d_off , vmem); // allocate off memory
	hipMalloc((void**)&d_off_crlb , vmem);	// allocate off_crlb memory
hipMalloc((void**)&d_llv , vmem); // allocate llv memory
/*
checkCudaErrors(hipMalloc((void**)&d_xf_all , vmem)); // allocate xf_all memory
checkCudaErrors(hipMalloc((void**)&d_xf_crlb , vmem)); // allocate xf_crlb memory
checkCudaErrors(hipMalloc((void**)&d_yf_all , vmem)); // allocate yf_all memory
checkCudaErrors(hipMalloc((void**)&d_yf_crlb , vmem)); // allocate yf_crlb memory
checkCudaErrors(hipMalloc((void**)&d_sigx_all , vmem)); // allocate sigx memory
checkCudaErrors(hipMalloc((void**)&d_sigx_crlb, vmem)); // allocate sigx_crlb memory
checkCudaErrors(hipMalloc((void**)&d_sigy_all , vmem)); // allocate sigy memory
checkCudaErrors(hipMalloc((void**)&d_sigy_crlb, vmem)); // allocate sigy_crlb memory
checkCudaErrors(hipMalloc((void**)&d_N , vmem)); // allocate N memory
checkCudaErrors(hipMalloc((void**)&d_N_crlb , vmem)); // allocate N_crlb memory
checkCudaErrors(hipMalloc((void**)&d_off , vmem)); // allocate off memory
	checkCudaErrors(hipMalloc((void**)&d_off_crlb , vmem));	// allocate off_crlb memory
checkCudaErrors(hipMalloc((void**)&d_llv , vmem)); // allocate llv memory
*/
/* Run GPU kernel*/
	threadsperblock = mxGetScalar(prhs[1]);  // get number of threads per block from MATLAB
	localize<<<((numi - 1) / threadsperblock + 1), threadsperblock>>>(d_iall, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_llv, angle, lpcnt, numi);
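	// (added note) The launch above is not error-checked. A minimal optional check could be
	// placed here (sketch only, not part of the original code path):
	//     hipError_t lasterr = hipGetLastError();
	//     if (lasterr != hipSuccess) { mexErrMsgTxt(hipGetErrorString(lasterr)); }
	// The hipMemcpy calls below use the default stream, so they implicitly wait for the
	// kernel to finish before copying results back to the host.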
// Allocate host side memory for output arrays at the output pointer positions
plhs[0] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[2] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[3] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[4] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[5] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[6] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[7] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[8] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[9] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[10] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[11] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[12] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
xf = (double *)mxGetPr(plhs[0]);
xfc = (double *)mxGetPr(plhs[1]);
yf = (double *)mxGetPr(plhs[2]);
yfc = (double *)mxGetPr(plhs[3]);
n = (double *)mxGetPr(plhs[4]);
nc = (double *)mxGetPr(plhs[5]);
sigx = (double *)mxGetPr(plhs[6]);
sigxc = (double *)mxGetPr(plhs[7]);
sigy = (double *)mxGetPr(plhs[8]);
sigyc = (double *)mxGetPr(plhs[9]);
off = (double *)mxGetPr(plhs[10]);
offc = (double *)mxGetPr(plhs[11]);
llv = (double *)mxGetPr(plhs[12]);
// copy memory from device to host
	// to avoid compilation annoyances, checkCudaErrors is deliberately not used here
hipMemcpy(xf , d_xf_all ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(xfc , d_xf_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(yf , d_yf_all ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(yfc , d_yf_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(n , d_N ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(nc , d_N_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(sigx , d_sigx_all ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(sigxc , d_sigx_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(sigy , d_sigy_all ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(sigyc , d_sigy_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(off , d_off ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(offc , d_off_crlb ,vmem, hipMemcpyDeviceToHost);
hipMemcpy(llv , d_llv ,vmem, hipMemcpyDeviceToHost);
/*
checkCudaErrors(hipMemcpy(xf , d_xf_all ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(xfc , d_xf_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(yf , d_yf_all ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(yfc , d_yf_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(n , d_N ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(nc , d_N_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sigx , d_sigx_all ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sigxc , d_sigx_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sigy , d_sigy_all ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sigyc , d_sigy_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(off , d_off ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(offc , d_off_crlb ,vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(llv , d_llv ,vmem, hipMemcpyDeviceToHost));
*/
// clean up
hipFree(d_iall);
hipFree(d_N);
hipFree(d_xf_all);
hipFree(d_yf_all);
hipFree(d_off);
hipFree(d_sigx_all);
hipFree(d_sigy_all);
hipFree(d_xf_crlb);
hipFree(d_yf_crlb);
hipFree(d_N_crlb);
hipFree(d_off_crlb);
hipFree(d_sigx_crlb);
hipFree(d_sigy_crlb);
hipFree(d_llv);
} // DONE
| b00e85cee5bdc5f1aa01f17f79dab57b6b4247d5.cu | /*
* Chain_loc v 1.0 is the source code for a mex file that will input image data and parameter estimators and output localized data
* Calling this function in matlab will look like
* [xf_all, yf_all, N, off_all, sigx, sigy, xf_crlb, yf_crlb, N_crlb, off_crlb, sigx_crlb, sigy_crlb, llv, framenum_all] = chain_loc( i1, numthreads, angle(in rads))
* written by Andrew Nelson Version v 1.1 on 5/2/15
*/
/*
Version 1.2 has had substantial debugging and careful comparison against the CPU version. Extra diagnostic code that had been commented out and served no purpose in the regular code was removed.
At this point we can be assured that the algorithms for calculating position, total area, and offset are working properly on the GPU
This version is usable with the localization code Quhzx_01_3.m
Fixed
Error codes should not be given off when inputs and outputs don't match expected arguments
Fixed a problem that considerably prolonged computation time by removing GPU device invocation early in the main loop (i.e. resetting the GPU, synchronizing threads, initializing the GPU ....)
Added redundant void __global__ loops to handle multiple sizes of input images
*/
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#define PI 3.14159265358979323846
/*
* Device code
*
* To facilitate coding (for me) I have copied the localization algorithm to be used with multiple sized areas
*/
/*
Device Functions
*/
__device__ double device_det(double Fisher[36])
{
double det;
det = Fisher[0] * (Fisher[7] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) + Fisher[19] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[25] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) + Fisher[31] * (Fisher[8] * 
(Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])))) - Fisher[6] * (Fisher[1] * (Fisher[14] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) + Fisher[26] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[32] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]))) - Fisher[13] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * 
Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])))) + Fisher[12] * (Fisher[1] * (Fisher[8] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[21] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) + Fisher[33] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23])) - Fisher[20] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]))) + Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) 
+ Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[18] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) + Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17])) - Fisher[14] * 
(Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[28] * Fisher[35] - Fisher[34] * Fisher[29]) - Fisher[27] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5])) + Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) + Fisher[24] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * 
Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[32] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) + Fisher[33] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[35] - Fisher[34] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) + Fisher[33] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[35] - Fisher[34] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[35] - Fisher[34] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[35] - Fisher[34] * Fisher[5]) + Fisher[33] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[32] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[31] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) 
+ Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])))) - Fisher[30] * (Fisher[1] * (Fisher[8] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) + Fisher[20] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[26] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]))) - Fisher[7] * (Fisher[2] * (Fisher[15] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) + Fisher[27] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17])) - Fisher[14] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]))) + Fisher[13] * (Fisher[2] * (Fisher[9] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[22] * Fisher[29] - Fisher[28] * Fisher[23]) - Fisher[21] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5])) + Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) - Fisher[19] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) + Fisher[27] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[29] - Fisher[28] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[29] - Fisher[28] * Fisher[11]) - 
Fisher[9] * (Fisher[4] * Fisher[29] - Fisher[28] * Fisher[5]) + Fisher[27] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[26] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))) + Fisher[25] * (Fisher[2] * (Fisher[9] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) + Fisher[21] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11])) - Fisher[8] * (Fisher[3] * (Fisher[16] * Fisher[23] - Fisher[22] * Fisher[17]) - Fisher[15] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5])) + Fisher[14] * (Fisher[3] * (Fisher[10] * Fisher[23] - Fisher[22] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[23] - Fisher[22] * Fisher[5]) + Fisher[21] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5])) - Fisher[20] * (Fisher[3] * (Fisher[10] * Fisher[17] - Fisher[16] * Fisher[11]) - Fisher[9] * (Fisher[4] * Fisher[17] - Fisher[16] * Fisher[5]) + Fisher[15] * (Fisher[4] * Fisher[11] - Fisher[10] * Fisher[5]))));
return det;
}
/*
Global Functions
*/
// localize
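/*
 * localize: one thread fits one pix*pix (5x5) ROI with a pixel-integrated Gaussian model
 *   u = N * Ex * Ey + offset, where Ex and Ey are differences of error functions over each pixel.
 * The fit runs lpcnt Newton-Raphson iterations on the Poisson log-likelihood, and on the final
 * iteration builds the 6x6 Fisher information matrix (parameter order x, y, N, offset, sigx, sigy)
 * to report CRLBs. d_iall holds the images (pix*pix values per image, column-major from MATLAB);
 * the remaining pointers receive the fitted parameters, their CRLBs, and the log-likelihood value
 * for each of the numi images. ang rotates the pixel grid.
 */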
__global__ void localize(double *d_iall,
double *d_xf_all,
double *d_yf_all,
double *d_N,
double *d_off,
double *d_sigx,
double *d_sigy,
double *d_xf_crlb,
double *d_yf_crlb,
double *d_N_crlb,
double *d_off_crlb,
double *d_sigx_crlb,
double *d_sigy_crlb,
double *d_llv,
double ang,
int lpcnt,
int numi)
{
// Declare variables
const int pix = 5; // width of the square ROI in pixels (5x5 images)
__shared__ double xgrid[pix*pix]; // keep the x and y pixel grids in the shared memory of each block
__shared__ double ygrid[pix*pix]; // so threads reuse them instead of recomputing them or hitting global memory
double dudx, dudy, dudsx, dudsy, d2udx2, d2udy2, d2udsx2, d2udsy2, dudn, dudo, Ex, Ey, u;
double d_x, d_y, d_n, d_sx, d_sy, d_o, dd_x, dd_y, dd_sx, dd_sy, dd_n, dd_o, x, y;
// these variables will exist on the register of each thread
double d_beta1[6] = {0, 0, 0, 0, 0, 0};
int tx = threadIdx.x;
int index = blockIdx.x*blockDim.x + tx; // calculate thread index
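// one thread fits one image: index selects which pix*pix ROI this thread analyzes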
double d_i2[pix*pix]; // initialize data for image
double llv = 0.0; // per-image log-likelihood ratio; accumulated on the final iteration
double fisher[36] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
double det_fish = 0.0;
// build xgrid and ygrid once per block; the grid is needed regardless of whether this thread has an image to fit
if (tx == 0){
for (int i = 0; i <pix; i++){
for(int j = 0; j <pix; j++){
x = (double)j - ((double)pix-1)/2;
y = (double)i - ((double)pix-1)/2;
xgrid[j*pix + i] = x*cos(ang) - y*sin(ang);
ygrid[j*pix + i] = x*sin(ang) + y*cos(ang);
}
}
}
__syncthreads(); // make sure every thread in the block sees the completed pixel grid before using it
if (index < numi){ // check to see that threads only work if an image exists
// buffer all the variables into shared memory and registers and build guesses
d_beta1[0] = 0.0; // x guess: built below as the intensity-weighted centroid
d_beta1[1] = 0.0; // y guess: built below as the intensity-weighted centroid
d_beta1[2] = 0.0; // N guess: built below as the total image sum
d_beta1[3] = 1; // guess on sigma width in x (pixels)
d_beta1[4] = 1; // guess on sigma width in y (pixels)
d_beta1[5] = 100000; // offset guess: large sentinel, replaced below by the image minimum
for (int i = 0; i <pix*pix; i++) {
d_i2[i] = d_iall[i + index*pix*pix]; // buffer the pix*pix = 25 pixels of this image into registers; the thread index determines which image is analyzed
d_beta1[0] +=xgrid[i]*d_i2[i]; // sum of x and image weight
d_beta1[1] +=ygrid[i]*d_i2[i]; // sum of y and image weight
d_beta1[2] +=d_i2[i]; // image sum
if (d_beta1[5] > d_i2[i]){d_beta1[5] = d_i2[i];} // find minimum of image
}
d_beta1[0] = d_beta1[0] / d_beta1[2];
d_beta1[1] = d_beta1[1] / d_beta1[2];
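// Each of the lpcnt iterations below accumulates, over every pixel, the first derivative
// d_* = dL/dtheta and the diagonal second derivative dd_* = d2L/dtheta2 of the Poisson
// log-likelihood for each parameter, then takes the Newton step theta <- theta - d_*/dd_*.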
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < lpcnt; counttry++){
d_x = 0.0;
d_y = 0.0;
d_n = 0.0;
d_sx = 0.0;
d_sy = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_n = 0.0;
dd_sx = 0.0;
dd_sy = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
for (int rowcount = 0; rowcount < pix; rowcount++){ // FOR 2 loops over all rows
for (int colcount = 0; colcount < pix; colcount++){ // FOR 3 loops over all columns
// x/ygrid and the image data are indexed column-major (they come from MATLAB)
// these three lines define the fitting Gaussian for the current parameter iterate:
// Ex and Ey integrate the Gaussian over the pixel (differences of error functions), so u = N*Ex*Ey + offset is the expected count in this pixel
Ex = 0.5 * (erf((xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])) - erf((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) / sqrt(2.0 * d_beta1[3] * d_beta1[3])));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])) - erf((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) / sqrt(2.0 * d_beta1[4] * d_beta1[4])));
u = d_beta1[2] * Ex*Ey + d_beta1[5];
// first derivative calculations
// these are evaluated pixel by pixel and summed into the d_* and dd_* accumulators below
dudx = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[3] * d_beta1[3]))*(exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudy = (d_beta1[2] / sqrt(2.0 * PI*d_beta1[4] * d_beta1[4]))*(exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudsx = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[3], 2.0)))*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) * exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))*Ey;
dudsy = (d_beta1[2] / (sqrt(2.0*PI) * powf(d_beta1[4], 2.0)))*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) * exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))*Ex;
dudn = Ex*Ey;
dudo = 1.0;
// second derivatives
// these are calculated in the same manner as the first derivatives
d2udx2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[3], 3.0))*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))))*Ey;
d2udy2 = (d_beta1[2] / (sqrt(2.0 * PI)*powf(d_beta1[4], 3.0))*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))))*Ex;
d2udsx2 = (Ey*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[3], -5.0)*(powf((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- powf((xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3])))
- 2 * powf(d_beta1[3], -3.0)*((xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] - 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))
- (xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - d_beta1[0] + 0.5, 2.0) / (2.0 * d_beta1[3] * d_beta1[3]))));
d2udsy2 = (Ex*d_beta1[2] / (sqrt(2.0 * PI)))
*(powf(d_beta1[4], -5.0)*(powf((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- powf((ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4])))
- 2 * powf(d_beta1[4], -3.0)*((ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] - 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))
- (ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - d_beta1[1] + 0.5, 2.0) / (2.0 * d_beta1[4] * d_beta1[4]))));
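// only the diagonal second derivatives are needed: each parameter is updated independently below (a diagonal-Newton step)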
// accumulate the correction factors
// each d_* and dd_* variable sums its contribution from every pixel in the ROI
d_x = d_x + dudx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*pix] / u) - 1.0);
d_sx = d_sx + dudsx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_sx = dd_sx + d2udsx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudsx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_sy = d_sy + dudsy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_sy = dd_sy + d2udsy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudsy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*pix] / powf(u, 2.0);
if (counttry == lpcnt-1){ // on the last count, construct fisher information matrix elements
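// fisher[] is the 6x6 Fisher information matrix, stored row-major in the parameter order
// (x, y, N, offset, sigx, sigy): fisher[6*j + k] = sum over pixels of (du/dtheta_j)(du/dtheta_k)/u.
// llv accumulates the Poisson log-likelihood ratio, with a tiny epsilon inside the logs to avoid log(0).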
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudsx / u;
fisher[5] += dudx*dudsy / u;
fisher[6] += dudy*dudx / u;
fisher[7] += dudy*dudy / u;
fisher[8] += dudy*dudn / u;
fisher[9] += dudy*dudo / u;
fisher[10] += dudy*dudsx / u;
fisher[11] += dudy*dudsy / u;
fisher[12] += dudn*dudx / u;
fisher[13] += dudn*dudy / u;
fisher[14] += dudn*dudn / u;
fisher[15] += dudn*dudo / u;
fisher[16] += dudn*dudsx / u;
fisher[17] += dudn*dudsy / u;
fisher[18] += dudo*dudx / u;
fisher[19] += dudo*dudy / u;
fisher[20] += dudo*dudn / u;
fisher[21] += dudo*dudo / u;
fisher[22] += dudo*dudsx / u;
fisher[23] += dudo*dudsy / u;
fisher[24] += dudsx*dudx / u;
fisher[25] += dudsx*dudy / u;
fisher[26] += dudsx*dudn / u;
fisher[27] += dudsx*dudo / u;
fisher[28] += dudsx*dudsx / u;
fisher[29] += dudsx*dudsy / u;
fisher[30] += dudsy*dudx / u;
fisher[31] += dudsy*dudy / u;
fisher[32] += dudsy*dudn / u;
fisher[33] += dudsy*dudo / u;
fisher[34] += dudsy*dudsx / u;
fisher[35] += dudsy*dudsy / u;
llv += d_i2[rowcount+colcount*pix] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*pix]*log(d_i2[rowcount + colcount*pix] + 0.0000000000000001) + d_i2[rowcount + colcount*pix];
}
} // END FOR 3
} // END FOR2
// Newton-Raphson update: move each parameter by -(dL/dtheta)/(d2L/dtheta2)
d_beta1[0] = d_beta1[0] - d_x / dd_x;
d_beta1[1] = d_beta1[1] - d_y / dd_y;
d_beta1[2] = d_beta1[2] - d_n / dd_n;
d_beta1[3] = d_beta1[3] - d_sx / dd_sx;
d_beta1[4] = d_beta1[4] - d_sy / dd_sy;
d_beta1[5] = d_beta1[5] - d_o / dd_o;
} // end FOR 1
if (d_beta1[0] == d_beta1[0] && d_beta1[1] == d_beta1[1] && d_beta1[2] == d_beta1[2] && d_beta1[3] == d_beta1[3] && d_beta1[4] == d_beta1[4] && d_beta1[5] == d_beta1[5]){ // begin is-numeric check: x == x is false only for NaN, so diverged fits are rejected
if (d_beta1[2] > 0 && d_beta1[0] >= -(double)pix/2 && d_beta1[0] <= (double)pix/2 && d_beta1[1] <= (double)pix/2 && d_beta1[1] >= -(double)pix/2 ){ // was the molecule inside the grid? Was N positive? if yes then record the point
d_xf_all[index] = d_beta1[0] ;
d_yf_all[index] = d_beta1[1] ;
d_N[index] = d_beta1[2];
d_sigx[index] = d_beta1[3];
d_sigy[index] = d_beta1[4];
d_off[index] = d_beta1[5];
d_llv[index] = llv;
/*d_xf_all[index] = testi ; */
/*d_N[index] = testi;
d_sigx[index] = testi;
d_sigy[index] = testi;
d_off[index] =testi;
d_llv[index] =testi;*/
// calculate crlb's for estimators
// UPDATE FOR SIGMA VALUES
det_fish = device_det(fisher); // the explicit cofactor expansions below were generated with homemade Python scripts (cofacs.py and text_det.py) and checked against lower-rank matrices
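// Each CRLB below is a diagonal element of the inverse Fisher matrix, written out
// explicitly as (cofactor of that diagonal entry) / det(Fisher).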
d_xf_crlb[index] = (fisher[7] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[13] * (fisher[8] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) + fisher[19] * (fisher[8] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[26] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[25] * (fisher[8] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[32] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) + fisher[31] * (fisher[8] * 
(fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) + fisher[20] * (fisher[9] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[26] * (fisher[9] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) + fisher[21] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])))) / det_fish;
d_yf_crlb[index] = -(-(fisher[0] * (fisher[14] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) + fisher[26] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[32] * (fisher[15] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]))) - fisher[12] * (fisher[2] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[20] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[2] * (fisher[15] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[26] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) - fisher[24] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[33] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[15] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[32] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[30] * (fisher[2] * (fisher[15] * (fisher[22] * fisher[29] 
- fisher[28] * fisher[23]) - fisher[21] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) + fisher[27] * (fisher[16] * fisher[23] - fisher[22] * fisher[17])) - fisher[14] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[20] * (fisher[3] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[15] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[26] * (fisher[3] * (fisher[16] * fisher[23] - fisher[22] * fisher[17]) - fisher[15] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])))) / det_fish);
d_N_crlb[index] = (fisher[0] * (fisher[7] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[31] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[21] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) + fisher[33] * (fisher[22] * fisher[29] - fisher[28] * fisher[23])) - fisher[19] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]))) + fisher[18] * (fisher[1] * (fisher[9] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[27] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[33] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[35] - fisher[34] * fisher[23]) - fisher[21] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[9] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[33] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[9] * (fisher[22] * fisher[29] - fisher[28] * 
fisher[23]) - fisher[21] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[27] * (fisher[10] * fisher[23] - fisher[22] * fisher[11])) - fisher[7] * (fisher[3] * (fisher[22] * fisher[29] - fisher[28] * fisher[23]) - fisher[21] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[23] - fisher[22] * fisher[5])) + fisher[19] * (fisher[3] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[9] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[27] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[3] * (fisher[10] * fisher[23] - fisher[22] * fisher[11]) - fisher[9] * (fisher[4] * fisher[23] - fisher[22] * fisher[5]) + fisher[21] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish;
d_off_crlb[index] = -(-(fisher[0] * (fisher[7] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) + fisher[25] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[31] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]))) - fisher[6] * (fisher[1] * (fisher[14] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) + fisher[32] * (fisher[16] * fisher[29] - fisher[28] * fisher[17])) - fisher[13] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]))) + fisher[12] * (fisher[1] * (fisher[8] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[29] - fisher[28] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[28] * fisher[35] - fisher[34] * fisher[29]) - fisher[26] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[29] - fisher[28] * fisher[5])) + fisher[25] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) - fisher[24] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) + fisher[32] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[35] - fisher[34] * fisher[17]) - fisher[14] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[35] - fisher[34] * fisher[11]) - fisher[8] * (fisher[4] * fisher[35] - fisher[34] * fisher[5]) + fisher[32] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[31] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5]))) + fisher[30] * (fisher[1] * (fisher[8] * (fisher[16] * fisher[29] - fisher[28] 
* fisher[17]) - fisher[14] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) + fisher[26] * (fisher[10] * fisher[17] - fisher[16] * fisher[11])) - fisher[7] * (fisher[2] * (fisher[16] * fisher[29] - fisher[28] * fisher[17]) - fisher[14] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[17] - fisher[16] * fisher[5])) + fisher[13] * (fisher[2] * (fisher[10] * fisher[29] - fisher[28] * fisher[11]) - fisher[8] * (fisher[4] * fisher[29] - fisher[28] * fisher[5]) + fisher[26] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])) - fisher[25] * (fisher[2] * (fisher[10] * fisher[17] - fisher[16] * fisher[11]) - fisher[8] * (fisher[4] * fisher[17] - fisher[16] * fisher[5]) + fisher[14] * (fisher[4] * fisher[11] - fisher[10] * fisher[5])))) / det_fish);
d_sigx_crlb[index] = (fisher[0]*(fisher[7]*(fisher[14]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])+fisher[32]*(fisher[15]*fisher[23]-fisher[21]*fisher[17]))-fisher[13]*(fisher[8]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[23]-fisher[21]*fisher[11]))+fisher[19]*(fisher[8]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[31]*(fisher[8]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])+fisher[20]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])))-fisher[6]*(fisher[1]*(fisher[14]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])+fisher[32]*(fisher[15]*fisher[23]-fisher[21]*fisher[17]))-fisher[13]*(fisher[2]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[23]-fisher[21]*fisher[5]))+fisher[19]*(fisher[2]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])))+fisher[12]*(fisher[1]*(fisher[8]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[23]-fisher[21]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[21]*fisher[35]-fisher[33]*fisher[23])-fisher[20]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[23]-fisher[21]*fisher[5]))+fisher[19]*(fisher[2]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])-fisher[8]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])-fisher[8]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[11]-fisher[9]*fisher[5])))-fisher[18]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])+fisher[32]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[35]-fisher[33]*fisher[17])-fisher[14]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[35]-fisher[33]*fisher[11])-fisher[8]*(fisher[3]*fisher[35]-fisher[33]*fisher[5])+fisher[32]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[31]*(fisher[2]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])-fisher[8]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])+fisher[14]*(fisher[3]*fisher[11]-fisher[9]*fisher[5])))+fisher[30]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])+fisher[20]*(fisher[9]*fisher[17]-fisher[15]*fisher[11]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[23]-fisher[21]*fisher[17])-fisher[14]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[17]-fisher[15]*fisher[5]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[23]-fisher[21]*fisher[11])-fisher[8]*(fisher[3]*fisher[23]-fisher[21]*fisher[5])+fisher[20]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))-fisher[19]*(fisher[2]*(fisher[9]*fisher[17]-fisher[15]*fisher[11])-fish
er[8]*(fisher[3]*fisher[17]-fisher[15]*fisher[5])+fisher[14]*(fisher[3]*fisher[11]-fisher[9]*fisher[5]))))/det_fish;
d_sigy_crlb[index] = -(-(fisher[0]*(fisher[7]*(fisher[14]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])+fisher[26]*(fisher[15]*fisher[22]-fisher[21]*fisher[16]))-fisher[13]*(fisher[8]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[22]-fisher[21]*fisher[10]))+fisher[19]*(fisher[8]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[25]*(fisher[8]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])+fisher[20]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])))-fisher[6]*(fisher[1]*(fisher[14]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])+fisher[26]*(fisher[15]*fisher[22]-fisher[21]*fisher[16]))-fisher[13]*(fisher[2]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[22]-fisher[21]*fisher[4]))+fisher[19]*(fisher[2]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])))+fisher[12]*(fisher[1]*(fisher[8]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[22]-fisher[21]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[21]*fisher[28]-fisher[27]*fisher[22])-fisher[20]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[22]-fisher[21]*fisher[4]))+fisher[19]*(fisher[2]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])-fisher[8]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])-fisher[8]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[10]-fisher[9]*fisher[4])))-fisher[18]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])+fisher[26]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[28]-fisher[27]*fisher[16])-fisher[14]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[28]-fisher[27]*fisher[10])-fisher[8]*(fisher[3]*fisher[28]-fisher[27]*fisher[4])+fisher[26]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[25]*(fisher[2]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])-fisher[8]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])+fisher[14]*(fisher[3]*fisher[10]-fisher[9]*fisher[4])))+fisher[24]*(fisher[1]*(fisher[8]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])+fisher[20]*(fisher[9]*fisher[16]-fisher[15]*fisher[10]))-fisher[7]*(fisher[2]*(fisher[15]*fisher[22]-fisher[21]*fisher[16])-fisher[14]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[16]-fisher[15]*fisher[4]))+fisher[13]*(fisher[2]*(fisher[9]*fisher[22]-fisher[21]*fisher[10])-fisher[8]*(fisher[3]*fisher[22]-fisher[21]*fisher[4])+fisher[20]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))-fisher[19]*(fisher[2]*(fisher[9]*fisher[16]-fisher[15]*fisher[10])-f
isher[8]*(fisher[3]*fisher[16]-fisher[15]*fisher[4])+fisher[14]*(fisher[3]*fisher[10]-fisher[9]*fisher[4]))))/det_fish);
/*d_xf_crlb[index] = testi;
d_yf_crlb[index] = testi;
d_N_crlb[index] = testi;
d_off_crlb[index] = testi;
d_sigx_crlb[index] = testi;
d_sigy_crlb[index] = testi;*/
}
else{ // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = llv;
}
} //end is numeric if statement
else{
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_sigx[index] = -1;
d_sigy[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_sigx_crlb[index] = -1;
d_sigy_crlb[index] = -1;
d_llv[index] = llv;
} // end else fail statement
}
} // end localize
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
double *iall; // the pointer to the array of all images to be analyzed
double *d_iall; // Pointer to image array on gpu
double angle;
double *d_xf_all;
double *d_yf_all;
double *d_sigx_all;
double *d_sigy_all;
double *d_N;
double *d_off;
double *d_llv;
double *d_xf_crlb;
double *d_yf_crlb;
double *d_N_crlb;
double *d_off_crlb;
double *d_sigx_crlb;
double * d_sigy_crlb;
double *xf, *xfc, *yf, *yfc, *n, *nc, *sigx, *sigxc, *sigy, *sigyc, *off, *offc, *llv;
size_t threadsperblock;
int irow; // number of rows in the input array = pixels per image (must be 25 for a 5x5 ROI)
int numi; // number of images imported
int lpcnt; // number of loops to perform the MLE calculation over
const size_t *idims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 4) {
printf("Must have 4 inputs ( i1, numthreads, angle(in rads), MLE loop count)\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[0]) || mxIsComplex(prhs[0])){
printf("i1 must be a nxm double array\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsDouble(prhs[2]) || mxIsComplex(prhs[2])){
printf("angle must be a DOUBLE\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
	iall = (double *)mxGetPr(prhs[0]); // Matlab linearizes in a column-major format which affects indexing (Writing Matlab C/MEX Code - ResearchGate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
irow = (int)idims[0];
numi = (int)idims[1];
angle = (double)mxGetScalar(prhs[2]);
lpcnt = (int)mxGetScalar(prhs[3]);
if (numi > 1000000 || numi < 1){
numi = 1;
}
int imem = irow*numi*sizeof(double);
int vmem = numi*sizeof(double);
// verify that the input variables are what was expected
if (irow != 25){
printf("Images are of incorrect size. There must be a square number of rows in the entry.\n");
mexErrMsgTxt("See Error above!\n");
}
if (nlhs != 13){
printf("You must have 13 output variables [xf_all, xf_crlb, yf_all, yf_crlb, N, N_crlb, sigx_all, sigx_crlb, sigy_all, sigy_crlb, off_all, off_crlb, llv_all]\n");
mexErrMsgTxt("See Error above!\n");
}
// allocate memory and copy it onto the gpu device
// iall
cudaMalloc((void**)&d_iall, imem); // allocate image memory
	cudaMemcpy(d_iall, iall, imem, cudaMemcpyHostToDevice); // copy images from host to device
/*
checkCudaErrors(cudaMalloc((void**)&d_iall, imem)); // allocate image memory
	checkCudaErrors(cudaMemcpy(d_iall, iall, imem, cudaMemcpyHostToDevice)); // copy images from host to device
*/
// allocate memory for fitted variables that will be returned from device
cudaMalloc((void**)&d_xf_all , vmem); // allocate xf_all memory
cudaMalloc((void**)&d_xf_crlb , vmem); // allocate xf_crlb memory
cudaMalloc((void**)&d_yf_all , vmem); // allocate yf_all memory
cudaMalloc((void**)&d_yf_crlb , vmem); // allocate yf_crlb memory
cudaMalloc((void**)&d_sigx_all , vmem); // allocate sigx memory
cudaMalloc((void**)&d_sigx_crlb, vmem); // allocate sigx_crlb memory
cudaMalloc((void**)&d_sigy_all , vmem); // allocate sigy memory
cudaMalloc((void**)&d_sigy_crlb, vmem); // allocate sigy_crlb memory
cudaMalloc((void**)&d_N , vmem); // allocate N memory
cudaMalloc((void**)&d_N_crlb , vmem); // allocate N_crlb memory
cudaMalloc((void**)&d_off , vmem); // allocate off memory
	cudaMalloc((void**)&d_off_crlb , vmem);	 // allocate off_crlb memory
cudaMalloc((void**)&d_llv , vmem); // allocate llv memory
/*
checkCudaErrors(cudaMalloc((void**)&d_xf_all , vmem)); // allocate xf_all memory
checkCudaErrors(cudaMalloc((void**)&d_xf_crlb , vmem)); // allocate xf_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_yf_all , vmem)); // allocate yf_all memory
checkCudaErrors(cudaMalloc((void**)&d_yf_crlb , vmem)); // allocate yf_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_sigx_all , vmem)); // allocate sigx memory
checkCudaErrors(cudaMalloc((void**)&d_sigx_crlb, vmem)); // allocate sigx_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_sigy_all , vmem)); // allocate sigy memory
checkCudaErrors(cudaMalloc((void**)&d_sigy_crlb, vmem)); // allocate sigy_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_N , vmem)); // allocate N memory
checkCudaErrors(cudaMalloc((void**)&d_N_crlb , vmem)); // allocate N_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_off , vmem)); // allocate off memory
	checkCudaErrors(cudaMalloc((void**)&d_off_crlb , vmem));	 // allocate off_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_llv , vmem)); // allocate llv memory
*/
/* Run GPU kernel*/
	threadsperblock = mxGetScalar(prhs[1]); // get number of threads per block from Matlab
localize << <((numi - 1) / threadsperblock + 1), threadsperblock >> >(d_iall, d_xf_all, d_yf_all, d_N, d_off, d_sigx_all, d_sigy_all, d_xf_crlb, d_yf_crlb, d_N_crlb, d_off_crlb, d_sigx_crlb, d_sigy_crlb, d_llv, angle, lpcnt, numi);
// Allocate host side memory for output arrays at the output pointer positions
plhs[0] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[2] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[3] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[4] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[5] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[6] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[7] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[8] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[9] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[10] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[11] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
plhs[12] = mxCreateNumericMatrix(numi,1,mxDOUBLE_CLASS, mxREAL);
xf = (double *)mxGetPr(plhs[0]);
xfc = (double *)mxGetPr(plhs[1]);
yf = (double *)mxGetPr(plhs[2]);
yfc = (double *)mxGetPr(plhs[3]);
n = (double *)mxGetPr(plhs[4]);
nc = (double *)mxGetPr(plhs[5]);
sigx = (double *)mxGetPr(plhs[6]);
sigxc = (double *)mxGetPr(plhs[7]);
sigy = (double *)mxGetPr(plhs[8]);
sigyc = (double *)mxGetPr(plhs[9]);
off = (double *)mxGetPr(plhs[10]);
offc = (double *)mxGetPr(plhs[11]);
llv = (double *)mxGetPr(plhs[12]);
// copy memory from device to host
// due to annoyances w/ compilation I am trying to avoid checkCudaErrors
cudaMemcpy(xf , d_xf_all ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(xfc , d_xf_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(yf , d_yf_all ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(yfc , d_yf_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(n , d_N ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(nc , d_N_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(sigx , d_sigx_all ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(sigxc , d_sigx_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(sigy , d_sigy_all ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(sigyc , d_sigy_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(off , d_off ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(offc , d_off_crlb ,vmem, cudaMemcpyDeviceToHost);
cudaMemcpy(llv , d_llv ,vmem, cudaMemcpyDeviceToHost);
/*
checkCudaErrors(cudaMemcpy(xf , d_xf_all ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(xfc , d_xf_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(yf , d_yf_all ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(yfc , d_yf_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(n , d_N ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(nc , d_N_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(sigx , d_sigx_all ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(sigxc , d_sigx_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(sigy , d_sigy_all ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(sigyc , d_sigy_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(off , d_off ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(offc , d_off_crlb ,vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(llv , d_llv ,vmem, cudaMemcpyDeviceToHost));
*/
// clean up
cudaFree(d_iall);
cudaFree(d_N);
cudaFree(d_xf_all);
cudaFree(d_yf_all);
cudaFree(d_off);
cudaFree(d_sigx_all);
cudaFree(d_sigy_all);
cudaFree(d_xf_crlb);
cudaFree(d_yf_crlb);
cudaFree(d_N_crlb);
cudaFree(d_off_crlb);
cudaFree(d_sigx_crlb);
cudaFree(d_sigy_crlb);
cudaFree(d_llv);
} // DONE
|
c95f0563e4fa77bbd5cb9c3bee4d3bc51d445612.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define inf 99999
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h> // for time(), used below to seed rand()
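// Blocked (tiled) Floyd-Warshall all-pairs shortest paths: the n x n distance
// matrix x and the companion matrix qx (intermediate vertex of the best path
// found so far, for path reconstruction) are processed in 32x32 tiles. For each
// tile round k the three kernels below update (1) the diagonal pivot tile,
// (2) the tiles in the pivot's tile row and tile column, and (3) all remaining
// tiles.
// Phase 1: relax paths through the vertices of the k-th diagonal tile, entirely
// within that tile, using one 32x32 thread block and shared memory.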
__global__ void firstpass(int n, int k, float* x, int* qx)
{
__shared__ float dBlck[1024], qBlck[1024];
float tmp = 0.00;
int i = (threadIdx.x >> 5), j = threadIdx.x & 31;
int ind1 = ((k << 5) + i) * n + (k << 5) + j, k1 = k << 5;
dBlck[threadIdx.x] = x[ind1];
qBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dBlck[(i << 5) + l] + dBlck[(l << 5) + j];
if (dBlck[threadIdx.x] > tmp)
{
dBlck[threadIdx.x] = tmp;
qBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = dBlck[threadIdx.x];
qx[ind1] = qBlck[threadIdx.x];
}
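// Phase 2: relax the tiles in the pivot's tile row (blockIdx.y == 0) and tile
// column (blockIdx.y == 1) against the cached diagonal tile; block indices at or
// beyond k are shifted by one so the diagonal tile itself is skipped.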
__global__ void secondpass(int n, int k, float* x, int* qx)
{
__shared__ float dBlck[1024], qcBlck[1024], cBlock[1024];
int i = (threadIdx.x >> 5), j = threadIdx.x & 31, k1 = (k << 5), skip = 0;
dBlck[threadIdx.x] = x[(k1 + i) * n + k1 + j];
float tmp = 0.00;
if (blockIdx.x >= k) // jump over block computed in first pass
{
skip = 1;
}
if (blockIdx.y == 0)
{
int ind1 = (k1 + i) * n + ((blockIdx.x + skip) << 5) + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dBlck[(i << 5) + l] + cBlock[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
else
{
int ind1 = (((blockIdx.x + skip)<<5) + i) * n + k1 + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = cBlock[(i << 5) + l] + dBlck[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
}
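// Phase 3: relax every tile outside the pivot's tile row and column using the
// already-updated tiles (i, k) from the pivot tile column and (k, j) from the
// pivot tile row.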
__global__ void thirdpass(int n, int k, float* x, int* qx)
{
int i = (threadIdx.x >> 5), j = threadIdx.x & 31, k1 = (k << 5), skipx = 0, skipy = 0;
__shared__ float dyBlck[1024], dxBlck[1024], qcBlck[1024], cBlock[1024];
if (blockIdx.x >= k)
{
skipx = 1;
}
if (blockIdx.y >= k)
{
skipy = 1;
}
dxBlck[threadIdx.x] = x[((k << 5) + i) * n + ((blockIdx.y + skipy) << 5) + j];
dyBlck[threadIdx.x] = x[(((blockIdx.x + skipx) << 5) + i) * n + (k << 5) + j];
int ind1 = (((blockIdx.x + skipx) << 5) + i) * n + ((blockIdx.y + skipy) << 5) + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
float tmp = 0.00;
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dyBlck[(i << 5) + l] + dxBlck[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
int main(int argc, char **argv)
{
hipEvent_t start, stop;
	float *host_A, *host_D, *dev_x, *A, *D, tolerance = 0.001, sum = 0, dt_ms = 0;
int *host_Q, n = atoi(argv[1]), *dev_qx, *Q, i, j, bk11 = 1, bk21 = n/32 - 1, bk22 = 2, bk31 = n/32 - 1, bk32 = n/32 - 1, k = 0;
double t1s, t2s, t3s, t4s, t5s;
char runcpu = argv[2][0];
printf("==========================================\n");
printf("Running with %d nodes \n", n);
printf("\n");
hipMalloc(&dev_x, n * n * sizeof(float));
	hipMalloc(&dev_qx, n * n * sizeof(int)); // qx is an int array
// Arrays for the CPU
A = (float *) malloc(n * n * sizeof(float));
D = (float *) malloc(n * n * sizeof(float));
Q = (int *) malloc(n * n * sizeof(int));
// Arrays for the GPU
host_A = (float *) malloc(n * n * sizeof(float));
host_D = (float *) malloc(n * n * sizeof(float));
host_Q = (int *) malloc(n * n * sizeof(int));
// generate random graph
srand(time(NULL));
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
Q[i * n + j] = -1;
}
}
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if (i == j)
{
A[i * n + j] = 0;
}
else
{
A[i * n + j] = 1200 * (float) rand() / RAND_MAX + 1;
if (A[i * n + j] > 1000)
{
A[i * n + j] = inf;
Q[i * n + j] = -2;
}
}
D[i * n + j] = A[i * n + j];
host_A[i * n + j] = A[i * n + j];
host_Q[i * n + j] = Q[i * n + j];
}
}
hipEventCreate(&start);
hipEventCreate(&stop);
// First copy, CPU -> GPU
hipEventRecord(start, 0);
hipMemcpy(dev_x, host_A, n * n * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy(dev_qx, host_Q, n * n * sizeof (int), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&dt_ms, start, stop);
printf("Transfer CPU -> GPU, time: %lf ms\n", dt_ms);
sum+=dt_ms;
t1s = dt_ms;
// GPU calculation
hipEventRecord(start, 0);
dim3 bk2(n / 32 - 1, 2);
dim3 bk3(n / 32 - 1, n / 32 - 1);
int gputhreads = 1024;
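	// One round per 32-wide tile: phase 1 runs on the single diagonal tile,
	// phase 2 on the 2*(n/32 - 1) row/column tiles, and phase 3 on the remaining
	// (n/32 - 1)^2 tiles. n is assumed to be a multiple of 32.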
for (k = 0; k < n / 32; ++k)
{
firstpass << <1, gputhreads>>>(n, k, dev_x, dev_qx);
secondpass << <bk2, gputhreads>>>(n, k, dev_x, dev_qx);
thirdpass << <bk3, gputhreads>>>(n, k, dev_x, dev_qx);
}
hipDeviceSynchronize(); // wait until all threads are done
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&dt_ms, start, stop);
printf("Calculation time for GPU: %lf ms\n\n", dt_ms);
sum+=dt_ms;
t2s = dt_ms;
// Second copy, GPU -> CPU
hipEventRecord(start, 0);
hipMemcpy(host_D, dev_x, n * n * sizeof (float), hipMemcpyDeviceToHost);
hipMemcpy(host_Q, dev_qx, n * n * sizeof (int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&dt_ms, start, stop);
printf("Transfer GPU -> CPU, time: %lf ms\n", dt_ms);
sum+=dt_ms;
t3s = dt_ms;
printf("Total time: %lf ms\n\n----------------------------\n", sum);
t4s = sum;
// Running sequentially on CPU now
if(runcpu == 'y')
{
printf("\n");
printf("Sequential execution on CPU (could take a while)... \n");
hipEventRecord(start, 0);
for (k = 0; k < n; ++k)
{
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if ((D[i * n + k] + D[k * n + j]) < D[i * n + j])
{
D[i * n + j] = D[i * n + k] + D[k * n + j];
Q[i * n + j] = k;
}
}
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&dt_ms, start, stop);
printf("CPU time: %lf ms\n", dt_ms);
t5s = dt_ms;
printf("\n");
// Result validation
printf("Comparing CPU results with GPU results...");
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if (abs(D[i * n + j] - host_D[i * n + j]) > tolerance)
{
printf("ERROR: Different results in row i = %d and column j = %d, CPU result = %f GPU, result = %f \n", i, j, D[i * n + j], host_D[i * n + j]);
}
}
}
printf("Comparison complete! \n");
}
else
{
t5s = -1;
}
printf("Results are written to file resultsfwblocked.csv\n==========================================\n");
FILE *fptr;
fptr = fopen("resultsfwblocked.csv","a");
fprintf(fptr,"%d,%d,%d,%d,%d,%d,%d,%lf,%lf,%lf,%lf,%lf\n",n, bk11, bk21, bk22, bk31, bk32, gputhreads, t1s, t2s, t3s, t4s, t5s);
fclose(fptr);
return 0;
}
| c95f0563e4fa77bbd5cb9c3bee4d3bc51d445612.cu | #define inf 99999
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h> // for time(), used below to seed rand()
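// Blocked (tiled) Floyd-Warshall all-pairs shortest paths: the n x n distance
// matrix x and the companion matrix qx (intermediate vertex of the best path
// found so far, for path reconstruction) are processed in 32x32 tiles. For each
// tile round k the three kernels below update (1) the diagonal pivot tile,
// (2) the tiles in the pivot's tile row and tile column, and (3) all remaining
// tiles.
// Phase 1: relax paths through the vertices of the k-th diagonal tile, entirely
// within that tile, using one 32x32 thread block and shared memory.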
__global__ void firstpass(int n, int k, float* x, int* qx)
{
__shared__ float dBlck[1024], qBlck[1024];
float tmp = 0.00;
int i = (threadIdx.x >> 5), j = threadIdx.x & 31;
int ind1 = ((k << 5) + i) * n + (k << 5) + j, k1 = k << 5;
dBlck[threadIdx.x] = x[ind1];
qBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dBlck[(i << 5) + l] + dBlck[(l << 5) + j];
if (dBlck[threadIdx.x] > tmp)
{
dBlck[threadIdx.x] = tmp;
qBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = dBlck[threadIdx.x];
qx[ind1] = qBlck[threadIdx.x];
}
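// Phase 2: relax the tiles in the pivot's tile row (blockIdx.y == 0) and tile
// column (blockIdx.y == 1) against the cached diagonal tile; block indices at or
// beyond k are shifted by one so the diagonal tile itself is skipped.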
__global__ void secondpass(int n, int k, float* x, int* qx)
{
__shared__ float dBlck[1024], qcBlck[1024], cBlock[1024];
int i = (threadIdx.x >> 5), j = threadIdx.x & 31, k1 = (k << 5), skip = 0;
dBlck[threadIdx.x] = x[(k1 + i) * n + k1 + j];
float tmp = 0.00;
if (blockIdx.x >= k) // jump over block computed in first pass
{
skip = 1;
}
if (blockIdx.y == 0)
{
int ind1 = (k1 + i) * n + ((blockIdx.x + skip) << 5) + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dBlck[(i << 5) + l] + cBlock[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
else
{
int ind1 = (((blockIdx.x + skip)<<5) + i) * n + k1 + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = cBlock[(i << 5) + l] + dBlck[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
}
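// Phase 3: relax every tile outside the pivot's tile row and column using the
// already-updated tiles (i, k) from the pivot tile column and (k, j) from the
// pivot tile row.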
__global__ void thirdpass(int n, int k, float* x, int* qx)
{
int i = (threadIdx.x >> 5), j = threadIdx.x & 31, k1 = (k << 5), skipx = 0, skipy = 0;
__shared__ float dyBlck[1024], dxBlck[1024], qcBlck[1024], cBlock[1024];
if (blockIdx.x >= k)
{
skipx = 1;
}
if (blockIdx.y >= k)
{
skipy = 1;
}
dxBlck[threadIdx.x] = x[((k << 5) + i) * n + ((blockIdx.y + skipy) << 5) + j];
dyBlck[threadIdx.x] = x[(((blockIdx.x + skipx) << 5) + i) * n + (k << 5) + j];
int ind1 = (((blockIdx.x + skipx) << 5) + i) * n + ((blockIdx.y + skipy) << 5) + j;
cBlock[threadIdx.x] = x[ind1];
qcBlck[threadIdx.x] = qx[ind1];
float tmp = 0.00;
for (int l = 0; l < 32; ++l)
{
__syncthreads();
tmp = dyBlck[(i << 5) + l] + dxBlck[(l << 5) + j];
if (cBlock[threadIdx.x] > tmp)
{
cBlock[threadIdx.x] = tmp;
qcBlck[threadIdx.x] = l + k1;
}
}
x[ind1] = cBlock[threadIdx.x];
qx[ind1] = qcBlck[threadIdx.x];
}
int main(int argc, char **argv)
{
cudaEvent_t start, stop;
	float *host_A, *host_D, *dev_x, *A, *D, tolerance = 0.001, sum = 0, dt_ms = 0;
int *host_Q, n = atoi(argv[1]), *dev_qx, *Q, i, j, bk11 = 1, bk21 = n/32 - 1, bk22 = 2, bk31 = n/32 - 1, bk32 = n/32 - 1, k = 0;
double t1s, t2s, t3s, t4s, t5s;
char runcpu = argv[2][0];
printf("==========================================\n");
printf("Running with %d nodes \n", n);
printf("\n");
cudaMalloc(&dev_x, n * n * sizeof(float));
	cudaMalloc(&dev_qx, n * n * sizeof(int)); // qx is an int array
// Arrays for the CPU
A = (float *) malloc(n * n * sizeof(float));
D = (float *) malloc(n * n * sizeof(float));
Q = (int *) malloc(n * n * sizeof(int));
// Arrays for the GPU
host_A = (float *) malloc(n * n * sizeof(float));
host_D = (float *) malloc(n * n * sizeof(float));
host_Q = (int *) malloc(n * n * sizeof(int));
// generate random graph
srand(time(NULL));
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
Q[i * n + j] = -1;
}
}
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if (i == j)
{
A[i * n + j] = 0;
}
else
{
A[i * n + j] = 1200 * (float) rand() / RAND_MAX + 1;
if (A[i * n + j] > 1000)
{
A[i * n + j] = inf;
Q[i * n + j] = -2;
}
}
D[i * n + j] = A[i * n + j];
host_A[i * n + j] = A[i * n + j];
host_Q[i * n + j] = Q[i * n + j];
}
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
// First copy, CPU -> GPU
cudaEventRecord(start, 0);
cudaMemcpy(dev_x, host_A, n * n * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_qx, host_Q, n * n * sizeof (int), cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("Transfer CPU -> GPU, time: %lf ms\n", dt_ms);
sum+=dt_ms;
t1s = dt_ms;
// GPU calculation
cudaEventRecord(start, 0);
dim3 bk2(n / 32 - 1, 2);
dim3 bk3(n / 32 - 1, n / 32 - 1);
int gputhreads = 1024;
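	// One round per 32-wide tile: phase 1 runs on the single diagonal tile,
	// phase 2 on the 2*(n/32 - 1) row/column tiles, and phase 3 on the remaining
	// (n/32 - 1)^2 tiles. n is assumed to be a multiple of 32.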
for (k = 0; k < n / 32; ++k)
{
firstpass << <1, gputhreads>>>(n, k, dev_x, dev_qx);
secondpass << <bk2, gputhreads>>>(n, k, dev_x, dev_qx);
thirdpass << <bk3, gputhreads>>>(n, k, dev_x, dev_qx);
}
cudaDeviceSynchronize(); // wait until all threads are done
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("Calculation time for GPU: %lf ms\n\n", dt_ms);
sum+=dt_ms;
t2s = dt_ms;
// Second copy, GPU -> CPU
cudaEventRecord(start, 0);
cudaMemcpy(host_D, dev_x, n * n * sizeof (float), cudaMemcpyDeviceToHost);
cudaMemcpy(host_Q, dev_qx, n * n * sizeof (int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("Transfer GPU -> CPU, time: %lf ms\n", dt_ms);
sum+=dt_ms;
t3s = dt_ms;
printf("Total time: %lf ms\n\n----------------------------\n", sum);
t4s = sum;
// Running sequentially on CPU now
if(runcpu == 'y')
{
printf("\n");
printf("Sequential execution on CPU (could take a while)... \n");
cudaEventRecord(start, 0);
for (k = 0; k < n; ++k)
{
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if ((D[i * n + k] + D[k * n + j]) < D[i * n + j])
{
D[i * n + j] = D[i * n + k] + D[k * n + j];
Q[i * n + j] = k;
}
}
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("CPU time: %lf ms\n", dt_ms);
t5s = dt_ms;
printf("\n");
// Result validation
printf("Comparing CPU results with GPU results...");
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
if (abs(D[i * n + j] - host_D[i * n + j]) > tolerance)
{
printf("ERROR: Different results in row i = %d and column j = %d, CPU result = %f GPU, result = %f \n", i, j, D[i * n + j], host_D[i * n + j]);
}
}
}
printf("Comparison complete! \n");
}
else
{
t5s = -1;
}
printf("Results are written to file resultsfwblocked.csv\n==========================================\n");
FILE *fptr;
fptr = fopen("resultsfwblocked.csv","a");
fprintf(fptr,"%d,%d,%d,%d,%d,%d,%d,%lf,%lf,%lf,%lf,%lf\n",n, bk11, bk21, bk22, bk31, bk32, gputhreads, t1s, t2s, t3s, t4s, t5s);
fclose(fptr);
return 0;
}
|
4dfd927211d8f6ad1f9ade3d9337a586f6c4d053.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fake_quantize_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
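// Block-level abs-max reduction: each thread accumulates a partial abs-max over
// its share of the input, a shared-memory tree reduction then leaves the block
// maximum in shared_max_data[0], and thread 0 writes it to out[blockIdx.x].
// FindAbsMaxFunctor below chains two launches: the first produces one partial
// maximum per block, the second reduces those partial maxima to a single value.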
template <typename T>
__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
extern __shared__ char* shared_max_data_tmp[];
auto shared_max_data = reinterpret_cast<T*>(shared_max_data_tmp);
if (gridDim.x > 1) {
shared_max_data[tid] = T(0);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T tmp = abs(in[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
} else {
if (bid < n) {
shared_max_data[tid] = abs(in[bid]);
} else {
shared_max_data[tid] = T(0);
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* in,
const int num, T* out) {
int block = 1024;
int grid = (block - 1 + num) / block;
grid = (grid > block) ? block : grid;
framework::Tensor max;
T* max_data = max.mutable_data<T>(phi::make_ddim({grid}), ctx.GetPlace());
hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(grid), dim3(block), 1024 * sizeof(T), ctx.stream(),
in, num, max_data);
hipLaunchKernelGGL(( FindAbsMaxKernel<T>), dim3(1), dim3(block), 1024 * sizeof(T), ctx.stream(),
max_data, grid, out);
}
};
template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>;
template struct FindAbsMaxFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template <typename T>
__global__ void FindChannelAbsMaxKernelQuantAxis0(const T* in, const int n,
const int c, T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
extern __shared__ T shared_max_data[];
shared_max_data[tid] = T(0);
for (int i = tid; i < channel_size; i += blockDim.x) {
T tmp = fabs(in_c[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
__global__ void FindChannelAbsMaxKernelQuantAxis1(const T* in, const int n,
const int cin, const int cout,
T* out) {
extern __shared__ T shared_max_data[];
int cout_wh_size = n / cin;
int wh_size = n / (cin * cout);
int tid = threadIdx.x;
int bid = blockIdx.x;
const T* in_current = in + tid * cout_wh_size + bid * wh_size;
shared_max_data[tid] = T(0);
for (int i = 0; i < wh_size; i++) {
T tmp = fabs(in_current[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
__syncthreads();
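  // Tree reduction over the blockDim.x partial maxima; the ceil-halving step and
  // the explicit i == 1 -> 0 exit also handle block sizes that are not powers of
  // two.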
int len = blockDim.x;
for (int i = (len + 1) / 2; i > 0; len = i, i = (i + 1) / 2) {
if (tid < i && tid + i < len &&
shared_max_data[tid] < shared_max_data[tid + i]) {
shared_max_data[tid] = shared_max_data[tid + i];
}
if (i == 1) {
i = 0; // break the loop
}
__syncthreads();
}
if (tid == 0 && shared_max_data[0] > out[bid]) {
out[bid] = shared_max_data[0];
}
}
template <typename T>
struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in_tensor, const int quant_axis,
T* out_abs_max) {
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
const int num = in_tensor.numel();
auto in_dims = in_tensor.dims();
const T* in_data = in_tensor.data<T>();
if (quant_axis == 0) {
int cout = in_dims[0];
int grid = cout;
int block = 1024;
hipLaunchKernelGGL(( FindChannelAbsMaxKernelQuantAxis0<
T>), dim3(grid), dim3(block), block * sizeof(T), ctx.stream(),
in_data, num, cout, out_abs_max);
} else if (quant_axis == 1) {
int cin = in_dims[0];
int cout = in_dims[1];
int grid = cout;
int max_threads = 1024;
#ifdef PADDLE_WITH_HIP
hipMemset(out_abs_max, 0, sizeof(T) * cout);
#else
hipMemset(out_abs_max, 0, sizeof(T) * cout);
#endif
for (int i = 0; i < cin / max_threads; i++) {
int block = max_threads;
hipLaunchKernelGGL(( FindChannelAbsMaxKernelQuantAxis1<
T>), dim3(grid), dim3(block), block * sizeof(T), ctx.stream(),
in_data, num, cin, cout, out_abs_max);
in_data += num / cin;
}
int block = cin % max_threads;
if (block > 0) {
hipLaunchKernelGGL(( FindChannelAbsMaxKernelQuantAxis1<
T>), dim3(grid), dim3(block), block * sizeof(T), ctx.stream(),
in_data, num, in_dims[0], in_dims[1], out_abs_max);
}
}
}
};
template struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
__global__ void ClipAndQuantKernel(const T* in, const T* scale,
const int bin_cnt, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
T s = scale[0];
T inv_s = inverse(s);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T x = in[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out[i] = round(v);
}
}
template <typename T>
__global__ void ClipAndQuantDequantKernel(const T* in, const T* scale,
const int bin_cnt, const int n,
T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
T s = scale[0];
T inv_s = inverse(s);
T bin_cnt_t = static_cast<T>(bin_cnt);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T x = in[i];
x = x > s ? s : x;
x = x < -s ? -s : x;
x = bin_cnt_t * inv_s * x;
x = static_cast<T>(round(static_cast<float>(x)));
out[i] = (x * s) / bin_cnt_t;
}
}
template <typename T>
struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( ClipAndQuantKernel<T>), dim3(grid), dim3(block), 0, ctx.stream(),
in_data, scale_data, bin_cnt, num, out_data);
}
};
template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>;
template <typename T>
struct ClipAndFakeQuantDequantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( ClipAndQuantDequantKernel<T>), dim3(grid), dim3(block), 0, ctx.stream(),
in_data, scale_data, bin_cnt, num, out_data);
}
};
// ChannelClipAndQuantKernel for quant_axis is 0
template <typename T>
__global__ void ChannelClipAndQuantKernelQuantAxis0(const T* in, const T* scale,
const int bin_cnt,
const int n, const int c,
T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
T s = scale[blockIdx.x];
T inv_s = inverse(s);
for (int i = tid; i < channel_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v);
}
}
// ChannelClipAndQuantKernel for quant_axis is 1
template <typename T>
__global__ void ChannelClipAndQuantKernelQuantAxis1(const T* in, const T* scale,
const int bin_cnt,
const int n, const int cin,
const int cout, T* out) {
T s = scale[blockIdx.x % cout];
T inv_s = inverse(s);
int wh_size = n / (cin * cout);
const T* in_c = in + blockIdx.x * wh_size;
T* out_c = out + blockIdx.x * wh_size;
for (int i = threadIdx.x; i < wh_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v);
}
}
template <typename T>
struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, const int quant_axis,
framework::Tensor* out) {
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
int num = in.numel();
auto in_dims = in.dims();
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
hipLaunchKernelGGL(( ChannelClipAndQuantKernelQuantAxis0<T>), dim3(grid), dim3(block), 0, ctx.stream(),
in_data, scale_data, bin_cnt, num, in_dims[0], out_data);
} else if (quant_axis == 1) {
int grid = in_dims[0] * in_dims[1];
int block = 1024;
hipLaunchKernelGGL(( ChannelClipAndQuantKernelQuantAxis1<T>), dim3(grid), dim3(block), 0, ctx.stream(),
in_data, scale_data, bin_cnt, num, in_dims[0], in_dims[1], out_data);
}
}
};
template struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext,
float>;
template <typename T>
__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale,
const T* last_scale,
const int64_t* iter,
const int window_size, T* scale_arr,
T* out_scale, int* need_find_max,
int* out_size) {
int it = iter[0];
int idx = it % window_size;
T removed = scale_arr[idx];
T cur = cur_scale[0];
scale_arr[idx] = cur;
T max = last_scale[0];
out_scale[0] = max < cur ? cur : max;
if (fabs(removed - max) < 1e-6) {
need_find_max[0] = 1;
out_size[0] = it > window_size ? window_size : it;
} else {
need_find_max[0] = 0;
}
}
template <typename T>
struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& cur_scale,
const framework::Tensor& last_scale,
const framework::Tensor& iter, const int window_size,
framework::Tensor* scales_arr, framework::Tensor* out_scale) {
const auto gpu_place = ctx.GetPlace();
T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
framework::Tensor need_find_max, out_size;
int* find_max = need_find_max.mutable_data<int>({1}, gpu_place);
int* out_size_data = out_size.mutable_data<int>({1}, gpu_place);
hipLaunchKernelGGL(( FindRangeAbsMaxAndFillArray<T>), dim3(1), dim3(1), 0, ctx.stream(),
cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(),
window_size, scale_arr, out_scale_data, find_max, out_size_data);
int g_find_max;
memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max,
sizeof(int), ctx.stream());
ctx.Wait();
if (g_find_max) {
int len;
memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data,
sizeof(int), ctx.stream());
ctx.Wait();
FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len,
out_scale_data);
}
}
};
template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in_accum,
const framework::Tensor& in_state, const T* cur_scale,
const float rate, framework::Tensor* out_state,
framework::Tensor* out_accum, framework::Tensor* out_scale) {
const auto gpu_place = ctx.GetPlace();
T accum;
T state;
T scale;
memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T),
ctx.stream());
ctx.Wait();
T rate_t = static_cast<T>(rate);
state = rate_t * state + static_cast<T>(1.0);
accum = rate_t * accum + scale;
scale = accum / state;
memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place),
platform::CPUPlace(), &accum, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place),
platform::CPUPlace(), &state, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place),
platform::CPUPlace(), &scale, sizeof(T), ctx.stream());
ctx.Wait();
}
};
// ChannelClipAndQuantDequantKernel for quant_axis is 0
template <typename T>
__global__ void ChannelClipAndQuantDequantKernelQuantAxis0(
const T* in, const T* scale, const int bin_cnt, const int n, const int c,
T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
T s = scale[blockIdx.x];
T inv_s = inverse(s);
for (int i = tid; i < channel_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v) * s / bin_cnt;
}
}
// ChannelClipAndQuantDequantKernel for quant_axis is 1
template <typename T>
__global__ void ChannelClipAndQuantDequantKernelQuantAxis1(
const T* in, const T* scale, const int bin_cnt, const int n, const int cin,
const int cout, T* out) {
T s = scale[blockIdx.x % cout];
T inv_s = inverse(s);
int wh_size = n / (cin * cout);
const T* in_c = in + blockIdx.x * wh_size;
T* out_c = out + blockIdx.x * wh_size;
for (int i = threadIdx.x; i < wh_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v) * s / bin_cnt;
}
}
template <typename T>
struct ChannelClipFakeQuantDequantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, const int quant_axis,
framework::Tensor* out) {
    // At present, channelwise quantization supports conv2d, depthwise_conv2d,
    // conv2d_transpose and mul
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
int num = in.numel();
auto in_dims = in.dims();
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
hipLaunchKernelGGL(( ChannelClipAndQuantDequantKernelQuantAxis0<
T>), dim3(grid), dim3(block), 0, ctx.stream(), in_data, scale_data, bin_cnt,
num, in_dims[0], out_data);
} else if (quant_axis == 1) {
int grid = in_dims[0] * in_dims[1];
int block = 1024;
hipLaunchKernelGGL(( ChannelClipAndQuantDequantKernelQuantAxis1<
T>), dim3(grid), dim3(block), 0, ctx.stream(),
in_data, scale_data, bin_cnt, num, in_dims[0], in_dims[1], out_data);
}
}
};
template struct ChannelClipFakeQuantDequantFunctor<platform::CUDADeviceContext,
float>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
using float16 = paddle::platform::float16;
REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max,
ops::FakeQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_dequantize_abs_max,
ops::FakeQuantizeDequantizeAbsMaxKernel<CUDA, float>,
ops::FakeQuantizeDequantizeAbsMaxKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max,
ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max,
ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_moving_average_abs_max,
ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(moving_average_abs_max_scale,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float>,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_dequantize_moving_average_abs_max,
ops::FakeQuantizeDequantizeMovingAverageAbsMaxKernel<CUDA, float>,
ops::FakeQuantizeDequantizeMovingAverageAbsMaxKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(stright_throuth_estimator_grad,
ops::StrightThroughEstimatorGradKernel<CUDA, float>,
ops::StrightThroughEstimatorGradKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(
fake_channel_wise_quantize_dequantize_abs_max,
ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel<CUDA, float>);
| 4dfd927211d8f6ad1f9ade3d9337a586f6c4d053.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/fake_quantize_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
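// Block-level abs-max reduction: each thread accumulates a partial abs-max over
// its share of the input, a shared-memory tree reduction then leaves the block
// maximum in shared_max_data[0], and thread 0 writes it to out[blockIdx.x].
// FindAbsMaxFunctor below chains two launches: the first produces one partial
// maximum per block, the second reduces those partial maxima to a single value.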
template <typename T>
__global__ void FindAbsMaxKernel(const T* in, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
extern __shared__ char* shared_max_data_tmp[];
auto shared_max_data = reinterpret_cast<T*>(shared_max_data_tmp);
if (gridDim.x > 1) {
shared_max_data[tid] = T(0);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T tmp = abs(in[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
} else {
if (bid < n) {
shared_max_data[tid] = abs(in[bid]);
} else {
shared_max_data[tid] = T(0);
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
struct FindAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx, const T* in,
const int num, T* out) {
int block = 1024;
int grid = (block - 1 + num) / block;
grid = (grid > block) ? block : grid;
framework::Tensor max;
T* max_data = max.mutable_data<T>(phi::make_ddim({grid}), ctx.GetPlace());
FindAbsMaxKernel<T><<<grid, block, 1024 * sizeof(T), ctx.stream()>>>(
in, num, max_data);
FindAbsMaxKernel<T><<<1, block, 1024 * sizeof(T), ctx.stream()>>>(
max_data, grid, out);
}
};
template struct FindAbsMaxFunctor<platform::CUDADeviceContext, float>;
template struct FindAbsMaxFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template <typename T>
__global__ void FindChannelAbsMaxKernelQuantAxis0(const T* in, const int n,
const int c, T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
extern __shared__ T shared_max_data[];
shared_max_data[tid] = T(0);
for (int i = tid; i < channel_size; i += blockDim.x) {
T tmp = fabs(in_c[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid < i && (shared_max_data[tid] < shared_max_data[tid + i])) {
shared_max_data[tid] = shared_max_data[tid + i];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared_max_data[0];
}
}
template <typename T>
__global__ void FindChannelAbsMaxKernelQuantAxis1(const T* in, const int n,
const int cin, const int cout,
T* out) {
extern __shared__ T shared_max_data[];
int cout_wh_size = n / cin;
int wh_size = n / (cin * cout);
int tid = threadIdx.x;
int bid = blockIdx.x;
const T* in_current = in + tid * cout_wh_size + bid * wh_size;
shared_max_data[tid] = T(0);
for (int i = 0; i < wh_size; i++) {
T tmp = fabs(in_current[i]);
if (tmp > shared_max_data[tid]) {
shared_max_data[tid] = tmp;
}
}
__syncthreads();
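  // Tree reduction over the blockDim.x partial maxima; the ceil-halving step and
  // the explicit i == 1 -> 0 exit also handle block sizes that are not powers of
  // two.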
int len = blockDim.x;
for (int i = (len + 1) / 2; i > 0; len = i, i = (i + 1) / 2) {
if (tid < i && tid + i < len &&
shared_max_data[tid] < shared_max_data[tid + i]) {
shared_max_data[tid] = shared_max_data[tid + i];
}
if (i == 1) {
i = 0; // break the loop
}
__syncthreads();
}
if (tid == 0 && shared_max_data[0] > out[bid]) {
out[bid] = shared_max_data[0];
}
}
template <typename T>
struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in_tensor, const int quant_axis,
T* out_abs_max) {
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
const int num = in_tensor.numel();
auto in_dims = in_tensor.dims();
const T* in_data = in_tensor.data<T>();
if (quant_axis == 0) {
int cout = in_dims[0];
int grid = cout;
int block = 1024;
FindChannelAbsMaxKernelQuantAxis0<
T><<<grid, block, block * sizeof(T), ctx.stream()>>>(
in_data, num, cout, out_abs_max);
} else if (quant_axis == 1) {
int cin = in_dims[0];
int cout = in_dims[1];
int grid = cout;
int max_threads = 1024;
#ifdef PADDLE_WITH_HIP
hipMemset(out_abs_max, 0, sizeof(T) * cout);
#else
cudaMemset(out_abs_max, 0, sizeof(T) * cout);
#endif
for (int i = 0; i < cin / max_threads; i++) {
int block = max_threads;
FindChannelAbsMaxKernelQuantAxis1<
T><<<grid, block, block * sizeof(T), ctx.stream()>>>(
in_data, num, cin, cout, out_abs_max);
in_data += num / cin;
}
int block = cin % max_threads;
if (block > 0) {
FindChannelAbsMaxKernelQuantAxis1<
T><<<grid, block, block * sizeof(T), ctx.stream()>>>(
in_data, num, in_dims[0], in_dims[1], out_abs_max);
}
}
}
};
template struct FindChannelAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
__global__ void ClipAndQuantKernel(const T* in, const T* scale,
const int bin_cnt, const int n, T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
T s = scale[0];
T inv_s = inverse(s);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T x = in[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out[i] = round(v);
}
}
template <typename T>
__global__ void ClipAndQuantDequantKernel(const T* in, const T* scale,
const int bin_cnt, const int n,
T* out) {
int bid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
T s = scale[0];
T inv_s = inverse(s);
T bin_cnt_t = static_cast<T>(bin_cnt);
for (int i = bid; i < n; i += blockDim.x * gridDim.x) {
T x = in[i];
x = x > s ? s : x;
x = x < -s ? -s : x;
x = bin_cnt_t * inv_s * x;
x = static_cast<T>(round(static_cast<float>(x)));
out[i] = (x * s) / bin_cnt_t;
}
}
template <typename T>
struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
ClipAndQuantKernel<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, out_data);
}
};
template struct ClipAndFakeQuantFunctor<platform::CUDADeviceContext, float>;
template <typename T>
struct ClipAndFakeQuantDequantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, framework::Tensor* out) {
int num = in.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
ClipAndQuantDequantKernel<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, out_data);
}
};
// ChannelClipAndQuantKernel for quant_axis is 0
template <typename T>
__global__ void ChannelClipAndQuantKernelQuantAxis0(const T* in, const T* scale,
const int bin_cnt,
const int n, const int c,
T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
T s = scale[blockIdx.x];
T inv_s = inverse(s);
for (int i = tid; i < channel_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v);
}
}
// ChannelClipAndQuantKernel for quant_axis is 1
template <typename T>
__global__ void ChannelClipAndQuantKernelQuantAxis1(const T* in, const T* scale,
const int bin_cnt,
const int n, const int cin,
const int cout, T* out) {
T s = scale[blockIdx.x % cout];
T inv_s = inverse(s);
int wh_size = n / (cin * cout);
const T* in_c = in + blockIdx.x * wh_size;
T* out_c = out + blockIdx.x * wh_size;
for (int i = threadIdx.x; i < wh_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v);
}
}
template <typename T>
struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, const int quant_axis,
framework::Tensor* out) {
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
int num = in.numel();
auto in_dims = in.dims();
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
ChannelClipAndQuantKernelQuantAxis0<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, in_dims[0], out_data);
} else if (quant_axis == 1) {
int grid = in_dims[0] * in_dims[1];
int block = 1024;
ChannelClipAndQuantKernelQuantAxis1<T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, in_dims[0], in_dims[1], out_data);
}
}
};
template struct ChannelClipAndFakeQuantFunctor<platform::CUDADeviceContext,
float>;
template <typename T>
__global__ void FindRangeAbsMaxAndFillArray(const T* cur_scale,
const T* last_scale,
const int64_t* iter,
const int window_size, T* scale_arr,
T* out_scale, int* need_find_max,
int* out_size) {
int it = iter[0];
int idx = it % window_size;
T removed = scale_arr[idx];
T cur = cur_scale[0];
scale_arr[idx] = cur;
T max = last_scale[0];
out_scale[0] = max < cur ? cur : max;
if (fabs(removed - max) < 1e-6) {
need_find_max[0] = 1;
out_size[0] = it > window_size ? window_size : it;
} else {
need_find_max[0] = 0;
}
}
template <typename T>
struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& cur_scale,
const framework::Tensor& last_scale,
const framework::Tensor& iter, const int window_size,
framework::Tensor* scales_arr, framework::Tensor* out_scale) {
const auto gpu_place = ctx.GetPlace();
T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
framework::Tensor need_find_max, out_size;
int* find_max = need_find_max.mutable_data<int>({1}, gpu_place);
int* out_size_data = out_size.mutable_data<int>({1}, gpu_place);
FindRangeAbsMaxAndFillArray<T><<<1, 1, 0, ctx.stream()>>>(
cur_scale.data<T>(), last_scale.data<T>(), iter.data<int64_t>(),
window_size, scale_arr, out_scale_data, find_max, out_size_data);
int g_find_max;
memory::Copy(platform::CPUPlace(), &g_find_max, gpu_place, find_max,
sizeof(int), ctx.stream());
ctx.Wait();
if (g_find_max) {
int len;
memory::Copy(platform::CPUPlace(), &len, gpu_place, out_size_data,
sizeof(int), ctx.stream());
ctx.Wait();
FindAbsMaxFunctor<platform::CUDADeviceContext, T>()(ctx, scale_arr, len,
out_scale_data);
}
}
};
template struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, float>;
template <typename T>
struct FindMovingAverageAbsMaxFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in_accum,
const framework::Tensor& in_state, const T* cur_scale,
const float rate, framework::Tensor* out_state,
framework::Tensor* out_accum, framework::Tensor* out_scale) {
const auto gpu_place = ctx.GetPlace();
T accum;
T state;
T scale;
memory::Copy(platform::CPUPlace(), &accum, gpu_place, in_accum.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &state, gpu_place, in_state.data<T>(),
sizeof(T), ctx.stream());
memory::Copy(platform::CPUPlace(), &scale, gpu_place, cur_scale, sizeof(T),
ctx.stream());
ctx.Wait();
T rate_t = static_cast<T>(rate);
state = rate_t * state + static_cast<T>(1.0);
accum = rate_t * accum + scale;
scale = accum / state;
memory::Copy(gpu_place, out_accum->mutable_data<T>(gpu_place),
platform::CPUPlace(), &accum, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_state->mutable_data<T>(gpu_place),
platform::CPUPlace(), &state, sizeof(T), ctx.stream());
memory::Copy(gpu_place, out_scale->mutable_data<T>(gpu_place),
platform::CPUPlace(), &scale, sizeof(T), ctx.stream());
ctx.Wait();
}
};
// ChannelClipAndQuantDequantKernel for quant_axis is 0
template <typename T>
__global__ void ChannelClipAndQuantDequantKernelQuantAxis0(
const T* in, const T* scale, const int bin_cnt, const int n, const int c,
T* out) {
int tid = threadIdx.x;
int channel_size = n / c;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
T s = scale[blockIdx.x];
T inv_s = inverse(s);
for (int i = tid; i < channel_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v) * s / bin_cnt;
}
}
// ChannelClipAndQuantDequantKernel for quant_axis is 1
template <typename T>
__global__ void ChannelClipAndQuantDequantKernelQuantAxis1(
const T* in, const T* scale, const int bin_cnt, const int n, const int cin,
const int cout, T* out) {
T s = scale[blockIdx.x % cout];
T inv_s = inverse(s);
int wh_size = n / (cin * cout);
const T* in_c = in + blockIdx.x * wh_size;
T* out_c = out + blockIdx.x * wh_size;
for (int i = threadIdx.x; i < wh_size; i += blockDim.x) {
T x = in_c[i];
T v = x > s ? s : x;
v = v < -s ? -s : v;
v = bin_cnt * inv_s * v;
out_c[i] = round(v) * s / bin_cnt;
}
}
template <typename T>
struct ChannelClipFakeQuantDequantFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& ctx,
const framework::Tensor& in, const framework::Tensor& scale,
const int bin_cnt, const int quant_axis,
framework::Tensor* out) {
    // At present, channelwise quantization supports conv2d, depthwise_conv2d,
    // conv2d_transpose and mul
PADDLE_ENFORCE_EQ(
quant_axis == 0 || quant_axis == 1, true,
platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
"the received is %d",
quant_axis));
int num = in.numel();
auto in_dims = in.dims();
const T* in_data = in.data<T>();
const T* scale_data = scale.data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
ChannelClipAndQuantDequantKernelQuantAxis0<
T><<<grid, block, 0, ctx.stream()>>>(in_data, scale_data, bin_cnt,
num, in_dims[0], out_data);
} else if (quant_axis == 1) {
int grid = in_dims[0] * in_dims[1];
int block = 1024;
ChannelClipAndQuantDequantKernelQuantAxis1<
T><<<grid, block, 0, ctx.stream()>>>(
in_data, scale_data, bin_cnt, num, in_dims[0], in_dims[1], out_data);
}
}
};
template struct ChannelClipFakeQuantDequantFunctor<platform::CUDADeviceContext,
float>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
using float16 = paddle::platform::float16;
REGISTER_OP_CUDA_KERNEL(fake_quantize_abs_max,
ops::FakeQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_dequantize_abs_max,
ops::FakeQuantizeDequantizeAbsMaxKernel<CUDA, float>,
ops::FakeQuantizeDequantizeAbsMaxKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(fake_channel_wise_quantize_abs_max,
ops::FakeChannelWiseQuantizeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max,
ops::FakeQuantizeRangeAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_moving_average_abs_max,
ops::FakeQuantizeMovingAverageAbsMaxKernel<CUDA, float>);
REGISTER_OP_CUDA_KERNEL(moving_average_abs_max_scale,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float>,
ops::MovingAverageAbsMaxScaleKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(
fake_quantize_dequantize_moving_average_abs_max,
ops::FakeQuantizeDequantizeMovingAverageAbsMaxKernel<CUDA, float>,
ops::FakeQuantizeDequantizeMovingAverageAbsMaxKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(stright_throuth_estimator_grad,
ops::StrightThroughEstimatorGradKernel<CUDA, float>,
ops::StrightThroughEstimatorGradKernel<CUDA, float16>);
REGISTER_OP_CUDA_KERNEL(
fake_channel_wise_quantize_dequantize_abs_max,
ops::FakeChannelWiseQuantizeDequantizeAbsMaxKernel<CUDA, float>);
|
2934a903904a6992a69e8cee7ef6f7758d8f6965.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
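// Kernels that call the sequential scan algorithms (thrust::seq) from device
// code; each is launched with a single thread so the per-thread implementation
// is exercised on the device.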
template<typename Iterator1, typename Iterator2>
__global__
void inclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::inclusive_scan(thrust::seq, first, last, result);
}
template<typename Iterator1, typename Iterator2>
__global__
void exclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::exclusive_scan(thrust::seq, first, last, result);
}
template<typename Iterator1, typename Iterator2, typename T>
__global__
void exclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result, T init)
{
thrust::exclusive_scan(thrust::seq, first, last, result, init);
}
template<typename T>
struct TestScanDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_kernel), dim3(1),dim3(1), 0, 0, d_input.begin(), d_input.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, d_input.begin(), d_input.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin(), (T) 11);
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, d_input.begin(), d_input.end(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_input;
d_output = d_input;
thrust::inclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
hipLaunchKernelGGL(( inclusive_scan_kernel), dim3(1),dim3(1), 0, 0, d_output.begin(), d_output.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
#if TORCH_HIP_VERSION > 5000
h_output = h_input;
d_output = d_input;
thrust::exclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(1),dim3(1), 0, 0, d_output.begin(), d_output.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
#else
KNOWN_FAILURE; // XXX nvcc 5 generates bad code for inplace sequential exclusive_scan
#endif
}
};
VariableUnitTest<TestScanDeviceSeq, IntegralTypes> TestScanDeviceSeqInstance;
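// TestScanCudaStreams: the same scans submitted to an explicit stream via thrust::hip::par(s)
// (thrust::cuda::par(s) in the original CUDA source); expected prefix sums are written out by
// hand and the input vector is checked to be left unmodified.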
void TestScanCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(5);
Vector result(5);
Vector output(5);
input[0] = 1; input[1] = 3; input[2] = -2; input[3] = 4; input[4] = -5;
Vector input_copy(input);
hipStream_t s;
hipStreamCreate(&s);
// inclusive scan
iter = thrust::inclusive_scan(thrust::hip::par(s), input.begin(), input.end(), output.begin());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan
iter = thrust::exclusive_scan(thrust::hip::par(s), input.begin(), input.end(), output.begin(), 0);
hipStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init
iter = thrust::exclusive_scan(thrust::hip::par(s), input.begin(), input.end(), output.begin(), 3);
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inclusive scan with op
iter = thrust::inclusive_scan(thrust::hip::par(s), input.begin(), input.end(), output.begin(), thrust::plus<T>());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init and op
iter = thrust::exclusive_scan(thrust::hip::par(s), input.begin(), input.end(), output.begin(), 3, thrust::plus<T>());
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inplace inclusive scan
input = input_copy;
iter = thrust::inclusive_scan(thrust::hip::par(s), input.begin(), input.end(), input.begin());
hipStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with init
input = input_copy;
iter = thrust::exclusive_scan(thrust::hip::par(s), input.begin(), input.end(), input.begin(), 3);
hipStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with implicit init=0
input = input_copy;
iter = thrust::exclusive_scan(thrust::hip::par(s), input.begin(), input.end(), input.begin());
hipStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestScanCudaStreams);
| 2934a903904a6992a69e8cee7ef6f7758d8f6965.cu | #include <cstdio>
#include <unittest/unittest.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
template<typename Iterator1, typename Iterator2>
__global__
void inclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::inclusive_scan(thrust::seq, first, last, result);
}
template<typename Iterator1, typename Iterator2>
__global__
void exclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
thrust::exclusive_scan(thrust::seq, first, last, result);
}
template<typename Iterator1, typename Iterator2, typename T>
__global__
void exclusive_scan_kernel(Iterator1 first, Iterator1 last, Iterator2 result, T init)
{
thrust::exclusive_scan(thrust::seq, first, last, result, init);
}
template<typename T>
struct TestScanDeviceSeq
{
void operator()(const size_t n)
{
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(n);
thrust::device_vector<T> d_output(n);
thrust::inclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
inclusive_scan_kernel<<<1,1>>>(d_input.begin(), d_input.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
exclusive_scan_kernel<<<1,1>>>(d_input.begin(), d_input.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
thrust::exclusive_scan(h_input.begin(), h_input.end(), h_output.begin(), (T) 11);
exclusive_scan_kernel<<<1,1>>>(d_input.begin(), d_input.end(), d_output.begin(), (T) 11);
ASSERT_EQUAL(d_output, h_output);
// in-place scans
h_output = h_input;
d_output = d_input;
thrust::inclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
inclusive_scan_kernel<<<1,1>>>(d_output.begin(), d_output.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
#if CUDA_VERSION > 5000
h_output = h_input;
d_output = d_input;
thrust::exclusive_scan(h_output.begin(), h_output.end(), h_output.begin());
exclusive_scan_kernel<<<1,1>>>(d_output.begin(), d_output.end(), d_output.begin());
ASSERT_EQUAL(d_output, h_output);
#else
KNOWN_FAILURE; // XXX nvcc 5 generates bad code for inplace sequential exclusive_scan
#endif
}
};
VariableUnitTest<TestScanDeviceSeq, IntegralTypes> TestScanDeviceSeqInstance;
void TestScanCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(5);
Vector result(5);
Vector output(5);
input[0] = 1; input[1] = 3; input[2] = -2; input[3] = 4; input[4] = -5;
Vector input_copy(input);
cudaStream_t s;
cudaStreamCreate(&s);
// inclusive scan
iter = thrust::inclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), output.begin());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan
iter = thrust::exclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), output.begin(), 0);
cudaStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init
iter = thrust::exclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), output.begin(), 3);
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inclusive scan with op
iter = thrust::inclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), output.begin(), thrust::plus<T>());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// exclusive scan with init and op
iter = thrust::exclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), output.begin(), 3, thrust::plus<T>());
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(input, input_copy);
ASSERT_EQUAL(output, result);
// inplace inclusive scan
input = input_copy;
iter = thrust::inclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), input.begin());
cudaStreamSynchronize(s);
result[0] = 1; result[1] = 4; result[2] = 2; result[3] = 6; result[4] = 1;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with init
input = input_copy;
iter = thrust::exclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), input.begin(), 3);
cudaStreamSynchronize(s);
result[0] = 3; result[1] = 4; result[2] = 7; result[3] = 5; result[4] = 9;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
// inplace exclusive scan with implicit init=0
input = input_copy;
iter = thrust::exclusive_scan(thrust::cuda::par(s), input.begin(), input.end(), input.begin());
cudaStreamSynchronize(s);
result[0] = 0; result[1] = 1; result[2] = 4; result[3] = 2; result[4] = 6;
ASSERT_EQUAL(iter - input.begin(), input.size());
ASSERT_EQUAL(input, result);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestScanCudaStreams);
|
195ca20445cfe878fb2fcdfe057a7d0b1d347556.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reorg_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
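// For every (matrix size, block shape) pair: allocate the buffers, round the grid up so it covers
// the matrix, warm up with 10 launches, then time 1000 launches of reorg_kernel. There is no
// synchronize after the timed loop, so the reported time is dominated by launch/queueing overhead.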
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
int w = XSIZE;
int h = YSIZE;
int c = 2;
int batch = 2;
int stride = 2;
int forward = XSIZE*YSIZE;
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(reorg_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, N, x, w, h, c, batch, stride, forward, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(reorg_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, N, x, w, h, c, batch, stride, forward, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(reorg_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, N, x, w, h, c, batch, stride, forward, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 195ca20445cfe878fb2fcdfe057a7d0b1d347556.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reorg_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
int w = XSIZE;
int h = YSIZE;
int c = 2;
int batch = 2;
int stride = 2;
int forward = XSIZE*YSIZE;
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reorg_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reorg_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reorg_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0651e5f17cef79892a85858a3b05572b5207df99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <srad.h>
#include <stdio.h>
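// SRAD (Speckle Reducing Anisotropic Diffusion) kernels as found in the Rodinia benchmark suite:
// srad_cuda_1 computes the per-pixel derivatives and the saturated diffusion coefficient,
// srad_cuda_2 forms the divergence and applies the image update.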
__global__ void
srad_cuda_1(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
if( index_n > 0 ) {
north[ty][tx] = J_cuda[index_n];
}
if( index_s < cols * rows) {
south[ty][tx] = J_cuda[index_s];
}
if ( by == 0 ){
north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx];
}
else if ( by == gridDim.y - 1 ){
south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
if ( index_w > 0 ) {
west[ty][tx] = J_cuda[index_w];
}
if( index_e < cols * rows) {
east[ty][tx] = J_cuda[index_e];
}
if ( bx == 0 ){
west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty];
}
else if ( bx == gridDim.x - 1 ){
east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
__global__ void
srad_cuda_2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
if( index_s < cols * rows) {
south_c[ty][tx] = C_cuda[index_s];
}
if ( by == gridDim.y - 1 ){
south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
if( index_e < cols * rows) {
east_c[ty][tx] = C_cuda[index_e];
}
if ( bx == gridDim.x - 1 ){
east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
| 0651e5f17cef79892a85858a3b05572b5207df99.cu | #include <srad.h>
#include <stdio.h>
__global__ void
srad_cuda_1(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
if( index_n > 0 ) {
north[ty][tx] = J_cuda[index_n];
}
if( index_s < cols * rows) {
south[ty][tx] = J_cuda[index_s];
}
if ( by == 0 ){
north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx];
}
else if ( by == gridDim.y - 1 ){
south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
if ( index_w > 0 ) {
west[ty][tx] = J_cuda[index_w];
}
if( index_e < cols * rows) {
east[ty][tx] = J_cuda[index_e];
}
if ( bx == 0 ){
west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty];
}
else if ( bx == gridDim.x - 1 ){
east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficent (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate diffusion coefficent
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
__global__ void
srad_cuda_2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
if( index_s < cols * rows) {
south_c[ty][tx] = C_cuda[index_s];
}
if ( by == gridDim.y - 1 ){
south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx];
}
__syncthreads();
if( index_e < cols * rows) {
east_c[ty][tx] = C_cuda[index_e];
}
if ( bx == gridDim.x - 1 ){
east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1];
}
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
|
3be1c8b2cfd6bb79e44c84871f1724adf1f4c2bb.hip | // !!! This is a file automatically generated by hipify!!!
/* Implements the helper functions declared in helpers.cuh */
#include <stdio.h>
#include "helpers.cuh"
#include <hipblas.h>   /* provides the HIPBLAS_STATUS_* codes used below */
#include <hiprand/hiprand.h>
/* forward declarations: these static helpers are defined at the bottom of this file
but are called by checkCublas and checkCurand below */
static const char *cublasGerErrorString(int error);
static const char *curandGerErrorString(int error);
/* check whether the last HIP API call or kernel launch failed; if so, print an error message
and abort the program */
void gpuAssert(const char *file, int line){
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
fprintf(stderr, "Cuda error: %s \n in file : %s line number : %d\n", hipGetErrorString(code), file, line );
exit(-1);
}
}
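/* Typical use: call gpuAssert(__FILE__, __LINE__) immediately after a kernel launch or runtime
API call; helpers.cuh presumably wraps this in a convenience macro that fills in file/line. */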
/* check whether an error code returned by a cuBLAS/hipBLAS call indicates an error and if so print the error message */
void checkCublas(int code,const char *file, int line){
if(code!=HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "Cublas error: %s \n in file : %s line number : %d\n", cublasGerErrorString(code), file, line );
exit(-1);
}
}
/* check whether an error code returned by a hipRAND call indicates an error and if so print the error message */
void checkCurand(int code,const char *file, int line){
if(code!=HIPRAND_STATUS_SUCCESS){
fprintf(stderr, "Curand error: %s \n in file : %s line number : %d\n", curandGerErrorString(code), file, line );
exit(-1);
}
}
/*Return the error message based the error code for cublas */
static const char *cublasGerErrorString(int error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
case HIPBLAS_STATUS_NOT_SUPPORTED:
return "HIPBLAS_STATUS_NOT_SUPPORTED";
/* CUBLAS_STATUS_LICENSE_ERROR has no hipBLAS counterpart, so it is not handled in this hipified copy */
}
return "<unknown>";
}
/*Return the error message based the error code for hiprand */
static const char *curandGerErrorString(int error)
{
switch (error)
{
case HIPRAND_STATUS_SUCCESS:
return "HIPRAND_STATUS_SUCCESS";
case HIPRAND_STATUS_VERSION_MISMATCH:
return "HIPRAND_STATUS_VERSION_MISMATCH";
case HIPRAND_STATUS_NOT_INITIALIZED:
return "HIPRAND_STATUS_NOT_INITIALIZED";
case HIPRAND_STATUS_ALLOCATION_FAILED:
return "HIPRAND_STATUS_ALLOCATION_FAILED";
case HIPRAND_STATUS_TYPE_ERROR:
return "HIPRAND_STATUS_TYPE_ERROR";
case HIPRAND_STATUS_OUT_OF_RANGE:
return "HIPRAND_STATUS_OUT_OF_RANGE";
case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case HIPRAND_STATUS_LAUNCH_FAILURE:
return "HIPRAND_STATUS_LAUNCH_FAILURE";
case HIPRAND_STATUS_PREEXISTING_FAILURE:
return "HIPRAND_STATUS_PREEXISTING_FAILURE";
case HIPRAND_STATUS_INITIALIZATION_FAILED:
return "HIPRAND_STATUS_INITIALIZATION_FAILED";
case HIPRAND_STATUS_ARCH_MISMATCH:
return "HIPRAND_STATUS_ARCH_MISMATCH";
case HIPRAND_STATUS_INTERNAL_ERROR:
return "HIPRAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
/* Check whether a previous memory allocation was successful. If RAM is full usually the returned value is a NULL pointer.
For example if you allocate memory by doing
int *mem = malloc(sizeof(int)*SIZE)
check whether it was successful by calling
checkAllocRAM(mem) afterwards */
void checkAllocRAM(void *ptr){
if (ptr==NULL){
fprintf(stderr, "Memory Full.\nYour array is too large. Please try a smaller array.\n");
exit(EXIT_FAILURE);
}
}
/* This checks whether a file has been opened correctly. If opening the file failed, the returned value is a NULL pointer.
For example if you open a file using
FILE *file=fopen("file.txt","r");
check by calling isFileValid(file); */
void isFileValid(FILE *fp){
if (fp==NULL){
perror("A file access error occurred\n");
exit(EXIT_FAILURE);
}
}
| 3be1c8b2cfd6bb79e44c84871f1724adf1f4c2bb.cu | /* Implements the helper functions declared in helpers.cuh */
#include <stdio.h>
#include "helpers.cuh"
#include <cublas_v2.h>
#include <curand.h>
/* forward declarations: these static helpers are defined at the bottom of this file
but are called by checkCublas and checkCurand below */
static const char *cublasGerErrorString(int error);
static const char *curandGerErrorString(int error);
/* check whether the last CUDA API call or kernel launch failed; if so, print an error message
and abort the program */
void gpuAssert(const char *file, int line){
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
fprintf(stderr, "Cuda error: %s \n in file : %s line number : %d\n", cudaGetErrorString(code), file, line );
exit(-1);
}
}
/* check whether an error code returned by a cuBLAS call indicates an error and if so print the error message */
void checkCublas(int code,const char *file, int line){
if(code!=CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "Cublas error: %s \n in file : %s line number : %d\n", cublasGerErrorString(code), file, line );
exit(-1);
}
}
/* check whether an error code returned by a cuRAND call indicates an error and if so print the error message */
void checkCurand(int code,const char *file, int line){
if(code!=CURAND_STATUS_SUCCESS){
fprintf(stderr, "Curand error: %s \n in file : %s line number : %d\n", curandGerErrorString(code), file, line );
exit(-1);
}
}
/*Return the error message based the error code for cublas */
static const char *cublasGerErrorString(int error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
/*Return the error message based the error code for curand */
static const char *curandGerErrorString(int error)
{
switch (error)
{
case CURAND_STATUS_SUCCESS:
return "CURAND_STATUS_SUCCESS";
case CURAND_STATUS_VERSION_MISMATCH:
return "CURAND_STATUS_VERSION_MISMATCH";
case CURAND_STATUS_NOT_INITIALIZED:
return "CURAND_STATUS_NOT_INITIALIZED";
case CURAND_STATUS_ALLOCATION_FAILED:
return "CURAND_STATUS_ALLOCATION_FAILED";
case CURAND_STATUS_TYPE_ERROR:
return "CURAND_STATUS_TYPE_ERROR";
case CURAND_STATUS_OUT_OF_RANGE:
return "CURAND_STATUS_OUT_OF_RANGE";
case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case CURAND_STATUS_LAUNCH_FAILURE:
return "CURAND_STATUS_LAUNCH_FAILURE";
case CURAND_STATUS_PREEXISTING_FAILURE:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case CURAND_STATUS_INITIALIZATION_FAILED:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case CURAND_STATUS_ARCH_MISMATCH:
return "CURAND_STATUS_ARCH_MISMATCH";
case CURAND_STATUS_INTERNAL_ERROR:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
/* Check whether a previous memory allocation was successful. If RAM is full usually the returned value is a NULL pointer.
For example if you allocate memory by doing
int *mem = malloc(sizeof(int)*SIZE)
check whether it was successful by calling
checkAllocRAM(mem) afterwards */
void checkAllocRAM(void *ptr){
if (ptr==NULL){
fprintf(stderr, "Memory Full.\nYour array is too large. Please try a smaller array.\n");
exit(EXIT_FAILURE);
}
}
/* This checks whether a file has been opened correctly. If opening the file failed, the returned value is a NULL pointer.
For example if you open a file using
FILE *file=fopen("file.txt","r");
check by calling isFileValid(file); */
void isFileValid(FILE *fp){
if (fp==NULL){
perror("A file access error occurred\n");
exit(EXIT_FAILURE);
}
}
|
b7f83c611d00eca8535a5bc980a284d5db0b54e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
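// setVal: the thread with global index id writes (size - id) into the array; there is no bounds
// check, so the caller is expected to launch exactly `size` threads.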
__global__ void setVal( int* testfuck, int size )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
testfuck[id] = size - id;
} | b7f83c611d00eca8535a5bc980a284d5db0b54e1.cu | #include "includes.h"
__global__ void setVal( int* testfuck, int size )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
testfuck[id] = size - id;
} |
c26f7780d7739e791b6d4609bffcf538b0bc75e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
#define TRIALS_PER_THREAD 10000
#define BLOCKS 512
#define THREADS 1
#define PI 3.1415926535 // known value of pi
double uniform(double a, double b){
return rand() / (RAND_MAX + 1.0) * (b - a) + a;
}
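// gpu_monte_carlo: each thread seeds its own RNG state, samples TRIALS_PER_THREAD points in the
// unit square, counts the ones inside the quarter circle and stores a per-thread estimate of pi;
// the host averages the BLOCKS * THREADS estimates afterwards.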
__global__ void gpu_monte_carlo(float *estimate, hiprandState_t *states) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
int points_in_circle = 0;
float x, y;
hiprand_init(1234, tid, 0, &states[tid]); // Initialize CURAND
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = hiprand_uniform (&states[tid]);
y = hiprand_uniform (&states[tid]);
points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
}
estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi
}
double cpu_monte_carlo(long n) {
double x, y;
long in_circle = 0;
double r = 5.0;
float a = -r,b = r;
for(long i = 0; i < n; i++) {
x = uniform(a,b);
y = uniform(a,b);
in_circle += x*x + y*y <= r*r ? 1 : 0;
}
return ((double)in_circle/n)*4;
}
int main (int argc, char *argv[]) {
clock_t startgpu, stopgpu,startcpu,stopcpu;
float host[BLOCKS * THREADS];
float *dev;
hiprandState_t *devStates;
startcpu = clock();
float pi_cpu = cpu_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD);
stopcpu = clock();
printf("Pi = %f CPU pi calculated in %f s.\n", pi_cpu,(stopcpu-startcpu)/(float)CLOCKS_PER_SEC);
startgpu = clock();
hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. for counts
hipMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t) );
hipLaunchKernelGGL(( gpu_monte_carlo), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates);
hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), hipMemcpyDeviceToHost); // return results
float pi_gpu = 0.0f;
for(int i = 0; i < BLOCKS * THREADS; i++) {
pi_gpu += host[i];
}
pi_gpu /= (BLOCKS * THREADS);
stopgpu = clock();
printf("Pi = %f GPU pi calculated in %f s.\n", pi_gpu,(stopgpu-startgpu)/(float)CLOCKS_PER_SEC);
return 0;
} | c26f7780d7739e791b6d4609bffcf538b0bc75e6.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#define TRIALS_PER_THREAD 10000
#define BLOCKS 512
#define THREADS 1
#define PI 3.1415926535 // known value of pi
double uniform(double a, double b){
return rand() / (RAND_MAX + 1.0) * (b - a) + a;
}
__global__ void gpu_monte_carlo(float *estimate, curandState *states) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
int points_in_circle = 0;
float x, y;
curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = curand_uniform (&states[tid]);
y = curand_uniform (&states[tid]);
points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
}
estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi
}
double cpu_monte_carlo(long n) {
double x, y;
long in_circle = 0;
double r = 5.0;
float a = -r,b = r;
for(long i = 0; i < n; i++) {
x = uniform(a,b);
y = uniform(a,b);
in_circle += x*x + y*y <= r*r ? 1 : 0;
}
return ((double)in_circle/n)*4;
}
int main (int argc, char *argv[]) {
clock_t startgpu, stopgpu,startcpu,stopcpu;
float host[BLOCKS * THREADS];
float *dev;
curandState *devStates;
startcpu = clock();
float pi_cpu = cpu_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD);
stopcpu = clock();
printf("Pi = %f CPU pi calculated in %f s.\n", pi_cpu,(stopcpu-startcpu)/(float)CLOCKS_PER_SEC);
startgpu = clock();
cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. for counts
cudaMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(curandState) );
gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev, devStates);
cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); // return results
float pi_gpu = 0.0f;
for(int i = 0; i < BLOCKS * THREADS; i++) {
pi_gpu += host[i];
}
pi_gpu /= (BLOCKS * THREADS);
stopgpu = clock();
printf("Pi = %f GPU pi calculated in %f s.\n", pi_gpu,(stopgpu-startgpu)/(float)CLOCKS_PER_SEC);
return 0;
} |
d88d3c925b78cd5e1ba03c3883cb12f0fccddaa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include <limits>
#include "internal.hpp"
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/utils/device/algorithm.hpp"
#include "utils/scan_block.hpp"
#include "utils/morton.hpp"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
template<typename PointType>
struct SelectMinPoint
{
__host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
{
PointType result;
result.x = fmin(e1.x, e2.x);
result.y = fmin(e1.y, e2.y);
result.z = fmin(e1.z, e2.z);
return result;
}
};
template<typename PointType>
struct SelectMaxPoint
{
__host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
{
PointType result;
result.x = fmax(e1.x, e2.x);
result.y = fmax(e1.y, e2.y);
result.z = fmax(e1.z, e2.z);
return result;
}
};
template<typename PointType>
struct PointType_to_tuple
{
__device__ __forceinline__ thrust::tuple<float, float, float> operator()(const PointType& arg) const
{
thrust::tuple<float, float, float> res;
res.get<0>() = arg.x;
res.get<1>() = arg.y;
res.get<2>() = arg.z;
return res;
}
};
}
}
namespace pcl
{
namespace device
{
const static int max_points_per_leaf = 96;
enum
{
GRID_SIZE = 1,
CTA_SIZE = 1024-32,
STRIDE = CTA_SIZE,
LEVEL_BITS_NUM = 3,
ARITY = 1 << LEVEL_BITS_NUM
};
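// The whole octree is built by a single thread block (GRID_SIZE == 1). These shared variables
// hold the window [tasks_beg, tasks_end) of nodes still to be split at the current level, the
// running node count, and the per-thread cell counts consumed by the block-wide scan.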
__shared__ int nodes_num;
__shared__ int tasks_beg;
__shared__ int tasks_end;
__shared__ int total_new;
__shared__ volatile int offsets[CTA_SIZE];
struct SingleStepBuild
{
const int* codes;
int points_number;
mutable OctreeGlobal octree;
static __device__ __forceinline__ int divUp(int total, int grain) { return (total + grain - 1) / grain; };
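// FindCells: splits the point range [begs[task], ends[task]) of a node into at most 8 child
// sub-ranges keyed by the Morton octant at `level`; returns 0 (the node stays a leaf) when it
// holds fewer than max_points_per_leaf points.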
__device__ __forceinline__ int FindCells(int task, int level, int cell_begs[], char cell_code[]) const
{
int cell_count = 0;
int beg = octree.begs[task];
int end = octree.ends[task];
if (end - beg < max_points_per_leaf)
{
//cell_count == 0;
}
else
{
int cur_code = Morton::extractLevelCode(codes[beg], level);
cell_begs[cell_count] = beg;
cell_code[cell_count] = cur_code;
++cell_count;
int last_code = Morton::extractLevelCode(codes[end - 1], level);
if (last_code == cur_code)
{
cell_begs[cell_count] = end;
}
else
{
for(;;)
{
int search_code = cur_code + 1;
if (search_code == 8)
{
cell_begs[cell_count] = end;
break;
}
int morton_code = Morton::shiftLevelCode(search_code, level);
int pos = lower_bound(codes + beg, codes + end, morton_code, CompareByLevelCode(level)) - codes;
if (pos == end)
{
cell_begs[cell_count] = end;
break;
}
cur_code = Morton::extractLevelCode(codes[pos], level);
cell_begs[cell_count] = pos;
cell_code[cell_count] = cur_code;
++cell_count;
beg = pos;
}
}
}
return cell_count;
}
__device__ __forceinline__ void operator()() const
{
//32 is a performance penalty step for search
static_assert((max_points_per_leaf % 32) == 0, "max_points_per_leaf must be a multiple of 32");
if (threadIdx.x == 0)
{
//init root
octree.codes[0] = 0;
octree.nodes[0] = 0;
octree. begs[0] = 0;
octree. ends[0] = points_number;
octree.parent[0] = -1;
//init shared
nodes_num = 1;
tasks_beg = 0;
tasks_end = 1;
total_new = 0;
}
int level = 0;
int cell_begs[ARITY + 1];
char cell_code[ARITY];
__syncthreads();
while (tasks_beg < tasks_end && level < Morton::levels)
{
int task_count = tasks_end - tasks_beg;
int iters = divUp(task_count, CTA_SIZE);
int task = tasks_beg + threadIdx.x;
//__syncthreads(); // extra??
for(int it = 0; it < iters; ++it, task += STRIDE)
{
int cell_count = (task < tasks_end) ? FindCells(task, level, cell_begs, cell_code) : 0;
offsets[threadIdx.x] = cell_count;
__syncthreads();
scan_block<pcl::device::exclusive>(offsets);
//__syncthreads(); //because sync is inside the scan above
if (task < tasks_end)
{
if (cell_count > 0)
{
int parent_code_shifted = octree.codes[task] << LEVEL_BITS_NUM;
int offset = nodes_num + offsets[threadIdx.x];
int mask = 0;
for(int i = 0; i < cell_count; ++i)
{
octree.begs [offset + i] = cell_begs[i];
octree.ends [offset + i] = cell_begs[i + 1];
octree.codes[offset + i] = parent_code_shifted + cell_code[i];
octree.nodes[offset + i] = 0;
octree.parent[offset + i] = task;
mask |= (1 << cell_code[i]);
}
octree.nodes[task] = (offset << 8) + mask;
}
else
octree.nodes[task] = 0;
}
__syncthreads();
if (threadIdx.x == CTA_SIZE - 1)
{
total_new += cell_count + offsets[threadIdx.x];
nodes_num += cell_count + offsets[threadIdx.x];
}
__syncthreads();
} /* for(int it = 0; it < iters; ++it, task += STRIDE) */
//__syncthreads(); //extra ??
if (threadIdx.x == CTA_SIZE - 1)
{
tasks_beg = tasks_end;
tasks_end += total_new;
total_new = 0;
}
++level;
__syncthreads();
}
if (threadIdx.x == CTA_SIZE - 1)
*octree.nodes_num = nodes_num;
}
};
__global__ void __launch_bounds__(CTA_SIZE) singleStepKernel(const SingleStepBuild ssb) { ssb(); }
}
}
void pcl::device::OctreeImpl::build()
{
using namespace pcl::device;
host_octree.downloaded = false;
int points_num = (int)points.size();
//allocatations
{
//ScopeTimer timer("new_allocs");
//+1 codes * points_num * sizeof(int)
//+1 indices * points_num * sizeof(int)
//+1 octreeGlobal.nodes * points_num * sizeof(int)
//+1 octreeGlobal.codes * points_num * sizeof(int)
//+1 octreeGlobal.begs * points_num * sizeof(int)
//+1 octreeGlobal.ends * points_num * sizeof(int)
//+1 octreeGlobal.parent * points_num * sizeof(int)
//+3 points_sorted * points_num * sizeof(float)
//==
// 10 rows
//left
//octreeGlobal.nodes_num * 1 * sizeof(int)
//==
// 3 * sizeof(int) => +1 row
const int transaction_size = 128 / sizeof(int);
int cols = std::max<int>(points_num, transaction_size * 4);
int rows = 10 + 1; // = 13
storage.create(rows, cols);
codes = DeviceArray<int>(storage.ptr(0), points_num);
indices = DeviceArray<int>(storage.ptr(1), points_num);
octreeGlobal.nodes = storage.ptr(2);
octreeGlobal.codes = storage.ptr(3);
octreeGlobal.begs = storage.ptr(4);
octreeGlobal.ends = storage.ptr(5);
octreeGlobal.parent = storage.ptr(6);
octreeGlobal.nodes_num = storage.ptr(7);
points_sorted = DeviceArray2D<float>(3, points_num, storage.ptr(8), storage.step());
}
{
//ScopeTimer timer("reduce-morton-sort-permutations");
thrust::device_ptr<PointType> beg(points.ptr());
thrust::device_ptr<PointType> end = beg + points.size();
{
PointType atmax, atmin;
atmax.x = atmax.y = atmax.z = std::numeric_limits<float>::max();
atmin.x = atmin.y = atmin.z = std::numeric_limits<float>::lowest();
atmax.w = atmin.w = 0;
//ScopeTimer timer("reduce");
PointType minp = thrust::reduce(beg, end, atmax, SelectMinPoint<PointType>());
PointType maxp = thrust::reduce(beg, end, atmin, SelectMaxPoint<PointType>());
octreeGlobal.minp = make_float3(minp.x, minp.y, minp.z);
octreeGlobal.maxp = make_float3(maxp.x, maxp.y, maxp.z);
}
thrust::device_ptr<int> codes_beg(codes.ptr());
thrust::device_ptr<int> codes_end = codes_beg + codes.size();
{
//ScopeTimer timer("morton");
thrust::transform(beg, end, codes_beg, CalcMorton(octreeGlobal.minp, octreeGlobal.maxp));
}
thrust::device_ptr<int> indices_beg(indices.ptr());
thrust::device_ptr<int> indices_end = indices_beg + indices.size();
{
//ScopeTimer timer("sort");
thrust::sequence(indices_beg, indices_end);
thrust::sort_by_key(codes_beg, codes_end, indices_beg );
}
{
////ScopeTimer timer("perm");
//thrust::copy(make_permutation_iterator(beg, indices_beg),
// make_permutation_iterator(end, indices_end), device_ptr<float3>(points_sorted.ptr()));
}
{
thrust::device_ptr<float> xs(points_sorted.ptr(0));
thrust::device_ptr<float> ys(points_sorted.ptr(1));
thrust::device_ptr<float> zs(points_sorted.ptr(2));
//ScopeTimer timer("perm2");
thrust::transform(make_permutation_iterator(beg, indices_beg),
make_permutation_iterator(end, indices_end),
make_zip_iterator(make_tuple(xs, ys, zs)), PointType_to_tuple<PointType>());
}
}
SingleStepBuild ssb;
ssb.octree = octreeGlobal;
ssb.codes = codes;
ssb.points_number = (int)codes.size();
//printFuncAttrib(singleStepKernel);
cudaSafeCall( hipFuncSetCacheConfig(singleStepKernel, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( singleStepKernel), dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, ssb);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
| d88d3c925b78cd5e1ba03c3883cb12f0fccddaa0.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include <limits>
#include "internal.hpp"
#include "pcl/gpu/utils/timers_cuda.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/utils/device/algorithm.hpp"
#include "utils/scan_block.hpp"
#include "utils/morton.hpp"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
template<typename PointType>
struct SelectMinPoint
{
__host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
{
PointType result;
result.x = fmin(e1.x, e2.x);
result.y = fmin(e1.y, e2.y);
result.z = fmin(e1.z, e2.z);
return result;
}
};
template<typename PointType>
struct SelectMaxPoint
{
__host__ __device__ __forceinline__ PointType operator()(const PointType& e1, const PointType& e2) const
{
PointType result;
result.x = fmax(e1.x, e2.x);
result.y = fmax(e1.y, e2.y);
result.z = fmax(e1.z, e2.z);
return result;
}
};
template<typename PointType>
struct PointType_to_tuple
{
__device__ __forceinline__ thrust::tuple<float, float, float> operator()(const PointType& arg) const
{
thrust::tuple<float, float, float> res;
res.get<0>() = arg.x;
res.get<1>() = arg.y;
res.get<2>() = arg.z;
return res;
}
};
}
}
namespace pcl
{
namespace device
{
const static int max_points_per_leaf = 96;
enum
{
GRID_SIZE = 1,
CTA_SIZE = 1024-32,
STRIDE = CTA_SIZE,
LEVEL_BITS_NUM = 3,
ARITY = 1 << LEVEL_BITS_NUM
};
__shared__ int nodes_num;
__shared__ int tasks_beg;
__shared__ int tasks_end;
__shared__ int total_new;
__shared__ volatile int offsets[CTA_SIZE];
struct SingleStepBuild
{
const int* codes;
int points_number;
mutable OctreeGlobal octree;
static __device__ __forceinline__ int divUp(int total, int grain) { return (total + grain - 1) / grain; };
__device__ __forceinline__ int FindCells(int task, int level, int cell_begs[], char cell_code[]) const
{
int cell_count = 0;
int beg = octree.begs[task];
int end = octree.ends[task];
if (end - beg < max_points_per_leaf)
{
//cell_count == 0;
}
else
{
int cur_code = Morton::extractLevelCode(codes[beg], level);
cell_begs[cell_count] = beg;
cell_code[cell_count] = cur_code;
++cell_count;
int last_code = Morton::extractLevelCode(codes[end - 1], level);
if (last_code == cur_code)
{
cell_begs[cell_count] = end;
}
else
{
for(;;)
{
int search_code = cur_code + 1;
if (search_code == 8)
{
cell_begs[cell_count] = end;
break;
}
int morton_code = Morton::shiftLevelCode(search_code, level);
int pos = lower_bound(codes + beg, codes + end, morton_code, CompareByLevelCode(level)) - codes;
if (pos == end)
{
cell_begs[cell_count] = end;
break;
}
cur_code = Morton::extractLevelCode(codes[pos], level);
cell_begs[cell_count] = pos;
cell_code[cell_count] = cur_code;
++cell_count;
beg = pos;
}
}
}
return cell_count;
}
__device__ __forceinline__ void operator()() const
{
//32 is a performance penalty step for search
static_assert((max_points_per_leaf % 32) == 0, "max_points_per_leaf must be a multiple of 32");
if (threadIdx.x == 0)
{
//init root
octree.codes[0] = 0;
octree.nodes[0] = 0;
octree. begs[0] = 0;
octree. ends[0] = points_number;
octree.parent[0] = -1;
//init shared
nodes_num = 1;
tasks_beg = 0;
tasks_end = 1;
total_new = 0;
}
int level = 0;
int cell_begs[ARITY + 1];
char cell_code[ARITY];
__syncthreads();
while (tasks_beg < tasks_end && level < Morton::levels)
{
int task_count = tasks_end - tasks_beg;
int iters = divUp(task_count, CTA_SIZE);
int task = tasks_beg + threadIdx.x;
//__syncthreads(); // extra??
for(int it = 0; it < iters; ++it, task += STRIDE)
{
int cell_count = (task < tasks_end) ? FindCells(task, level, cell_begs, cell_code) : 0;
offsets[threadIdx.x] = cell_count;
__syncthreads();
scan_block<pcl::device::exclusive>(offsets);
//__syncthreads(); //because sync is inside the scan above
if (task < tasks_end)
{
if (cell_count > 0)
{
int parent_code_shifted = octree.codes[task] << LEVEL_BITS_NUM;
int offset = nodes_num + offsets[threadIdx.x];
int mask = 0;
for(int i = 0; i < cell_count; ++i)
{
octree.begs [offset + i] = cell_begs[i];
octree.ends [offset + i] = cell_begs[i + 1];
octree.codes[offset + i] = parent_code_shifted + cell_code[i];
octree.nodes[offset + i] = 0;
octree.parent[offset + i] = task;
mask |= (1 << cell_code[i]);
}
octree.nodes[task] = (offset << 8) + mask;
}
else
octree.nodes[task] = 0;
}
__syncthreads();
if (threadIdx.x == CTA_SIZE - 1)
{
total_new += cell_count + offsets[threadIdx.x];
nodes_num += cell_count + offsets[threadIdx.x];
}
__syncthreads();
} /* for(int it = 0; it < iters; ++it, task += STRIDE) */
//__syncthreads(); //extra ??
if (threadIdx.x == CTA_SIZE - 1)
{
tasks_beg = tasks_end;
tasks_end += total_new;
total_new = 0;
}
++level;
__syncthreads();
}
if (threadIdx.x == CTA_SIZE - 1)
*octree.nodes_num = nodes_num;
}
};
__global__ void __launch_bounds__(CTA_SIZE) singleStepKernel(const SingleStepBuild ssb) { ssb(); }
}
}
void pcl::device::OctreeImpl::build()
{
using namespace pcl::device;
host_octree.downloaded = false;
int points_num = (int)points.size();
//allocatations
{
//ScopeTimer timer("new_allocs");
//+1 codes * points_num * sizeof(int)
//+1 indices * points_num * sizeof(int)
//+1 octreeGlobal.nodes * points_num * sizeof(int)
//+1 octreeGlobal.codes * points_num * sizeof(int)
//+1 octreeGlobal.begs * points_num * sizeof(int)
//+1 octreeGlobal.ends * points_num * sizeof(int)
//+1 octreeGlobal.parent * points_num * sizeof(int)
//+3 points_sorted * points_num * sizeof(float)
//==
// 10 rows
//left
//octreeGlobal.nodes_num * 1 * sizeof(int)
//==
// 3 * sizeof(int) => +1 row
const int transaction_size = 128 / sizeof(int);
int cols = std::max<int>(points_num, transaction_size * 4);
int rows = 10 + 1; // = 13
storage.create(rows, cols);
codes = DeviceArray<int>(storage.ptr(0), points_num);
indices = DeviceArray<int>(storage.ptr(1), points_num);
octreeGlobal.nodes = storage.ptr(2);
octreeGlobal.codes = storage.ptr(3);
octreeGlobal.begs = storage.ptr(4);
octreeGlobal.ends = storage.ptr(5);
octreeGlobal.parent = storage.ptr(6);
octreeGlobal.nodes_num = storage.ptr(7);
points_sorted = DeviceArray2D<float>(3, points_num, storage.ptr(8), storage.step());
}
{
//ScopeTimer timer("reduce-morton-sort-permutations");
thrust::device_ptr<PointType> beg(points.ptr());
thrust::device_ptr<PointType> end = beg + points.size();
{
PointType atmax, atmin;
atmax.x = atmax.y = atmax.z = std::numeric_limits<float>::max();
atmin.x = atmin.y = atmin.z = std::numeric_limits<float>::lowest();
atmax.w = atmin.w = 0;
//ScopeTimer timer("reduce");
PointType minp = thrust::reduce(beg, end, atmax, SelectMinPoint<PointType>());
PointType maxp = thrust::reduce(beg, end, atmin, SelectMaxPoint<PointType>());
octreeGlobal.minp = make_float3(minp.x, minp.y, minp.z);
octreeGlobal.maxp = make_float3(maxp.x, maxp.y, maxp.z);
}
thrust::device_ptr<int> codes_beg(codes.ptr());
thrust::device_ptr<int> codes_end = codes_beg + codes.size();
{
//ScopeTimer timer("morton");
thrust::transform(beg, end, codes_beg, CalcMorton(octreeGlobal.minp, octreeGlobal.maxp));
}
thrust::device_ptr<int> indices_beg(indices.ptr());
thrust::device_ptr<int> indices_end = indices_beg + indices.size();
{
//ScopeTimer timer("sort");
thrust::sequence(indices_beg, indices_end);
thrust::sort_by_key(codes_beg, codes_end, indices_beg );
}
{
////ScopeTimer timer("perm");
//thrust::copy(make_permutation_iterator(beg, indices_beg),
// make_permutation_iterator(end, indices_end), device_ptr<float3>(points_sorted.ptr()));
}
{
thrust::device_ptr<float> xs(points_sorted.ptr(0));
thrust::device_ptr<float> ys(points_sorted.ptr(1));
thrust::device_ptr<float> zs(points_sorted.ptr(2));
//ScopeTimer timer("perm2");
thrust::transform(make_permutation_iterator(beg, indices_beg),
make_permutation_iterator(end, indices_end),
make_zip_iterator(make_tuple(xs, ys, zs)), PointType_to_tuple<PointType>());
}
}
SingleStepBuild ssb;
ssb.octree = octreeGlobal;
ssb.codes = codes;
ssb.points_number = (int)codes.size();
//printFuncAttrib(singleStepKernel);
cudaSafeCall( cudaFuncSetCacheConfig(singleStepKernel, cudaFuncCachePreferL1) );
singleStepKernel<<<GRID_SIZE, CTA_SIZE>>>(ssb);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
|
67f37d1c5b99e2ee673bf8554739b8a9230b9043.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
To compile:
nvcc -arch=sm_60 reduction_kernel.cu
To run with the array size 2^20, expo dist mean 5, and init seed 17:
./a.out 20 5 17
*/
#include <stdio.h>
#include <stdlib.h>
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
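// Host-side reference reduction. Note (added comment): the loop below is
// Kahan (compensated) summation -- `c` carries the rounding error of each
// addition so the CPU result is accurate enough to compare with the GPU sum.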
template<class T>
T reduceCPU(T *data, int size)
{
T sum = data[0];
T c = (T)0.0;
for (int i = 1; i < size; i++)
{
T y = data[i] - c;
T t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
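/*
    Added illustration (not part of the original sample), tracing the reduce()
    kernel below: with blockDim.x = 8 and a block's sdata loaded as
    {3,1,4,1,5,9,2,6}, the loop halves the active threads each step:
        s=4: {8,10,6,7,...}   (sdata[tid] += sdata[tid+4] for tid < 4)
        s=2: {14,17,...}
        s=1: {31,...}
    so sdata[0] ends up holding the block sum (31), which thread 0 writes to
    g_odata[blockIdx.x].
*/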
__global__ void
qroot(double *g_idata, double *g_odata)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
g_odata[i]= sqrt(sqrt(g_idata[i]));
}
__global__ void
reduce(double *g_idata, double *g_odata, unsigned int n)
{
double *sdata = SharedMemory<double>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// CUDA Runtime
#include <hip/hip_runtime.h>
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
extern "C" double reduction(int n, int mean, int seed)
{
int size = 1<<n; // number of elements to reduce
int maxThreads = 256; // number of threads per block
// create random input data on CPU
unsigned int bytes = size * sizeof(double);
double *h_idata = (double *) malloc(bytes);
srand48(seed);
for (int i=0; i<size; i++)
{
// h_idata[i] = 1.0; // for testing
// expo dist with mean 5.0
h_idata[i] = -mean * log(drand48());
}
int numBlocks = size / maxThreads;
int numThreads = size;
int smemSize = maxThreads * sizeof(double);
// allocate mem for the result on host side
double *h_odata = (double *) malloc(numBlocks*sizeof(double));
// allocate device memory and data
double *d_idata = NULL;
double *d_odata = NULL;
checkCudaErrors(hipMalloc((void **) &d_idata, bytes));
checkCudaErrors(hipMalloc((void **) &d_odata, numBlocks*sizeof(double)));
// copy data directly to device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( qroot), dim3(numBlocks),dim3(maxThreads),smemSize, 0, d_idata, d_idata);
hipLaunchKernelGGL(( reduce), dim3(numBlocks),dim3(maxThreads),smemSize, 0, d_idata, d_odata, numThreads);
int s=numBlocks;
while (s > 1) {
hipLaunchKernelGGL(( reduce), dim3((s+maxThreads-1)/maxThreads),dim3(maxThreads),smemSize, 0, d_odata, d_odata, s);
s = (s+maxThreads-1)/maxThreads;
}
checkCudaErrors(hipMemcpy(h_odata, d_odata, sizeof(double), hipMemcpyDeviceToHost));
printf("GPU sum : %f\n\n", h_odata[0]);
double result = h_odata[0];
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
double cpu_result = reduceCPU<double>(h_idata, size);
printf("CPU sum : %f\n", cpu_result);
return result;
}
| 67f37d1c5b99e2ee673bf8554739b8a9230b9043.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
To compile:
nvcc -arch=sm_60 reduction_kernel.cu
To run with the array size 2^20, expo dist mean 5, and init seed 17:
./a.out 20 5 17
*/
#include <stdio.h>
#include <stdlib.h>
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
template<class T>
T reduceCPU(T *data, int size)
{
T sum = data[0];
T c = (T)0.0;
for (int i = 1; i < size; i++)
{
T y = data[i] - c;
T t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
__global__ void
qroot(double *g_idata, double *g_odata)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
g_odata[i]= sqrt(sqrt(g_idata[i]));
}
__global__ void
reduce(double *g_idata, double *g_odata, unsigned int n)
{
double *sdata = SharedMemory<double>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// CUDA Runtime
#include <cuda_runtime.h>
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
extern "C" double reduction(int n, int mean, int seed)
{
int size = 1<<n; // number of elements to reduce
int maxThreads = 256; // number of threads per block
// create random input data on CPU
unsigned int bytes = size * sizeof(double);
double *h_idata = (double *) malloc(bytes);
srand48(seed);
for (int i=0; i<size; i++)
{
// h_idata[i] = 1.0; // for testing
// expo dist with mean 5.0
h_idata[i] = -mean * log(drand48());
}
int numBlocks = size / maxThreads;
int numThreads = size;
int smemSize = maxThreads * sizeof(double);
// allocate mem for the result on host side
double *h_odata = (double *) malloc(numBlocks*sizeof(double));
// allocate device memory and data
double *d_idata = NULL;
double *d_odata = NULL;
checkCudaErrors(cudaMalloc((void **) &d_idata, bytes));
checkCudaErrors(cudaMalloc((void **) &d_odata, numBlocks*sizeof(double)));
// copy data directly to device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
qroot<<<numBlocks,maxThreads,smemSize>>>(d_idata, d_idata);
reduce<<<numBlocks,maxThreads,smemSize>>>(d_idata, d_odata, numThreads);
int s=numBlocks;
while (s > 1) {
reduce<<<(s+maxThreads-1)/maxThreads,maxThreads,smemSize>>>(d_odata, d_odata, s);
s = (s+maxThreads-1)/maxThreads;
}
checkCudaErrors(cudaMemcpy(h_odata, d_odata, sizeof(double), cudaMemcpyDeviceToHost));
printf("GPU sum : %f\n\n", h_odata[0]);
double result = h_odata[0];
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
double cpu_result = reduceCPU<double>(h_idata, size);
printf("CPU sum : %f\n", cpu_result);
return result;
}
|
8f82f8b2d21e05f2220cb803c9139fa795c354c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ker.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *cormat = NULL;
hipMalloc(&cormat, XSIZE*YSIZE);
float *upper = NULL;
hipMalloc(&upper, XSIZE*YSIZE);
int n1 = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
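// Added note: the two while-loops above round iXSIZE/iYSIZE up to the next
// multiple of the block dims, e.g. XSIZE = 240 with BLOCKX = 8 stays 240
// (gridBlock.x = 30), while BLOCKX = 32 pads it to 256 (gridBlock.x = 8).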
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ker), dim3(gridBlock), dim3(threadBlock), 0, 0, cormat, upper, n1, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ker), dim3(gridBlock), dim3(threadBlock), 0, 0, cormat, upper, n1, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ker), dim3(gridBlock), dim3(threadBlock), 0, 0, cormat, upper, n1, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8f82f8b2d21e05f2220cb803c9139fa795c354c5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ker.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *cormat = NULL;
cudaMalloc(&cormat, XSIZE*YSIZE);
float *upper = NULL;
cudaMalloc(&upper, XSIZE*YSIZE);
int n1 = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ker<<<gridBlock,threadBlock>>>(cormat,upper,n1,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ker<<<gridBlock,threadBlock>>>(cormat,upper,n1,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ker<<<gridBlock,threadBlock>>>(cormat,upper,n1,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0643380f565624da72c96b699f6ffb8cfe2fd623.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// #
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// #
// http://www.apache.org/licenses/LICENSE-2.0
// #
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Soft Rasterizer (SoftRas)
// Copyright (c) 2017 Hiroharu Kato
// Copyright (c) 2018 Nikos Kolotouros
// Copyright (c) 2019 Shichen Liu
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// #
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// #
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
namespace {
template <typename scalar_t>
__global__ void load_textures_cuda_kernel(
const scalar_t* __restrict__ image,
const scalar_t* __restrict__ faces,
const int32_t* __restrict__ is_update,
scalar_t* __restrict__ textures,
size_t texture_size,
size_t texture_res,
size_t image_height,
size_t image_width) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i * 3 >= texture_size) {
return;
}
const int R = texture_res;
const int fn = i / (R * R);
const int w_y = (i % (R * R)) / R;
const int w_x = i % R;
// compute barycentric coordinate
scalar_t w0, w1, w2;
if (w_x + w_y < R) {
w0 = (w_x + 1. / 3.) / R;
w1 = (w_y + 1. / 3.) / R;
w2 = 1. - w0 - w1;
} else {
w0 = ((R - 1. - w_x) + 2. / 3.) / R;
w1 = ((R - 1. - w_y) + 2. / 3.) / R;
w2 = 1. - w0 - w1;
}
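// Added illustration (not from the original source): each face carries an
// R x R grid of texels split into two triangles; e.g. for R = 4 the texel
// (w_x, w_y) = (1, 0) falls in the w_x + w_y < R half and gets
// w0 = (1 + 1/3)/4 = 1/3, w1 = (0 + 1/3)/4 = 1/12, w2 = 1 - w0 - w1 = 7/12,
// the barycentric weights used below to blend the three 2D vertex coordinates.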
const scalar_t* face = &faces[fn * 3 * 2];
scalar_t* texture = &textures[i * 3];
if (is_update[fn] == 0) return;
const scalar_t pos_x = (
(face[2 * 0 + 0] * w0 + face[2 * 1 + 0] * w1 + face[2 * 2 + 0] * w2) * (image_width - 1));
const scalar_t pos_y = (
(face[2 * 0 + 1] * w0 + face[2 * 1 + 1] * w1 + face[2 * 2 + 1] * w2) * (image_height - 1));
if (1) {
/* bilinear sampling */
const scalar_t weight_x1 = pos_x - (int)pos_x;
const scalar_t weight_x0 = 1 - weight_x1;
const scalar_t weight_y1 = pos_y - (int)pos_y;
const scalar_t weight_y0 = 1 - weight_y1;
for (int k = 0; k < 3; k++) {
scalar_t c = 0;
c += image[((int)pos_y * image_width + (int)pos_x) * 3 + k] * (weight_x0 * weight_y0);
c += image[((int)(pos_y + 1) * image_width + (int)pos_x) * 3 + k] * (weight_x0 * weight_y1);
c += image[((int)pos_y * image_width + ((int)pos_x) + 1) * 3 + k] * (weight_x1 * weight_y0);
c += image[((int)(pos_y + 1)* image_width + ((int)pos_x) + 1) * 3 + k] * (weight_x1 * weight_y1);
texture[k] = c;
}
} else {
/* nearest neighbor */
const int pos_xi = round(pos_x);
const int pos_yi = round(pos_y);
for (int k = 0; k < 3; k++) {
texture[k] = image[(pos_yi * image_width + pos_xi) * 3 + k];
}
}
}
}
at::Tensor load_textures_cuda(
at::Tensor image,
at::Tensor faces,
at::Tensor textures,
at::Tensor is_update) {
// texture_size = size of the textures tensor
const auto texture_size = textures.numel();
// note that texture_res (computed below as sqrt(textures.size(1))) is not the same as texture_size
const auto texture_res = sqrt(textures.size(1));
const auto image_height = image.size(0);
const auto image_width = image.size(1);
const int threads = 1024;
const dim3 blocks ((texture_size / 3 - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "load_textures_cuda", ([&] {
hipLaunchKernelGGL(( load_textures_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
image.data<scalar_t>(),
faces.data<scalar_t>(),
is_update.data<int32_t>(),
textures.data<scalar_t>(),
texture_size,
texture_res,
image_height,
image_width);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in load_textures: %s\n", hipGetErrorString(err));
return textures;
}
| 0643380f565624da72c96b699f6ffb8cfe2fd623.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
// #
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// #
// http://www.apache.org/licenses/LICENSE-2.0
// #
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Soft Rasterizer (SoftRas)
// Copyright (c) 2017 Hiroharu Kato
// Copyright (c) 2018 Nikos Kolotouros
// Copyright (c) 2019 Shichen Liu
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// #
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// #
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
namespace {
template <typename scalar_t>
__global__ void load_textures_cuda_kernel(
const scalar_t* __restrict__ image,
const scalar_t* __restrict__ faces,
const int32_t* __restrict__ is_update,
scalar_t* __restrict__ textures,
size_t texture_size,
size_t texture_res,
size_t image_height,
size_t image_width) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i * 3 >= texture_size) {
return;
}
const int R = texture_res;
const int fn = i / (R * R);
const int w_y = (i % (R * R)) / R;
const int w_x = i % R;
// compute barycentric coordinate
scalar_t w0, w1, w2;
if (w_x + w_y < R) {
w0 = (w_x + 1. / 3.) / R;
w1 = (w_y + 1. / 3.) / R;
w2 = 1. - w0 - w1;
} else {
w0 = ((R - 1. - w_x) + 2. / 3.) / R;
w1 = ((R - 1. - w_y) + 2. / 3.) / R;
w2 = 1. - w0 - w1;
}
const scalar_t* face = &faces[fn * 3 * 2];
scalar_t* texture = &textures[i * 3];
if (is_update[fn] == 0) return;
const scalar_t pos_x = (
(face[2 * 0 + 0] * w0 + face[2 * 1 + 0] * w1 + face[2 * 2 + 0] * w2) * (image_width - 1));
const scalar_t pos_y = (
(face[2 * 0 + 1] * w0 + face[2 * 1 + 1] * w1 + face[2 * 2 + 1] * w2) * (image_height - 1));
if (1) {
/* bilinear sampling */
const scalar_t weight_x1 = pos_x - (int)pos_x;
const scalar_t weight_x0 = 1 - weight_x1;
const scalar_t weight_y1 = pos_y - (int)pos_y;
const scalar_t weight_y0 = 1 - weight_y1;
for (int k = 0; k < 3; k++) {
scalar_t c = 0;
c += image[((int)pos_y * image_width + (int)pos_x) * 3 + k] * (weight_x0 * weight_y0);
c += image[((int)(pos_y + 1) * image_width + (int)pos_x) * 3 + k] * (weight_x0 * weight_y1);
c += image[((int)pos_y * image_width + ((int)pos_x) + 1) * 3 + k] * (weight_x1 * weight_y0);
c += image[((int)(pos_y + 1)* image_width + ((int)pos_x) + 1) * 3 + k] * (weight_x1 * weight_y1);
texture[k] = c;
}
} else {
/* nearest neighbor */
const int pos_xi = round(pos_x);
const int pos_yi = round(pos_y);
for (int k = 0; k < 3; k++) {
texture[k] = image[(pos_yi * image_width + pos_xi) * 3 + k];
}
}
}
}
at::Tensor load_textures_cuda(
at::Tensor image,
at::Tensor faces,
at::Tensor textures,
at::Tensor is_update) {
// texture_size = size of the textures tensor
const auto texture_size = textures.numel();
// note that texture_res (computed below as sqrt(textures.size(1))) is not the same as texture_size
const auto texture_res = sqrt(textures.size(1));
const auto image_height = image.size(0);
const auto image_width = image.size(1);
const int threads = 1024;
const dim3 blocks ((texture_size / 3 - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(image.type(), "load_textures_cuda", ([&] {
load_textures_cuda_kernel<scalar_t><<<blocks, threads>>>(
image.data<scalar_t>(),
faces.data<scalar_t>(),
is_update.data<int32_t>(),
textures.data<scalar_t>(),
texture_size,
texture_res,
image_height,
image_width);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in load_textures: %s\n", cudaGetErrorString(err));
return textures;
}
|
ea67b6d065ecbcf4ec9cb3c166fd911883b9fd54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Tue Aug 30 09:38:38 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches cgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
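/*
    Added illustration (not from the original file): for m = 200 rows, NB = 64
    and batchCount = 8, the host wrapper below launches with
        dim3 threads( NB );                        // 64 threads per block
        dim3 grid( magma_ceildiv( m, NB ), 8 );    // grid = (4, 8)
    so blockIdx.y selects the matrix and blockIdx.x selects the 64-row slab.
*/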
__global__ void
cgeadd_batched_kernel(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
magmaFloatComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaFloatComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
hipLaunchKernelGGL(( cgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
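/*
    Minimal usage sketch (added for illustration only; building the batched
    pointer arrays and error handling are application specific):
        // dA_array, dB_array: device arrays of batchCount pointers to
        // m-by-n column-major matrices with leading dimensions ldda, lddb.
        magmaFloatComplex alpha = MAGMA_C_MAKE( 2.f, 0.f );
        magmablas_cgeadd_batched( m, n, alpha,
                                  dA_array, ldda,
                                  dB_array, lddb,
                                  batchCount, queue );
        // afterwards dB_array[i] holds alpha*A_i + B_i for each i.
*/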
| ea67b6d065ecbcf4ec9cb3c166fd911883b9fd54.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zgeadd_batched.cu, normal z -> c, Tue Aug 30 09:38:38 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
Batches cgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
cgeadd_batched_kernel(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex * const *dAarray, int ldda,
magmaFloatComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
magmaFloatComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaFloatComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two sets of matrices, dBarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd_batched
*******************************************************************************/
extern "C" void
magmablas_cgeadd_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaFloatComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ), batchCount );
cgeadd_batched_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
|
3bf76261c15a1e5c69d6acb0dc26bc871628b767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
using namespace std;
// 2D surfaces
surface<void, cudaSurfaceType2D> inputSurfRef;
surface<void, cudaSurfaceType2D> outputSurfRef;
// kernel: multiplies the surface-bound matrix by itself (C = A*A); the original copy-and-increment code is left commented out below
__global__ void copyKernel(int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height)
{
float data_1,data_2,Cvalue=0;
int e;
for(e=0;e<width;e++)
{
surf2Dread(&data_1, inputSurfRef, e*4, y);
surf2Dread(&data_2, inputSurfRef, x*4, e);
Cvalue=data_1*data_2 + Cvalue;
}
surf2Dwrite(Cvalue, outputSurfRef, x*4, y);
// Read from input surface
//surf2Dread(&data, inputSurfRef, x*4, y);
// Write to output surface
//data = data + 2;
//surf2Dwrite(data, outputSurfRef, x*4, y);
}
}
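// Added note: the x coordinates passed to surf2Dread/surf2Dwrite above
// (e*4, x*4) are byte offsets -- surfaces address the x dimension in bytes,
// and each float element is 4 bytes wide.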
// Host code
int main()
{
int width = 3;
int height = 3;
int size = sizeof(float)*width*height;
//allocate host and device memory
float *h_data;
float *h_data_out;
h_data = (float*)malloc(sizeof(float)*height*width);
h_data_out = (float*)malloc(sizeof(float)*height*width);
//initialize host matrix before usage
for(int loop=0; loop<width*height;loop++)
h_data[loop] = (float)rand()/(float)(RAND_MAX-1);
cout<<"datos entrada : "<<endl<<endl;
for(int i = 0;i<width*height;i++)
{
cout<<h_data[i]<<endl;
}
// Allocate CUDA arrays in device memory
hipChannelFormatDesc channelDesc;
channelDesc = hipCreateChannelDesc<float>();
hipArray* cuInputArray; hipArray* cuOutputArray;
hipMallocArray(&cuInputArray, &channelDesc, width, height,hipArraySurfaceLoadStore);
hipMallocArray(&cuOutputArray, &channelDesc, width, height, hipArraySurfaceLoadStore);
// Copy to device memory some data located at address h_data in host memory
hipMemcpyToArray(cuInputArray, 0, 0, h_data, size, hipMemcpyHostToDevice);
// Bind the arrays to the surface references
hipBindSurfaceToArray(inputSurfRef, cuInputArray);
hipBindSurfaceToArray(outputSurfRef, cuOutputArray);
// Invoke kernel
dim3 dimBlock(3, 3, 1);
dim3 dimGrid(1,1,1);
hipLaunchKernelGGL(( copyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, width, height);
// Copy to host memory some data located at address outputSurfRef in device memory
hipMemcpyFromArray(h_data_out,cuOutputArray,0,0 , size, hipMemcpyDeviceToHost);
// Display
cout<<endl<<"output data : "<<endl<<endl;
for(int i = 0;i<width*height;i++)
{
cout<<h_data_out[i]<<endl;
}
// Free device memory
free(h_data);
hipFreeArray(cuInputArray);
hipFreeArray(cuOutputArray);
system("pause");
return 0;
} | 3bf76261c15a1e5c69d6acb0dc26bc871628b767.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
using namespace std;
// 2D surfaces
surface<void, cudaSurfaceType2D> inputSurfRef;
surface<void, cudaSurfaceType2D> outputSurfRef;
// kernel: multiplies the surface-bound matrix by itself (C = A*A); the original copy-and-increment code is left commented out below
__global__ void copyKernel(int width, int height)
{
// Calculate surface coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height)
{
float data_1,data_2,Cvalue=0;
int e;
for(e=0;e<width;e++)
{
surf2Dread(&data_1, inputSurfRef, e*4, y);
surf2Dread(&data_2, inputSurfRef, x*4, e);
Cvalue=data_1*data_2 + Cvalue;
}
surf2Dwrite(Cvalue, outputSurfRef, x*4, y);
// Read from input surface
//surf2Dread(&data, inputSurfRef, x*4, y);
// Write to output surface
//data = data + 2;
//surf2Dwrite(data, outputSurfRef, x*4, y);
}
}
// Host code
int main()
{
int width = 3;
int height = 3;
int size = sizeof(float)*width*height;
//allocate host and device memory
float *h_data;
float *h_data_out;
h_data = (float*)malloc(sizeof(float)*height*width);
h_data_out = (float*)malloc(sizeof(float)*height*width);
//initialize host matrix before usage
for(int loop=0; loop<width*height;loop++)
h_data[loop] = (float)rand()/(float)(RAND_MAX-1);
cout<<"datos entrada : "<<endl<<endl;
for(int i = 0;i<width*height;i++)
{
cout<<h_data[i]<<endl;
}
// Allocate CUDA arrays in device memory
cudaChannelFormatDesc channelDesc;
channelDesc = cudaCreateChannelDesc<float>();
cudaArray* cuInputArray; cudaArray* cuOutputArray;
cudaMallocArray(&cuInputArray, &channelDesc, width, height,cudaArraySurfaceLoadStore);
cudaMallocArray(&cuOutputArray, &channelDesc, width, height, cudaArraySurfaceLoadStore);
// Copy to device memory some data located at address h_data in host memory
cudaMemcpyToArray(cuInputArray, 0, 0, h_data, size, cudaMemcpyHostToDevice);
// Bind the arrays to the surface references
cudaBindSurfaceToArray(inputSurfRef, cuInputArray);
cudaBindSurfaceToArray(outputSurfRef, cuOutputArray);
// Invoke kernel
dim3 dimBlock(3, 3, 1);
dim3 dimGrid(1,1,1);
copyKernel<<<dimGrid, dimBlock>>>(width, height);
// Copy to host memory some data located at address outputSurfRef in device memory
cudaMemcpyFromArray(h_data_out,cuOutputArray,0,0 , size, cudaMemcpyDeviceToHost);
// Display
cout<<endl<<"datos de salida : "<<endl<<endl;
for(int i = 0;i<width*height;i++)
{
cout<<h_data_out[i]<<endl;
}
// Free device memory
free(h_data);
cudaFreeArray(cuInputArray);
cudaFreeArray(cuOutputArray);
system("pause");
return 0;
} |
7c0ababb6f3a0fce8bf0825563d6e9c2c3bf098a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <cusp/array2d.h>
#include <thrust/tuple.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <cusparse_v2.h>
#include "stbi_raii.hpp"
#include <hipcub/hipcub.hpp>
#include <cusp/transpose.h>
#include <cusp/gallery/grid.h>
#include <cusp/gallery/poisson.h>
#include <cusp/print.h>
#include <cusp/convert.h>
#include <cusp/relaxation/sor.h>
#include <cusp/relaxation/jacobi.h>
#include <cusp/krylov/cg.h>
#include <cusp/krylov/bicgstab.h>
#include <cusp/linear_operator.h>
#include <cusp/precond/diagonal.h>
#include <cusp/monitor.h>
#include <cusp/io/matrix_market.h>
#include "matrix_functional.cuh"
#include "zip_it.cuh"
#include "cycle_iterator.cuh"
using real = float;
void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(
stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
{
exit(code);
}
}
}
void gpuErrchk(hipError_t ans)
{
gpuAssert((ans), __FILE__, __LINE__);
}
static const char* _cusparseGetErrorEnum(hipsparseStatus_t error)
{
switch (error)
{
case HIPSPARSE_STATUS_SUCCESS: return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case HIPSPARSE_STATUS_ZERO_PIVOT: return "HIPSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void
__cusparseSafeCall(hipsparseStatus_t err, const char* file, const int line)
{
if (HIPSPARSE_STATUS_SUCCESS != err)
{
fprintf(stderr,
"CUSPARSE error in file '%s', line %d, error %s\nterminating!\n",
__FILE__,
__LINE__,
_cusparseGetErrorEnum(err));
assert(0);
}
}
extern "C" void cusparseSafeCall(hipsparseStatus_t err)
{
__cusparseSafeCall(err, __FILE__, __LINE__);
}
template <typename T>
constexpr T sqr(T val) noexcept
{
return val * val;
}
template <typename T>
void strided_copy(const T* i_src,
T* i_dest,
int src_stride,
int dest_stride,
int n,
hipMemcpyKind i_kind)
{
hipMemcpy2D(i_dest,
sizeof(T) * dest_stride,
i_src,
sizeof(T) * src_stride,
sizeof(T),
n,
i_kind);
}
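// Added note: hipMemcpy2D is used above as a strided element copy -- each
// "row" is one element of sizeof(T) bytes, the pitches are the source and
// destination strides, and n is the element count. With src_stride = nchannels
// and dest_stride = 1 this de-interleaves one channel of an interleaved RGB
// image into a packed plane (and the reverse direction packs it back).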
void make_device_image(gsl::not_null<const real*> h_image,
cusp::array2d<real, cusp::device_memory>::view d_image)
{
const int npixels = d_image.num_cols;
const int nchannels = d_image.num_rows;
for (int c = 0; c < nchannels; ++c)
{
auto d_image_channel = d_image.values.begin().base().get() + npixels * c;
const auto h_image_channel = h_image.get() + c;
strided_copy(h_image_channel,
d_image_channel,
nchannels,
1,
npixels,
hipMemcpyHostToDevice);
}
}
void make_host_image(cusp::array2d<real, cusp::device_memory>::view d_image,
gsl::not_null<real*> h_image)
{
const int npixels = d_image.num_cols;
const int nchannels = d_image.num_rows;
for (int c = 0; c < nchannels; ++c)
{
auto d_image_channel = d_image.values.begin().base().get() + npixels * c;
const auto h_image_channel = h_image.get() + c;
strided_copy(d_image_channel,
h_image_channel,
1,
nchannels,
npixels,
hipMemcpyDeviceToHost);
}
}
cusp::csr_matrix<int, real, cusp::device_memory>
cusparse_add(cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_A,
cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_B)
{
cusp::array1d<int, cusp::device_memory> A_row_offsets(di_A.num_rows + 1);
cusp::indices_to_offsets(di_A.row_indices, A_row_offsets);
cusp::array1d<int, cusp::device_memory> B_row_offsets(di_B.num_rows + 1);
cusp::indices_to_offsets(di_B.row_indices, B_row_offsets);
hipsparseHandle_t handle;
cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t A_description;
cusparseSafeCall(hipsparseCreateMatDescr(&A_description));
hipsparseMatDescr_t B_description;
cusparseSafeCall(hipsparseCreateMatDescr(&B_description));
hipsparseMatDescr_t C_description;
cusparseSafeCall(hipsparseCreateMatDescr(&C_description));
// Coefficients
const real alpha = 1.f;
const real beta = 0.1f * 2.f;
int C_base, C_nnz;
// Not sure if this is needed
int* nnz_total = &C_nnz;
cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST));
cusp::array1d<int, cusp::device_memory> C_row_offsets(di_A.num_rows + 1);
cusparseSafeCall(hipsparseXcsrgeamNnz(handle,
di_A.num_rows,
di_A.num_cols,
A_description,
di_A.num_entries,
A_row_offsets.begin().base().get(),
di_A.column_indices.begin().base().get(),
B_description,
di_B.num_entries,
B_row_offsets.begin().base().get(),
di_B.column_indices.begin().base().get(),
C_description,
C_row_offsets.begin().base().get(),
nnz_total));
if (nnz_total != NULL)
{
C_nnz = *nnz_total;
}
else
{
C_nnz = C_row_offsets.back();
C_nnz -= C_row_offsets[0];
}
cusp::csr_matrix<int, real, cusp::device_memory> do_C(
di_A.num_rows, di_A.num_cols, C_nnz);
do_C.row_offsets = std::move(C_row_offsets);
// Now actually do the add
cusparseSafeCall(hipsparseScsrgeam(handle,
di_A.num_rows,
di_A.num_cols,
&alpha,
A_description,
di_A.num_entries,
di_A.values.begin().base().get(),
A_row_offsets.begin().base().get(),
di_A.column_indices.begin().base().get(),
&beta,
B_description,
di_B.num_entries,
di_B.values.begin().base().get(),
B_row_offsets.begin().base().get(),
di_B.column_indices.begin().base().get(),
C_description,
do_C.values.begin().base().get(),
do_C.row_offsets.begin().base().get(),
do_C.column_indices.begin().base().get()));
return do_C;
}
void check_symmetry(
cusp::csr_matrix<int, real, cusp::device_memory>::const_view di_M)
{
// Copy to host
cusp::csr_matrix<int, real, cusp::host_memory> M = di_M;
// Transpose
cusp::csr_matrix<int, real, cusp::host_memory> MT(
M.num_cols, M.num_rows, M.num_entries);
cusp::transpose(M, MT);
printf("Checking for symmetry\n");
for (int i = 0; i < di_M.num_entries; ++i)
{
const real value = M.values[i];
const real valueT = MT.values[i];
if (value != valueT)
{
printf("BAD symmetry at: %d with value: %f and value^T: %f\n",
i,
value,
valueT);
}
}
}
void build_M(float3 L,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_M)
{
const int nnormals = do_M.num_rows / 3;
// Outer product of the lighting direction
// clang-format off
real LLT[9] = {L.x * L.x, L.x * L.y, L.x * L.z,
L.x * L.y, L.y * L.y, L.y * L.z,
L.x * L.z, L.y * L.z, L.z * L.z};
printf("LLT:\n [%f, %f, %f,\n %f, %f, %f,\n %f, %f, %f]\n",
LLT[0], LLT[1], LLT[2],
LLT[3], LLT[4], LLT[5],
LLT[6], LLT[7], LLT[8]);
// clang-format on
// Copy to the device
thrust::device_vector<real> d_LLT(LLT, LLT + 9);
// Perform a kronecker product of LLT with the Identity matrix
// We want to iterate over each row of LLT, n times where n is the number of
// normals
const auto LLT_row = detail::make_row_iterator(nnormals * 3);
// We want to iterate over each column of LLT, in a repeating cycle for each n
const auto LLT_col = detail::make_column_iterator(3);
// Now we can combine the two
const auto LLT_i = thrust::make_transform_iterator(
detail::zip_it(LLT_row, LLT_col),
[=] __host__ __device__(const thrust::tuple<int, int>& coord) {
return coord.get<0>() * 3 + coord.get<1>();
});
// Use the look up index to get the real value from LLT
const auto LLT_v = thrust::make_permutation_iterator(d_LLT.begin(), LLT_i);
// Copy the values across to M
thrust::copy_n(LLT_v, nnormals * 9, do_M.values.begin());
// The row keys will be i / 3, as we only have 3 values per row and column
const auto count = thrust::make_counting_iterator(0);
thrust::transform(count,
count + nnormals * 9,
do_M.row_indices.begin(),
detail::unary_divides<int>(3));
// To write the column keys we need a repeating sequence of 0, 1, 2 * n to
// give 0, n, 2n, and then we offset by the row % n
thrust::transform(LLT_col,
LLT_col + nnormals * 9,
do_M.row_indices.begin(),
do_M.column_indices.begin(),
[=] __host__ __device__(int s, int r) {
return (r % nnormals) + s * nnormals;
});
using tup3 = thrust::tuple<int, int, real>;
const auto inc_diag = [=] __host__ __device__(tup3 entry) {
// Add one to the diagonals
if (entry.get<0>() == entry.get<1>())
{
entry.get<2>() += 1;
}
return entry;
};
auto entry_it = detail::zip_it(
do_M.row_indices.begin(), do_M.column_indices.begin(), do_M.values.begin());
// Fix the boundary cell diagonals
// thrust::transform(entry_it, entry_it + nnormals*3, entry_it, inc_diag);
}
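// Added illustration (not from the original source): with n = nnormals the
// matrix assembled above is the 3x3 block matrix
//     M = [ Lx*Lx*I  Lx*Ly*I  Lx*Lz*I ]
//         [ Lx*Ly*I  Ly*Ly*I  Ly*Lz*I ]
//         [ Lx*Lz*I  Ly*Lz*I  Lz*Lz*I ]
// where each block is a scaled n x n identity, i.e. M = (L L^T) kron I_n,
// matching the layout in which the x, y and z components of the normals are
// stored as three contiguous length-n segments.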
void build_B(const int m,
const int n,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_B)
{
const int nsb = do_B.num_entries / 3;
const int nnormals = m * n;
auto entry_it = detail::zip_it(
do_B.row_indices.begin(), do_B.column_indices.begin(), do_B.values.begin());
// Build the discrete Poisson problem matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_temp;
cusp::gallery::poisson5pt(d_temp, n, m);
const auto temp_begin = detail::zip_it(d_temp.row_indices.begin(),
d_temp.column_indices.begin(),
d_temp.values.begin());
thrust::copy_n(temp_begin, nsb, entry_it);
using tup3 = thrust::tuple<int, int, real>;
const auto fix_bnds = [=] __host__ __device__(tup3 entry) {
// Fix boundary cell diagonals
if (entry.get<0>() == entry.get<1>())
{
const int r = entry.get<0>() / n;
const int c = entry.get<0>() % n;
// If we're in a boundary cell we subtract one from the valence
entry.get<2>() -= (r == 0 || r == (m - 1));
entry.get<2>() -= (c == 0 || c == (n - 1));
}
return entry;
};
// Fix the boundary cell diagonals
thrust::transform(entry_it, entry_it + nsb, entry_it, fix_bnds);
// Correct the boundaries which don't have valence of 4 and copy the
// corrected B for each channel of the normal vectors (3 times).
// Tuple of [Row, Column, Value]
auto entry_s = detail::make_cycle_iterator(entry_it, nsb);
// Copy sB 3 times, offsetting by it's width and height for each new copy
const auto op = [=] __host__ __device__(tup3 entry, int count) {
// Work out what channel we're in
const int channel = count / nsb;
// Offset for the channel
entry.get<0>() += channel * nnormals;
entry.get<1>() += channel * nnormals;
return entry;
};
const auto count = thrust::make_counting_iterator(nsb);
thrust::transform(entry_s, entry_s + nsb * 2, count, entry_it + nsb, op);
}
template <typename T>
constexpr __host__ __device__ real signum(T val)
{
return (T(0) <= val) - (val < T(0));
}
template <typename T>
constexpr __host__ __device__ T clamp(const T& n,
const T& lower,
const T& upper)
{
return max(lower, min(n, upper));
}
template <typename T>
constexpr __host__ __device__ T iclamp(const T& n, const T& e)
{
return max(e, std::abs(n)) * signum(n);
}
struct relative_height_from_normals
{
using vec2 = thrust::tuple<real, real>;
__host__ __device__ real dot(const vec2& n1, const vec2& n2) const noexcept
{
return n1.get<0>() * n2.get<0>() + n1.get<1>() * n2.get<1>();
}
__host__ __device__ real det(const vec2& n1, const vec2& n2) const noexcept
{
return n1.get<0>() * n2.get<1>() - n1.get<1>() * n2.get<0>();
}
__host__ __device__ vec2 normalize(const vec2& n) const noexcept
{
const auto norm = std::sqrt(dot(n, n));
return thrust::make_tuple(n.get<0>() / norm, n.get<1>() / norm);
}
__host__ __device__ real operator()(vec2 n1,
vec2 n2,
bool debug = false) const noexcept
{
// Normalize n1 and n2
n1 = normalize(n1);
n2 = normalize(n2);
const real x = n1.get<0>() - n2.get<0>();
const real y = n1.get<1>() - n2.get<1>();
real q;
constexpr float epsilon = 0.0000001f;
if (std::abs(x) > epsilon)
{
q = y / x;
}
else
{
const auto inf = std::numeric_limits<real>::infinity();
const real g1 =
n1.get<0>() == 0.f ? inf : n1.get<1>() / n1.get<0>();
if (g1 == inf)
q = 0.f;
else if (g1 == 0.f)
q = 1.f / epsilon;
else
q = 1.f / g1;
}
return q;
}
};
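// Added worked example (not from the original source): for projected 2D
// normals n1 = (0, 1) (facing straight up) and n2 = (0.6, 0.8),
//   x = 0 - 0.6 = -0.6,  y = 1 - 0.8 = 0.2,  q = y / x = -1/3,
// which build_Q_values() below stores (sign-flipped in the lower triangle)
// as the relative height between the two neighbouring pixels.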
void normalize(cusp::array1d<real, cusp::device_memory>::view dio_v)
{
// Subtract the minimum value
const real min = *thrust::min_element(dio_v.begin(), dio_v.end());
const detail::unary_minus<real> subf(min);
thrust::transform(dio_v.begin(), dio_v.end(), dio_v.begin(), subf);
// Divide by the maximum value
const real scale = 1.f / *thrust::max_element(dio_v.begin(), dio_v.end());
const detail::unary_multiplies<real> mulf(scale);
thrust::transform(dio_v.begin(), dio_v.end(), dio_v.begin(), mulf);
}
void print_range_avg(cusp::array1d<real, cusp::device_memory>::const_view di_v)
{
const real min = *thrust::min_element(di_v.begin(), di_v.end());
const real max = *thrust::max_element(di_v.begin(), di_v.end());
const real avg = thrust::reduce(di_v.begin(), di_v.end()) / di_v.size();
std::cout << "min: " << min << ", max: " << max << ", avg: " << avg << '\n';
}
void build_Q_values(cusp::array2d<real, cusp::device_memory>::view di_normals,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_Q)
{
// Iterate over the normals with their index
const auto count = thrust::make_counting_iterator(0);
const auto normal_begin = detail::zip_it(di_normals.row(0).begin(),
di_normals.row(1).begin(),
di_normals.row(2).begin(),
count);
// Iterate over pairs of normals using the matrix coordinates
const auto n1_begin =
thrust::make_permutation_iterator(normal_begin, do_Q.row_indices.begin());
const auto n2_begin = thrust::make_permutation_iterator(
normal_begin, do_Q.column_indices.begin());
const auto n1_end = n1_begin + do_Q.num_entries;
using vec = thrust::tuple<real, real, real, int>;
thrust::transform(n1_begin,
n1_end,
n2_begin,
do_Q.values.begin(),
[] __host__ __device__(const vec& i_n1, const vec& i_n2) {
// Check whether these normals are vertical or horizontal
// neighbors and project the normals accordingly
auto n1 = thrust::make_tuple(0.f, i_n1.get<2>());
auto n2 = thrust::make_tuple(0.f, i_n2.get<2>());
if (std::abs(i_n1.get<3>() - i_n2.get<3>()) == 1)
{
n1.get<0>() = i_n1.get<0>();
n2.get<0>() = i_n2.get<0>();
}
else
{
n1.get<0>() = i_n1.get<1>();
n2.get<0>() = i_n2.get<1>();
}
// in lower triangle
const bool lower = i_n1.get<3>() > i_n2.get<3>();
const real q = relative_height_from_normals{}(n1, n2);
return lower ? -q : q;
});
}
void apply_sor(
cusp::csr_matrix<int, real, cusp::device_memory>::const_view di_A,
cusp::array1d<real, cusp::device_memory>::const_view di_b,
cusp::array1d<real, cusp::device_memory>::view do_x,
const real i_w,
const real i_tol,
const int i_max_iter,
const bool verbose)
{
// Linear SOR operator
cusp::relaxation::sor<real, cusp::device_memory> M(di_A, i_w);
// Array to store the residual
cusp::array1d<real, cusp::device_memory> d_r(di_b.size());
// Compute the initial residual
const auto compute_residual = [&] __host__ {
cusp::multiply(di_A, do_x, d_r);
cusp::blas::axpy(di_b, d_r, -1.f);
};
compute_residual();
// Monitor the convergence
cusp::monitor<real> monitor(di_b, i_max_iter, i_tol, 0, verbose);
// Iterate until convergence criteria is met
for (; !monitor.finished(d_r); ++monitor)
{
// Apply the SOR linear operator to iterate on our solution
M(di_A, di_b, do_x);
// Compute the residual
compute_residual();
}
}
int main(int argc, char* argv[])
{
assert(argc >= 6);
auto h_image = stbi::loadf(argv[1], 3);
printf("Loaded image with dim: %dx%dx%d\n",
h_image.width(),
h_image.height(),
h_image.n_channels());
const real azimuth = std::stof(argv[2]) * M_PI / 180.0f;
const real polar = std::stof(argv[3]) * M_PI / 180.0f;
// Lighting direction
float3 L{std::stof(argv[2]), std::stof(argv[3]), std::stof(argv[4])};
// float3 L{std::sin(polar) * std::cos(azimuth),
// std::sin(polar) * std::sin(azimuth),
// std::cos(polar)};
const real L_rlen = 1.f / std::sqrt(L.x * L.x + L.y * L.y + L.z * L.z);
L.x *= L_rlen;
L.y *= L_rlen;
L.z *= L_rlen;
printf("L: [%f, %f, %f]\n", L.x, L.y, L.z);
cusp::array2d<real, cusp::device_memory> d_image(h_image.n_channels(),
h_image.n_pixels());
make_device_image(h_image.get(), d_image);
auto d_intensity = d_image.row(0);
cusp::blas::scal(d_intensity, 2.f);
// cusp::io::read_matrix_market_file(d_intensity, "shading.mtx");
print_range_avg(d_intensity);
// normalize(d_intensity);
print_range_avg(d_intensity);
const int width = h_image.width();
const int height = h_image.height();
const int nnormals = width * height;
printf("Num pixels: %d rows * %d cols = %d\n", height, width, nnormals);
cusp::coo_matrix<int, real, cusp::device_memory> d_M(
nnormals * 3, nnormals * 3, nnormals * 9);
build_M(L, d_M);
printf("M has been built %dx%d\n", d_M.num_rows, d_M.num_cols);
// B is our pixel 4-neighborhood adjacency matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_B(
nnormals * 3, nnormals * 3, 3 * (height * (5 * width - 2) - 2 * width));
build_B(height, width, d_B);
printf("B has been built %dx%d\n", d_B.num_rows, d_B.num_cols);
// Now we build A using M and B
// A = M + 8lmI -2lmB <=> A = M + 2lm(4I - B)
// So we use cuSparse to compute alpha * M + beta * B, where beta is 2lm
// Now we can add M
auto d_A = cusparse_add(d_M, d_B);
printf("A has been built %dx%d\n", d_A.num_rows, d_A.num_cols);
// cusp::print(d_A.values.subarray(0, 10));
check_symmetry(d_A);
// The b vector of the system is (shading intensity * L), where L repeats
// Copy L to the device
thrust::device_vector<real> d_L(&L.x, (&L.x) + 3);
// Iterate over one component of L per channel of the normals
const auto cyclic_L = thrust::make_permutation_iterator(
d_L.begin(), detail::make_row_iterator(nnormals));
const thrust::multiplies<real> mul;
// Loop over for each dimension of the normals
const auto cyclic_i =
detail::make_cycle_iterator(d_intensity.begin(), nnormals);
// Allocate the b vector
cusp::array1d<real, cusp::device_memory> d_b(nnormals * 3);
// Write the b vector
thrust::transform(
cyclic_i, cyclic_i + nnormals * 3, cyclic_L, d_b.begin(), mul);
printf("b has been built %dx%d\n", d_b.size(), 1);
// Now we can solve for the relative normals via SOR
cusp::array1d<real, cusp::device_memory> d_x(3 * nnormals, 1.f);
thrust::tabulate(
d_x.begin(), d_x.end(), [=] __host__ __device__(int x) -> real {
return x >= nnormals * 2;
});
{
apply_sor(d_A, d_b, d_x, 1.f, 1e-5f, 1500, true);
// Normalize
using vec3 = thrust::tuple<real, real, real>;
const auto normalize_vec = [] __host__ __device__(vec3 normal) {
const real rmag =
1.f / std::sqrt(sqr(normal.get<0>()) + sqr(normal.get<1>()) +
sqr(normal.get<2>()));
normal.get<0>() *= rmag;
normal.get<1>() *= rmag;
normal.get<2>() = std::abs(normal.get<2>()) * rmag;
return normal;
};
// Normalize our resulting normals
auto norm_begin = detail::zip_it(
d_x.begin(), d_x.begin() + nnormals, d_x.begin() + nnormals * 2);
auto norm_end = norm_begin + nnormals;
thrust::transform(norm_begin, norm_end, norm_begin, normalize_vec);
}
printf("Done\n");
auto d_initial_normals = cusp::make_array2d_view(
3, nnormals, nnormals, cusp::make_array1d_view(d_x), cusp::row_major{});
cusp::array2d<real, cusp::device_memory> normal_copy = d_initial_normals;
thrust::transform(normal_copy.values.begin(),
normal_copy.values.end(),
normal_copy.values.begin(),
detail::unary_plus<real>(1.f));
thrust::transform(normal_copy.values.begin(),
normal_copy.values.end(),
normal_copy.values.begin(),
detail::unary_multiplies<real>(0.5f));
make_host_image(normal_copy, h_image.get());
stbi::writef("initial_normals.png", h_image);
// Now that we have relative normals, we calculate the relative heights
cusp::coo_matrix<int, real, cusp::device_memory> d_Q(
nnormals, nnormals, height * (4 * width - 2) - 2 * width);
// Initialize a grid matrix using CUSP
cusp::gallery::grid2d(d_Q, width, height);
build_Q_values(d_initial_normals, d_Q);
// Now we can assemble a poisson problem to solve the absolute heights
cusp::array1d<real, cusp::device_memory> d_pb(nnormals);
thrust::reduce_by_key(d_Q.row_indices.begin(),
d_Q.row_indices.end(),
d_Q.values.begin(),
thrust::make_discard_iterator(),
d_pb.begin());
// The A matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_pA(
nnormals, nnormals, height * (5 * width - 2) - 2 * width);
cusp::gallery::poisson5pt(d_pA, width, height);
auto pA_begin = detail::zip_it(
d_pA.row_indices.begin(), d_pA.column_indices.begin(), d_pA.values.begin());
using tup3 = thrust::tuple<int, int, real>;
const auto fix_bnds = [=] __host__ __device__(tup3 entry) {
// Fix boundary cell diagonals
if (entry.get<0>() == entry.get<1>())
{
const int r = entry.get<0>() / width;
const int c = entry.get<0>() % width;
// If we're in a boundary cell we subtract one from the valence
entry.get<2>() -= (r == 0 || r == (height - 1));
entry.get<2>() -= (c == 0 || c == (width - 1));
}
return entry;
};
// Fix the boundary cell diagonals
thrust::transform(pA_begin, pA_begin + d_pA.num_entries, pA_begin, fix_bnds);
// To get a result we need to "pin" the solution by setting an arbitrary
// value to some constant. I use the first height.
// Make the first equation a trivial solution 1*h0 = x
d_pA.values.begin()[0] = 1.f;
d_pA.values.begin()[1] = 0.f;
d_pA.values.begin()[2] = 0.f;
// Need to replace any references to the final solution with constants in b
d_pA.values.begin()[3] = 0.f;
d_pA.values.begin()[4 * width - 2] = 0.f;
d_pb[0] = 0.5f;
d_pb.begin()[1] += d_pb[0];
d_pb.begin()[width] += d_pb[0];
cusp::array1d<real, cusp::device_memory> d_h(nnormals, 0.5f);
d_h[0] = d_pb[0];
{
cusp::csr_matrix<int, real, cusp::device_memory> pA(
d_pA.num_rows, d_pA.num_cols, d_pA.num_entries);
cusp::indices_to_offsets(d_pA.row_indices, pA.row_offsets);
pA.column_indices = d_pA.column_indices;
pA.values = d_pA.values;
apply_sor(pA, d_pb, d_h, 0.9f, 1e-4f, std::stoi(argv[5]), true);
}
printf("H0: %f, H1: %f, H2:%f, H4:%f, Q01: %f, Q10: %f, Q12: %f, Q14: %f\n",
(real)d_h[0],
(real)d_h[1],
(real)d_h[2],
(real)d_h[4],
(real)d_Q.values[0],
(real)d_Q.values[2],
(real)d_Q.values[3],
(real)d_Q.values[4]);
print_range_avg(d_h);
normalize(d_h);
const auto h_out = detail::zip_it(d_h.begin(), d_h.begin(), d_h.begin());
const auto rn_begin = detail::zip_it(d_initial_normals.row(0).begin(),
d_initial_normals.row(1).begin(),
d_initial_normals.row(2).begin());
thrust::copy_n(h_out, nnormals, rn_begin);
make_host_image(d_initial_normals, h_image.get());
stbi::writef("height.png", h_image);
}
| 7c0ababb6f3a0fce8bf0825563d6e9c2c3bf098a.cu | #include <iostream>
#include <stdlib.h>
#include <cusp/array2d.h>
#include <thrust/tuple.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_output_iterator.h>
#include <cusparse_v2.h>
#include "stbi_raii.hpp"
#include <cub/cub.cuh>
#include <cusp/transpose.h>
#include <cusp/gallery/grid.h>
#include <cusp/gallery/poisson.h>
#include <cusp/print.h>
#include <cusp/convert.h>
#include <cusp/relaxation/sor.h>
#include <cusp/relaxation/jacobi.h>
#include <cusp/krylov/cg.h>
#include <cusp/krylov/bicgstab.h>
#include <cusp/linear_operator.h>
#include <cusp/precond/diagonal.h>
#include <cusp/monitor.h>
#include <cusp/io/matrix_market.h>
#include "matrix_functional.cuh"
#include "zip_it.cuh"
#include "cycle_iterator.cuh"
using real = float;
void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(
stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
{
exit(code);
}
}
}
void gpuErrchk(cudaError_t ans)
{
gpuAssert((ans), __FILE__, __LINE__);
}
static const char* _cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT: return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void
__cusparseSafeCall(cusparseStatus_t err, const char* file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err)
{
fprintf(stderr,
"CUSPARSE error in file '%s', line %d, error %s\nterminating!\n",
__FILE__,
__LINE__,
_cusparseGetErrorEnum(err));
assert(0);
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err)
{
__cusparseSafeCall(err, __FILE__, __LINE__);
}
template <typename T>
constexpr T sqr(T val) noexcept
{
return val * val;
}
template <typename T>
void strided_copy(const T* i_src,
T* i_dest,
int src_stride,
int dest_stride,
int n,
cudaMemcpyKind i_kind)
{
cudaMemcpy2D(i_dest,
sizeof(T) * dest_stride,
i_src,
sizeof(T) * src_stride,
sizeof(T),
n,
i_kind);
}
void make_device_image(gsl::not_null<const real*> h_image,
cusp::array2d<real, cusp::device_memory>::view d_image)
{
const int npixels = d_image.num_cols;
const int nchannels = d_image.num_rows;
for (int c = 0; c < nchannels; ++c)
{
auto d_image_channel = d_image.values.begin().base().get() + npixels * c;
const auto h_image_channel = h_image.get() + c;
strided_copy(h_image_channel,
d_image_channel,
nchannels,
1,
npixels,
cudaMemcpyHostToDevice);
}
}
void make_host_image(cusp::array2d<real, cusp::device_memory>::view d_image,
gsl::not_null<real*> h_image)
{
const int npixels = d_image.num_cols;
const int nchannels = d_image.num_rows;
for (int c = 0; c < nchannels; ++c)
{
auto d_image_channel = d_image.values.begin().base().get() + npixels * c;
const auto h_image_channel = h_image.get() + c;
strided_copy(d_image_channel,
h_image_channel,
1,
nchannels,
npixels,
cudaMemcpyDeviceToHost);
}
}
cusp::csr_matrix<int, real, cusp::device_memory>
cusparse_add(cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_A,
cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_B)
{
cusp::array1d<int, cusp::device_memory> A_row_offsets(di_A.num_rows + 1);
cusp::indices_to_offsets(di_A.row_indices, A_row_offsets);
cusp::array1d<int, cusp::device_memory> B_row_offsets(di_B.num_rows + 1);
cusp::indices_to_offsets(di_B.row_indices, B_row_offsets);
cusparseHandle_t handle;
cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t A_description;
cusparseSafeCall(cusparseCreateMatDescr(&A_description));
cusparseMatDescr_t B_description;
cusparseSafeCall(cusparseCreateMatDescr(&B_description));
cusparseMatDescr_t C_description;
cusparseSafeCall(cusparseCreateMatDescr(&C_description));
// Coefficients
const real alpha = 1.f;
const real beta = 0.1f * 2.f;
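// beta is the 2*lambda*mu weight applied to the Laplacian-style B matrix
// (cf. "A = M + 2lm(4I - B)" in main below); with beta = 0.1f * 2.f, lm is presumably 0.1.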
int C_base, C_nnz;
// Not sure if this is needed
int* nnz_total = &C_nnz;
cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST));
cusp::array1d<int, cusp::device_memory> C_row_offsets(di_A.num_rows + 1);
cusparseSafeCall(cusparseXcsrgeamNnz(handle,
di_A.num_rows,
di_A.num_cols,
A_description,
di_A.num_entries,
A_row_offsets.begin().base().get(),
di_A.column_indices.begin().base().get(),
B_description,
di_B.num_entries,
B_row_offsets.begin().base().get(),
di_B.column_indices.begin().base().get(),
C_description,
C_row_offsets.begin().base().get(),
nnz_total));
if (nnz_total != NULL)
{
C_nnz = *nnz_total;
}
else
{
C_nnz = C_row_offsets.back();
C_nnz -= C_row_offsets[0];
}
cusp::csr_matrix<int, real, cusp::device_memory> do_C(
di_A.num_rows, di_A.num_cols, C_nnz);
do_C.row_offsets = std::move(C_row_offsets);
// Now actually do the add
cusparseSafeCall(cusparseScsrgeam(handle,
di_A.num_rows,
di_A.num_cols,
&alpha,
A_description,
di_A.num_entries,
di_A.values.begin().base().get(),
A_row_offsets.begin().base().get(),
di_A.column_indices.begin().base().get(),
&beta,
B_description,
di_B.num_entries,
di_B.values.begin().base().get(),
B_row_offsets.begin().base().get(),
di_B.column_indices.begin().base().get(),
C_description,
do_C.values.begin().base().get(),
do_C.row_offsets.begin().base().get(),
do_C.column_indices.begin().base().get()));
return do_C;
}
void check_symmetry(
cusp::csr_matrix<int, real, cusp::device_memory>::const_view di_M)
{
// Copy to host
cusp::csr_matrix<int, real, cusp::host_memory> M = di_M;
// Transpose
cusp::csr_matrix<int, real, cusp::host_memory> MT(
M.num_cols, M.num_rows, M.num_entries);
cusp::transpose(M, MT);
printf("Checking for symmetry\n");
for (int i = 0; i < di_M.num_entries; ++i)
{
const real value = M.values[i];
const real valueT = MT.values[i];
if (value != valueT)
{
printf("BAD symmetry at: %d with value: %f and value^T: %f\n",
i,
value,
valueT);
}
}
}
void build_M(float3 L,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_M)
{
const int nnormals = do_M.num_rows / 3;
// Outer product of the lighting direction
// clang-format off
real LLT[9] = {L.x * L.x, L.x * L.y, L.x * L.z,
L.x * L.y, L.y * L.y, L.y * L.z,
L.x * L.z, L.y * L.z, L.z * L.z};
printf("LLT:\n [%f, %f, %f,\n %f, %f, %f,\n %f, %f, %f]\n",
LLT[0], LLT[1], LLT[2],
LLT[3], LLT[4], LLT[5],
LLT[6], LLT[7], LLT[8]);
// clang-format on
// Copy to the device
thrust::device_vector<real> d_LLT(LLT, LLT + 9);
// Perform a kronecker product of LLT with the Identity matrix
// We want to iterate over each row of LLT, n times where n is the number of
// normals
const auto LLT_row = detail::make_row_iterator(nnormals * 3);
// We want to iterate over each column of LLT, in a repeating cycle for each n
const auto LLT_col = detail::make_column_iterator(3);
// Now we can combine the two
const auto LLT_i = thrust::make_transform_iterator(
detail::zip_it(LLT_row, LLT_col),
[=] __host__ __device__(const thrust::tuple<int, int>& coord) {
return coord.get<0>() * 3 + coord.get<1>();
});
// Use the look up index to get the real value from LLT
const auto LLT_v = thrust::make_permutation_iterator(d_LLT.begin(), LLT_i);
// Copy the values across to M
thrust::copy_n(LLT_v, nnormals * 9, do_M.values.begin());
// The row keys will be i / 3, as we only have 3 values per row and column
const auto count = thrust::make_counting_iterator(0);
thrust::transform(count,
count + nnormals * 9,
do_M.row_indices.begin(),
detail::unary_divides<int>(3));
// To write the column keys we need a repeating sequence of 0, 1, 2 * n to
// give 0, n, 2n, and then we offset by the row % n
thrust::transform(LLT_col,
LLT_col + nnormals * 9,
do_M.row_indices.begin(),
do_M.column_indices.begin(),
[=] __host__ __device__(int s, int r) {
return (r % nnormals) + s * nnormals;
});
using tup3 = thrust::tuple<int, int, real>;
const auto inc_diag = [=] __host__ __device__(tup3 entry) {
// Add one to the diagonals
if (entry.get<0>() == entry.get<1>())
{
entry.get<2>() += 1;
}
return entry;
};
auto entry_it = detail::zip_it(
do_M.row_indices.begin(), do_M.column_indices.begin(), do_M.values.begin());
// Fix the boundary cell diagonals
// thrust::transform(entry_it, entry_it + nnormals*3, entry_it, inc_diag);
}
void build_B(const int m,
const int n,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_B)
{
const int nsb = do_B.num_entries / 3;
const int nnormals = m * n;
auto entry_it = detail::zip_it(
do_B.row_indices.begin(), do_B.column_indices.begin(), do_B.values.begin());
// Build the discrete Poisson problem matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_temp;
cusp::gallery::poisson5pt(d_temp, n, m);
const auto temp_begin = detail::zip_it(d_temp.row_indices.begin(),
d_temp.column_indices.begin(),
d_temp.values.begin());
thrust::copy_n(temp_begin, nsb, entry_it);
using tup3 = thrust::tuple<int, int, real>;
const auto fix_bnds = [=] __host__ __device__(tup3 entry) {
// Fix boundary cell diagonals
if (entry.get<0>() == entry.get<1>())
{
const int r = entry.get<0>() / n;
const int c = entry.get<0>() % n;
// If we're in a boundary cell we subtract one from the valence
entry.get<2>() -= (r == 0 || r == (m - 1));
entry.get<2>() -= (c == 0 || c == (n - 1));
}
return entry;
};
// Fix the boundary cell diagonals
thrust::transform(entry_it, entry_it + nsb, entry_it, fix_bnds);
// Correct the boundaries which don't have valence of 4 and copy the
// corrected B for each channel of the normal vectors (3 times).
// Tuple of [Row, Column, Value]
auto entry_s = detail::make_cycle_iterator(entry_it, nsb);
// Copy sB 3 times, offsetting by it's width and height for each new copy
const auto op = [=] __host__ __device__(tup3 entry, int count) {
// Work out what channel we're in
const int channel = count / nsb;
// Offset for the channel
entry.get<0>() += channel * nnormals;
entry.get<1>() += channel * nnormals;
return entry;
};
const auto count = thrust::make_counting_iterator(nsb);
thrust::transform(entry_s, entry_s + nsb * 2, count, entry_it + nsb, op);
}
template <typename T>
constexpr __host__ __device__ real signum(T val)
{
return (T(0) <= val) - (val < T(0));
}
template <typename T>
constexpr __host__ __device__ T clamp(const T& n,
const T& lower,
const T& upper)
{
return max(lower, min(n, upper));
}
template <typename T>
constexpr __host__ __device__ T iclamp(const T& n, const T& e)
{
return max(e, std::abs(n)) * signum(n);
}
struct relative_height_from_normals
{
using vec2 = thrust::tuple<real, real>;
__host__ __device__ real dot(const vec2& n1, const vec2& n2) const noexcept
{
return n1.get<0>() * n2.get<0>() + n1.get<1>() * n2.get<1>();
}
__host__ __device__ real det(const vec2& n1, const vec2& n2) const noexcept
{
return n1.get<0>() * n2.get<1>() - n1.get<1>() * n2.get<0>();
}
__host__ __device__ vec2 normalize(const vec2& n) const noexcept
{
const auto norm = std::sqrt(dot(n, n));
return thrust::make_tuple(n.get<0>() / norm, n.get<1>() / norm);
}
__host__ __device__ real operator()(vec2 n1,
vec2 n2,
bool debug = false) const noexcept
{
// Normalize n1 and n2
n1 = normalize(n1);
n2 = normalize(n2);
const real x = n1.get<0>() - n2.get<0>();
const real y = n1.get<1>() - n2.get<1>();
real q;
constexpr float epsilon = 0.0000001f;
if (std::abs(x) > epsilon)
{
q = y / x;
}
else
{
const auto inf = std::numeric_limits<real>::infinity();
const real g1 =
n1.get<0>() == 0.f ? inf : n1.get<1>() / n1.get<0>();
if (g1 == inf)
q = 0.f;
else if (g1 == 0.f)
q = 1.f / epsilon;
else
q = 1.f / g1;
}
return q;
}
};
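// Sketch of the intent (as used by build_Q_values below): the two normals are first
// reduced to 2D by keeping the component along the neighbour offset plus z, and q
// estimates the relative height step between the two neighbouring pixels; the
// epsilon/slope branches guard the near-degenerate cases where the projected
// x-components coincide.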
void normalize(cusp::array1d<real, cusp::device_memory>::view dio_v)
{
// Subtract the minimum value
const real min = *thrust::min_element(dio_v.begin(), dio_v.end());
const detail::unary_minus<real> subf(min);
thrust::transform(dio_v.begin(), dio_v.end(), dio_v.begin(), subf);
// Divide by the maximum value
const real scale = 1.f / *thrust::max_element(dio_v.begin(), dio_v.end());
const detail::unary_multiplies<real> mulf(scale);
thrust::transform(dio_v.begin(), dio_v.end(), dio_v.begin(), mulf);
}
void print_range_avg(cusp::array1d<real, cusp::device_memory>::const_view di_v)
{
const real min = *thrust::min_element(di_v.begin(), di_v.end());
const real max = *thrust::max_element(di_v.begin(), di_v.end());
const real avg = thrust::reduce(di_v.begin(), di_v.end()) / di_v.size();
std::cout << "min: " << min << ", max: " << max << ", avg: " << avg << '\n';
}
void build_Q_values(cusp::array2d<real, cusp::device_memory>::view di_normals,
cusp::coo_matrix<int, real, cusp::device_memory>::view do_Q)
{
// Iterate over the normals with their index
const auto count = thrust::make_counting_iterator(0);
const auto normal_begin = detail::zip_it(di_normals.row(0).begin(),
di_normals.row(1).begin(),
di_normals.row(2).begin(),
count);
// Iterate over pairs of normals using the matrix coordinates
const auto n1_begin =
thrust::make_permutation_iterator(normal_begin, do_Q.row_indices.begin());
const auto n2_begin = thrust::make_permutation_iterator(
normal_begin, do_Q.column_indices.begin());
const auto n1_end = n1_begin + do_Q.num_entries;
using vec = thrust::tuple<real, real, real, int>;
thrust::transform(n1_begin,
n1_end,
n2_begin,
do_Q.values.begin(),
[] __host__ __device__(const vec& i_n1, const vec& i_n2) {
// Check whether these normals are vertical or horizontal
// neighbors and project the normals accordingly
auto n1 = thrust::make_tuple(0.f, i_n1.get<2>());
auto n2 = thrust::make_tuple(0.f, i_n2.get<2>());
if (std::abs(i_n1.get<3>() - i_n2.get<3>()) == 1)
{
n1.get<0>() = i_n1.get<0>();
n2.get<0>() = i_n2.get<0>();
}
else
{
n1.get<0>() = i_n1.get<1>();
n2.get<0>() = i_n2.get<1>();
}
// in lower triangle
const bool lower = i_n1.get<3>() > i_n2.get<3>();
const real q = relative_height_from_normals{}(n1, n2);
return lower ? -q : q;
});
}
void apply_sor(
cusp::csr_matrix<int, real, cusp::device_memory>::const_view di_A,
cusp::array1d<real, cusp::device_memory>::const_view di_b,
cusp::array1d<real, cusp::device_memory>::view do_x,
const real i_w,
const real i_tol,
const int i_max_iter,
const bool verbose)
{
// Linear SOR operator
cusp::relaxation::sor<real, cusp::device_memory> M(di_A, i_w);
// Array to store the residual
cusp::array1d<real, cusp::device_memory> d_r(di_b.size());
// Compute the initial residual
const auto compute_residual = [&] __host__ {
cusp::multiply(di_A, do_x, d_r);
cusp::blas::axpy(di_b, d_r, -1.f);
};
compute_residual();
// Monitor the convergence
cusp::monitor<real> monitor(di_b, i_max_iter, i_tol, 0, verbose);
// Iterate until convergence criteria is met
for (; !monitor.finished(d_r); ++monitor)
{
// Apply the SOR linear operator to iterate on our solution
M(di_A, di_b, do_x);
// Compute the residual
compute_residual();
}
}
int main(int argc, char* argv[])
{
assert(argc >= 6);
auto h_image = stbi::loadf(argv[1], 3);
printf("Loaded image with dim: %dx%dx%d\n",
h_image.width(),
h_image.height(),
h_image.n_channels());
const real azimuth = std::stof(argv[2]) * M_PI / 180.0f;
const real polar = std::stof(argv[3]) * M_PI / 180.0f;
// Lighting direction
float3 L{std::stof(argv[2]), std::stof(argv[3]), std::stof(argv[4])};
// float3 L{std::sin(polar) * std::cos(azimuth),
// std::sin(polar) * std::sin(azimuth),
// std::cos(polar)};
const real L_rlen = 1.f / std::sqrt(L.x * L.x + L.y * L.y + L.z * L.z);
L.x *= L_rlen;
L.y *= L_rlen;
L.z *= L_rlen;
printf("L: [%f, %f, %f]\n", L.x, L.y, L.z);
cusp::array2d<real, cusp::device_memory> d_image(h_image.n_channels(),
h_image.n_pixels());
make_device_image(h_image.get(), d_image);
auto d_intensity = d_image.row(0);
cusp::blas::scal(d_intensity, 2.f);
// cusp::io::read_matrix_market_file(d_intensity, "shading.mtx");
print_range_avg(d_intensity);
// normalize(d_intensity);
print_range_avg(d_intensity);
const int width = h_image.width();
const int height = h_image.height();
const int nnormals = width * height;
printf("Num pixels: %d rows * %d cols = %d\n", height, width, nnormals);
cusp::coo_matrix<int, real, cusp::device_memory> d_M(
nnormals * 3, nnormals * 3, nnormals * 9);
build_M(L, d_M);
printf("M has been built %dx%d\n", d_M.num_rows, d_M.num_cols);
// B is our pixel 4-neighborhood adjacency matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_B(
nnormals * 3, nnormals * 3, 3 * (height * (5 * width - 2) - 2 * width));
build_B(height, width, d_B);
printf("B has been built %dx%d\n", d_B.num_rows, d_B.num_cols);
// Now we build A using M and B
// A = M + 8lmI -2lmB <=> A = M + 2lm(4I - B)
// So we use cuSparse to compute alpha * M + beta * B, where beta is 2lm
// Now we can add M
auto d_A = cusparse_add(d_M, d_B);
printf("A has been built %dx%d\n", d_A.num_rows, d_A.num_cols);
// cusp::print(d_A.values.subarray(0, 10));
check_symmetry(d_A);
// The b vector of the system is (shading intensity * L), where L repeats
// Copy L to the device
thrust::device_vector<real> d_L(&L.x, (&L.x) + 3);
// Iterate over one component of L per channel of the normals
const auto cyclic_L = thrust::make_permutation_iterator(
d_L.begin(), detail::make_row_iterator(nnormals));
const thrust::multiplies<real> mul;
// Loop over for each dimension of the normals
const auto cyclic_i =
detail::make_cycle_iterator(d_intensity.begin(), nnormals);
// Allocate the b vector
cusp::array1d<real, cusp::device_memory> d_b(nnormals * 3);
// Write the b vector
thrust::transform(
cyclic_i, cyclic_i + nnormals * 3, cyclic_L, d_b.begin(), mul);
printf("b has been built %dx%d\n", d_b.size(), 1);
// Now we can solve for the relative normals via SOR
cusp::array1d<real, cusp::device_memory> d_x(3 * nnormals, 1.f);
thrust::tabulate(
d_x.begin(), d_x.end(), [=] __host__ __device__(int x) -> real {
return x >= nnormals * 2;
});
{
apply_sor(d_A, d_b, d_x, 1.f, 1e-5f, 1500, true);
// Normalize
using vec3 = thrust::tuple<real, real, real>;
const auto normalize_vec = [] __host__ __device__(vec3 normal) {
const real rmag =
1.f / std::sqrt(sqr(normal.get<0>()) + sqr(normal.get<1>()) +
sqr(normal.get<2>()));
normal.get<0>() *= rmag;
normal.get<1>() *= rmag;
normal.get<2>() = std::abs(normal.get<2>()) * rmag;
return normal;
};
// Normalize our resulting normals
auto norm_begin = detail::zip_it(
d_x.begin(), d_x.begin() + nnormals, d_x.begin() + nnormals * 2);
auto norm_end = norm_begin + nnormals;
thrust::transform(norm_begin, norm_end, norm_begin, normalize_vec);
}
printf("Done\n");
auto d_initial_normals = cusp::make_array2d_view(
3, nnormals, nnormals, cusp::make_array1d_view(d_x), cusp::row_major{});
cusp::array2d<real, cusp::device_memory> normal_copy = d_initial_normals;
thrust::transform(normal_copy.values.begin(),
normal_copy.values.end(),
normal_copy.values.begin(),
detail::unary_plus<real>(1.f));
thrust::transform(normal_copy.values.begin(),
normal_copy.values.end(),
normal_copy.values.begin(),
detail::unary_multiplies<real>(0.5f));
make_host_image(normal_copy, h_image.get());
stbi::writef("initial_normals.png", h_image);
// Now that we have relative normals, we calculate the relative heights
cusp::coo_matrix<int, real, cusp::device_memory> d_Q(
nnormals, nnormals, height * (4 * width - 2) - 2 * width);
// Initialize a grid matrix using CUSP
cusp::gallery::grid2d(d_Q, width, height);
build_Q_values(d_initial_normals, d_Q);
// Now we can assemble a Poisson problem to solve for the absolute heights
cusp::array1d<real, cusp::device_memory> d_pb(nnormals);
thrust::reduce_by_key(d_Q.row_indices.begin(),
d_Q.row_indices.end(),
d_Q.values.begin(),
thrust::make_discard_iterator(),
d_pb.begin());
// The A matrix
cusp::coo_matrix<int, real, cusp::device_memory> d_pA(
nnormals, nnormals, height * (5 * width - 2) - 2 * width);
cusp::gallery::poisson5pt(d_pA, width, height);
auto pA_begin = detail::zip_it(
d_pA.row_indices.begin(), d_pA.column_indices.begin(), d_pA.values.begin());
using tup3 = thrust::tuple<int, int, real>;
const auto fix_bnds = [=] __host__ __device__(tup3 entry) {
// Fix boundary cell diagonals
if (entry.get<0>() == entry.get<1>())
{
const int r = entry.get<0>() / width;
const int c = entry.get<0>() % width;
// If we're in a boundary cell we subtract one from the valence
entry.get<2>() -= (r == 0 || r == (height - 1));
entry.get<2>() -= (c == 0 || c == (width - 1));
}
return entry;
};
// Fix the boundary cell diagonals
thrust::transform(pA_begin, pA_begin + d_pA.num_entries, pA_begin, fix_bnds);
// To get a result we need to "pin" the solution by setting an arbitrary
// value to some constant. I use the first height.
// Make the first equation a trivial solution 1*h0 = x
d_pA.values.begin()[0] = 1.f;
d_pA.values.begin()[1] = 0.f;
d_pA.values.begin()[2] = 0.f;
// Zero the remaining references to the pinned height h0 (from rows 1 and width) and fold its value into b below
d_pA.values.begin()[3] = 0.f;
d_pA.values.begin()[4 * width - 2] = 0.f;
d_pb[0] = 0.5f;
d_pb.begin()[1] += d_pb[0];
d_pb.begin()[width] += d_pb[0];
cusp::array1d<real, cusp::device_memory> d_h(nnormals, 0.5f);
d_h[0] = d_pb[0];
{
cusp::csr_matrix<int, real, cusp::device_memory> pA(
d_pA.num_rows, d_pA.num_cols, d_pA.num_entries);
cusp::indices_to_offsets(d_pA.row_indices, pA.row_offsets);
pA.column_indices = d_pA.column_indices;
pA.values = d_pA.values;
apply_sor(pA, d_pb, d_h, 0.9f, 1e-4f, std::stoi(argv[5]), true);
}
printf("H0: %f, H1: %f, H2:%f, H4:%f, Q01: %f, Q10: %f, Q12: %f, Q14: %f\n",
(real)d_h[0],
(real)d_h[1],
(real)d_h[2],
(real)d_h[4],
(real)d_Q.values[0],
(real)d_Q.values[2],
(real)d_Q.values[3],
(real)d_Q.values[4]);
print_range_avg(d_h);
normalize(d_h);
const auto h_out = detail::zip_it(d_h.begin(), d_h.begin(), d_h.begin());
const auto rn_begin = detail::zip_it(d_initial_normals.row(0).begin(),
d_initial_normals.row(1).begin(),
d_initial_normals.row(2).begin());
thrust::copy_n(h_out, nnormals, rn_begin);
make_host_image(d_initial_normals, h_image.get());
stbi::writef("height.png", h_image);
}
|
daf969b5129b74d3be34e217056b7f64ea8c9cca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
dopoffs.c
Takes the delay-correction polynomial for a Doppler dataset and figures out the COM
Doppler corrections (in units of Doppler bins) for each frame.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions
Modified 2006 June 21 by CM:
Changed dopres to dop_per_bin
Modified 2003 April 26 by CM:
Removed delay computation
*****************************************************************************************/
extern "C" {
#include "head.h"
}
__device__ int dopoffs_nframes;
__global__ void dopoffs_get_frames_krnl(struct dat_t *ddat, int s) {
/* Single-threaded kernel */
if (threadIdx.x == 0)
dopoffs_nframes = ddat->set[s].desc.doppler.nframes;
}
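/* dopoffs_cuda_krnl: one thread per frame. For each view it evaluates the time
derivative of the delay-correction polynomial delay(t) = sum_n a[n]*(t - t0)^n,
i.e. sum_n n*a[n]*(t - t0)^(n-1) in usec/day, then scales by the transmit
frequency Ftx and by dop_per_bin*86400 to express the COM correction in
Doppler bins. */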
__global__ void dopoffs_cuda_krnl(struct dat_t *ddat, int s) {
/* nframes-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
int k, n;
double dop, arg, x;
if (f < dopoffs_nframes) {
for (k=0; k<ddat->set[s].desc.doppler.nviews; k++) {
x = 1.0;
dop = 0.0;
arg = ddat->set[s].desc.doppler.frame[f].view[k].t -
ddat->set[s].desc.doppler.delcor.t0;
for (n=1; n<=ddat->set[s].desc.doppler.delcor.n; n++) {
dop += n*ddat->set[s].desc.doppler.delcor.a[n].val*x;
x *= arg;
}
/* dop has units of usec/day and there are 86400 sec/day */
ddat->set[s].desc.doppler.frame[f].view[k].dopoff =
-dop*ddat->set[s].desc.doppler.Ftx
/ (ddat->set[s].desc.doppler.dop_per_bin*86400.0);
}
}
}
__host__ void dopoffs_cuda(struct dat_t *ddat, int s)
{
int nframes = 0;
dim3 BLK,THD;
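// BLK is left at dim3's default of (1,1,1), so the launch below uses a single block;
// this implicitly assumes nframes fits within one block's thread limit.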
//ddat->set[s].desc.doppler
/* Get frames and views */
hipLaunchKernelGGL(( dopoffs_get_frames_krnl), dim3(1),dim3(1), 0, 0, ddat, s);
checkErrorAfterKernelLaunch("dopoffs_get_frames_krnl (dopoffs_cuda)");
gpuErrchk(hipMemcpyFromSymbol(&nframes, dopoffs_nframes, sizeof(int),
0, hipMemcpyDeviceToHost));
/* Launch nframes-threaded kernel */
THD.x = nframes;
hipLaunchKernelGGL(( dopoffs_cuda_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, s);
checkErrorAfterKernelLaunch("dopoffs_cuda_krnl (dopoffs_cuda)");
}
| daf969b5129b74d3be34e217056b7f64ea8c9cca.cu | /*****************************************************************************************
dopoffs.c
Takes the delay-correction polynomial for a Doppler dataset and figures out the COM
Doppler corrections (in units of Doppler bins) for each frame.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions
Modified 2006 June 21 by CM:
Changed dopres to dop_per_bin
Modified 2003 April 26 by CM:
Removed delay computation
*****************************************************************************************/
extern "C" {
#include "head.h"
}
__device__ int dopoffs_nframes;
__global__ void dopoffs_get_frames_krnl(struct dat_t *ddat, int s) {
/* Single-threaded kernel */
if (threadIdx.x == 0)
dopoffs_nframes = ddat->set[s].desc.doppler.nframes;
}
__global__ void dopoffs_cuda_krnl(struct dat_t *ddat, int s) {
/* nframes-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
int k, n;
double dop, arg, x;
if (f < dopoffs_nframes) {
for (k=0; k<ddat->set[s].desc.doppler.nviews; k++) {
x = 1.0;
dop = 0.0;
arg = ddat->set[s].desc.doppler.frame[f].view[k].t -
ddat->set[s].desc.doppler.delcor.t0;
for (n=1; n<=ddat->set[s].desc.doppler.delcor.n; n++) {
dop += n*ddat->set[s].desc.doppler.delcor.a[n].val*x;
x *= arg;
}
/* dop has units of usec/day and there are 86400 sec/day */
ddat->set[s].desc.doppler.frame[f].view[k].dopoff =
-dop*ddat->set[s].desc.doppler.Ftx
/ (ddat->set[s].desc.doppler.dop_per_bin*86400.0);
}
}
}
__host__ void dopoffs_cuda(struct dat_t *ddat, int s)
{
int nframes = 0;
dim3 BLK,THD;
//ddat->set[s].desc.doppler
/* Get frames and views */
dopoffs_get_frames_krnl<<<1,1>>>(ddat, s);
checkErrorAfterKernelLaunch("dopoffs_get_frames_krnl (dopoffs_cuda)");
gpuErrchk(cudaMemcpyFromSymbol(&nframes, dopoffs_nframes, sizeof(int),
0, cudaMemcpyDeviceToHost));
/* Launch nframes-threaded kernel */
THD.x = nframes;
dopoffs_cuda_krnl<<<BLK,THD>>>(ddat, s);
checkErrorAfterKernelLaunch("dopoffs_cuda_krnl (dopoffs_cuda)");
}
|
62593116a571b2a7cedf11d0780d7ba097b58548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <device_launch_parameters.h>
#include <hipcub/hipcub.hpp>
#include "common_kernels.cuh"
#include "kernels_hip.cuh"
using namespace clustering;
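// Two-stage centroid reduction: `centroid` writes one partial sum per block into
// work_centroid (grid_dim x dim, row-major); `reduce_centroid` then sums those
// partials and divides by `divisor` (the cluster size) to produce the centroid.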
__global__ void centroid(const float* __restrict__ points,
float* __restrict__ work_centroid,
csize_t count,
csize_t dim)
{
typedef hipcub::BlockReduce<float, 1024> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float tmp[MAX_DIM];
memset(tmp, 0, dim * sizeof(float));
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count; idx += gridDim.x * blockDim.x)
{
for (csize_t i = 0; i < dim; ++i)
tmp[i] += points[idx * dim + i];
}
for (csize_t i = 0; i < dim; i++)
{
auto aggregate = BlockReduce(temp_storage).Sum(tmp[i]);
if (threadIdx.x == 0)
work_centroid[blockIdx.x * dim + i] = aggregate;
}
}
__global__ void reduce_centroid(const float* __restrict__ grid_centroid,
float* __restrict__ out_centroid,
csize_t grid_size,
csize_t divisor,
csize_t dim)
{
for (csize_t i = threadIdx.x; i < dim; i += blockDim.x)
{
float sum = 0;
for (size_t j = 0; j < grid_size; j++)
{
sum += grid_centroid[j * dim + i];
}
out_centroid[i] = sum / divisor;
}
}
void run_centroid(const float* points,
float* work_centroid,
float* out_centroid,
csize_t cluster_size,
csize_t dim,
kernel_info info)
{
auto block_dim = ((cluster_size + 31) / 32) * 32;
auto grid_dim = (block_dim + 1023) / 1024;
block_dim = block_dim > info.block_dim ? info.block_dim : block_dim;
grid_dim = grid_dim > info.grid_dim ? info.grid_dim : grid_dim;
hipLaunchKernelGGL(( centroid), dim3(grid_dim), dim3(1024), 0, info.stream,
points, work_centroid, cluster_size, dim);
hipLaunchKernelGGL(( reduce_centroid), dim3(1), dim3(32), 0, info.stream, work_centroid, out_centroid, grid_dim, cluster_size, dim);
}
| 62593116a571b2a7cedf11d0780d7ba097b58548.cu | #include <device_launch_parameters.h>
#include <cub/block/block_reduce.cuh>
#include "common_kernels.cuh"
#include "kernels.cuh"
using namespace clustering;
__global__ void centroid(const float* __restrict__ points,
float* __restrict__ work_centroid,
csize_t count,
csize_t dim)
{
typedef cub::BlockReduce<float, 1024> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float tmp[MAX_DIM];
memset(tmp, 0, dim * sizeof(float));
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count; idx += gridDim.x * blockDim.x)
{
for (csize_t i = 0; i < dim; ++i)
tmp[i] += points[idx * dim + i];
}
for (csize_t i = 0; i < dim; i++)
{
auto aggregate = BlockReduce(temp_storage).Sum(tmp[i]);
if (threadIdx.x == 0)
work_centroid[blockIdx.x * dim + i] = aggregate;
}
}
__global__ void reduce_centroid(const float* __restrict__ grid_centroid,
float* __restrict__ out_centroid,
csize_t grid_size,
csize_t divisor,
csize_t dim)
{
for (csize_t i = threadIdx.x; i < dim; i += blockDim.x)
{
float sum = 0;
for (size_t j = 0; j < grid_size; j++)
{
sum += grid_centroid[j * dim + i];
}
out_centroid[i] = sum / divisor;
}
}
void run_centroid(const float* points,
float* work_centroid,
float* out_centroid,
csize_t cluster_size,
csize_t dim,
kernel_info info)
{
auto block_dim = ((cluster_size + 31) / 32) * 32;
auto grid_dim = (block_dim + 1023) / 1024;
block_dim = block_dim > info.block_dim ? info.block_dim : block_dim;
grid_dim = grid_dim > info.grid_dim ? info.grid_dim : grid_dim;
centroid<<<grid_dim, 1024, 0, info.stream>>>(
points, work_centroid, cluster_size, dim);
reduce_centroid<<<1, 32, 0, info.stream>>>(work_centroid, out_centroid, grid_dim, cluster_size, dim);
}
|
7c16f04b5a0caad922583a6623a0b0bbf25cda39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpuRowTests.cuh"
#include "../independence/gpuInd.cuh"
__global__ void testRowL0(MMState state, int row, int row_count)
{
size_t id = (blockIdx.x * blockDim.x) + threadIdx.x;
size_t row_node = static_cast<size_t>(sqrt(2 * id + pow(row - 0.5, 2)) + 0.5);
size_t col_node = id - ((row_node * (row_node - 1) / 2) - (row * (row - 1) / 2));
size_t max_row = row + row_count;
if (col_node < state.p && row_node < max_row)
{
int idx = state.p * row_node + col_node;
if (col_node < row_node && state.adj[idx])
{
int inv_idx = state.p * col_node + row_node;
double pVal = GPU::calcPValue(state.cor[idx], state.observations);
state.pMax[inv_idx] = pVal;
if (state.pMax[inv_idx] >= state.alpha)
{
state.adj[idx] = 0;
state.adj[inv_idx] = 0;
state.sepSets[(col_node * state.maxCondSize * state.p) +
(row_node * state.maxCondSize)] = -2;
}
}
}
}
__global__ void testRowL1(MMState state, int *rows, int start_row, int max_row_count)
{
if (start_row + blockIdx.x >= max_row_count)
{
return;
}
size_t row_node = rows[start_row + blockIdx.x];
size_t row_neighbours = state.adj_compact[row_node * state.p + state.p - 1];
extern __shared__ double pVals[];
size_t col_node = state.adj_compact[row_node * state.p + blockIdx.y];
if (row_neighbours > blockIdx.y && row_neighbours >= 1 && col_node < state.p)
{
size_t subIndex = 0;
for (size_t offset = threadIdx.x; offset < row_neighbours; offset += blockDim.x)
{
if (offset == blockIdx.y)
{
pVals[threadIdx.x] = -1;
}
else
{
subIndex = state.adj_compact[row_node * state.p + offset];
pVals[threadIdx.x] = GPU::pValL1(
state.cor[row_node * state.p + col_node],
state.cor[row_node * state.p + subIndex],
state.cor[col_node * state.p + subIndex], state.observations);
}
__syncthreads();
if (threadIdx.x == 0)
{
for (size_t i = 0; i < blockDim.x && i < row_neighbours; ++i)
{
double pVal = pVals[i];
if (offset + i < state.p && pVal >= state.alpha)
{
if (row_node < col_node)
{
if (atomicCAS(&state.lock[(state.p * row_node) + col_node], 0, 1) == 0)
{
state.adj[row_node * state.p + col_node] = 0;
state.adj[col_node * state.p + row_node] = 0;
state.sepSets[row_node * state.p * state.maxCondSize +
col_node * state.maxCondSize] =
state.adj_compact[row_node * state.p + offset + i];
state.pMax[state.p * row_node + col_node] = pVal;
}
}
else
{
if (atomicCAS(&state.lock[(state.p * col_node) + row_node], 0, 1) == 0)
{
state.adj[row_node * state.p + col_node] = 0;
state.adj[col_node * state.p + row_node] = 0;
state.pMax[state.p * col_node + row_node] = pVal;
state.sepSets[col_node * state.p * state.maxCondSize +
row_node * state.maxCondSize] =
state.adj_compact[col_node * state.p + offset + i];
}
}
break;
}
}
}
__syncthreads();
if (state.adj[row_node * state.p + col_node] == 0)
break;
}
}
}
template <int lvlSize, int kLvlSizeSmall>
__global__ void testRowLN(MMState state, int *rows, int start_row, int max_row_count)
{
if (start_row + blockIdx.x >= max_row_count)
{
return;
}
size_t row_node = rows[start_row + blockIdx.x];
size_t row_count = state.adj_compact[row_node * state.p + state.p - 1];
if (row_count > blockIdx.y && // col_node available
row_count >= kLvlSizeSmall)
{
double Submat[lvlSize][lvlSize];
double SubmatPInv[lvlSize][lvlSize];
int sepset_nodes[kLvlSizeSmall];
// pseudo-inverse parameter
double v[lvlSize][lvlSize];
double w[lvlSize], rv1[lvlSize];
double res1[lvlSize][lvlSize];
// Determine sepsets to work on
size_t col_node = state.adj_compact[row_node * state.p + blockIdx.y]; // get actual id
int row_neighbours = row_count - 1; // get number of neighbours && exclude col_node
size_t row_test_count = binomialCoeff(row_neighbours, kLvlSizeSmall);
for (size_t test_index = threadIdx.x; test_index < row_test_count;
test_index += blockDim.x)
{
ithCombination(sepset_nodes, test_index, kLvlSizeSmall,
row_neighbours);
for (int ind = 0; ind < kLvlSizeSmall; ++ind)
{
if (sepset_nodes[ind] - 1 >= blockIdx.y)
{
sepset_nodes[ind] =
state.adj_compact[row_node * state.p + sepset_nodes[ind]];
}
else
{
sepset_nodes[ind] =
state.adj_compact[row_node * state.p + sepset_nodes[ind] - 1];
}
}
for (int i = 0; i < lvlSize; ++i)
{
// set diagonal
Submat[i][i] = 1;
}
Submat[0][1] = Submat[1][0] = state.cor[row_node * state.p + col_node];
for (int j = 2; j < lvlSize; ++j)
{
// set correlations of X
Submat[0][j] = Submat[j][0] =
state.cor[row_node * state.p + sepset_nodes[j - 2]];
// set correlations of Y
Submat[1][j] = Submat[j][1] =
state.cor[col_node * state.p + sepset_nodes[j - 2]];
}
for (int i = 2; i < lvlSize; ++i)
{
for (int j = i + 1; j < lvlSize; ++j)
{
Submat[i][j] = Submat[j][i] =
state.cor[sepset_nodes[i - 2] * state.p + sepset_nodes[j - 2]];
}
}
if ( state.adj[state.p * row_node + col_node] == 0) {
break;
}
pseudoinverse<lvlSize>(Submat, SubmatPInv, v, rv1, w, res1);
double r = -SubmatPInv[0][1] / sqrt(SubmatPInv[0][0] * SubmatPInv[1][1]);
double pVal = GPU::calcPValue(r, state.observations);
if (pVal >= state.alpha)
{
if (row_node < col_node)
{
if (atomicCAS(&state.lock[(state.p * row_node) + col_node], 0, 1) == 0)
{
state.adj[state.p * row_node + col_node] = 0;
state.adj[state.p * col_node + row_node] = 0;
state.pMax[state.p * row_node + col_node] = pVal;
for (int j = 0; j < kLvlSizeSmall; ++j)
{
state.sepSets[row_node * state.p * state.maxCondSize +
col_node * state.maxCondSize + j] = sepset_nodes[j];
}
}
}
else
{
if (atomicCAS(&state.lock[(state.p * col_node) + row_node], 0, 1) == 0)
{
state.adj[state.p * row_node + col_node] = 0;
state.adj[state.p * col_node + row_node] = 0;
state.pMax[state.p * col_node + row_node] = pVal;
for (int j = 0; j < kLvlSizeSmall; ++j)
{
state.sepSets[col_node * state.p * state.maxCondSize +
row_node * state.maxCondSize + j] = sepset_nodes[j];
}
}
}
}
}
}
}
template __global__ void testRowLN<4,2>(MMState state, int *rows, int start_row, int max_row_count);
template __global__ void testRowLN<5,3>(MMState state, int *rows, int start_row, int max_row_count);
| 7c16f04b5a0caad922583a6623a0b0bbf25cda39.cu | #include "gpuRowTests.cuh"
#include "../independence/gpuInd.cuh"
__global__ void testRowL0(MMState state, int row, int row_count)
{
size_t id = (blockIdx.x * blockDim.x) + threadIdx.x;
size_t row_node = static_cast<size_t>(sqrt(2 * id + pow(row - 0.5, 2)) + 0.5);
size_t col_node = id - ((row_node * (row_node - 1) / 2) - (row * (row - 1) / 2));
size_t max_row = row + row_count;
if (col_node < state.p && row_node < max_row)
{
int idx = state.p * row_node + col_node;
if (col_node < row_node && state.adj[idx])
{
int inv_idx = state.p * col_node + row_node;
double pVal = GPU::calcPValue(state.cor[idx], state.observations);
state.pMax[inv_idx] = pVal;
if (state.pMax[inv_idx] >= state.alpha)
{
state.adj[idx] = 0;
state.adj[inv_idx] = 0;
state.sepSets[(col_node * state.maxCondSize * state.p) +
(row_node * state.maxCondSize)] = -2;
}
}
}
}
__global__ void testRowL1(MMState state, int *rows, int start_row, int max_row_count)
{
if (start_row + blockIdx.x >= max_row_count)
{
return;
}
size_t row_node = rows[start_row + blockIdx.x];
size_t row_neighbours = state.adj_compact[row_node * state.p + state.p - 1];
extern __shared__ double pVals[];
size_t col_node = state.adj_compact[row_node * state.p + blockIdx.y];
if (row_neighbours > blockIdx.y && row_neighbours >= 1 && col_node < state.p)
{
size_t subIndex = 0;
for (size_t offset = threadIdx.x; offset < row_neighbours; offset += blockDim.x)
{
if (offset == blockIdx.y)
{
pVals[threadIdx.x] = -1;
}
else
{
subIndex = state.adj_compact[row_node * state.p + offset];
pVals[threadIdx.x] = GPU::pValL1(
state.cor[row_node * state.p + col_node],
state.cor[row_node * state.p + subIndex],
state.cor[col_node * state.p + subIndex], state.observations);
}
__syncthreads();
if (threadIdx.x == 0)
{
for (size_t i = 0; i < blockDim.x && i < row_neighbours; ++i)
{
double pVal = pVals[i];
if (offset + i < state.p && pVal >= state.alpha)
{
if (row_node < col_node)
{
if (atomicCAS(&state.lock[(state.p * row_node) + col_node], 0, 1) == 0)
{
state.adj[row_node * state.p + col_node] = 0;
state.adj[col_node * state.p + row_node] = 0;
state.sepSets[row_node * state.p * state.maxCondSize +
col_node * state.maxCondSize] =
state.adj_compact[row_node * state.p + offset + i];
state.pMax[state.p * row_node + col_node] = pVal;
}
}
else
{
if (atomicCAS(&state.lock[(state.p * col_node) + row_node], 0, 1) == 0)
{
state.adj[row_node * state.p + col_node] = 0;
state.adj[col_node * state.p + row_node] = 0;
state.pMax[state.p * col_node + row_node] = pVal;
state.sepSets[col_node * state.p * state.maxCondSize +
row_node * state.maxCondSize] =
state.adj_compact[col_node * state.p + offset + i];
}
}
break;
}
}
}
__syncthreads();
if (state.adj[row_node * state.p + col_node] == 0)
break;
}
}
}
template <int lvlSize, int kLvlSizeSmall>
__global__ void testRowLN(MMState state, int *rows, int start_row, int max_row_count)
{
if (start_row + blockIdx.x >= max_row_count)
{
return;
}
size_t row_node = rows[start_row + blockIdx.x];
size_t row_count = state.adj_compact[row_node * state.p + state.p - 1];
if (row_count > blockIdx.y && // col_node available
row_count >= kLvlSizeSmall)
{
double Submat[lvlSize][lvlSize];
double SubmatPInv[lvlSize][lvlSize];
int sepset_nodes[kLvlSizeSmall];
// pseudo-inverse parameter
double v[lvlSize][lvlSize];
double w[lvlSize], rv1[lvlSize];
double res1[lvlSize][lvlSize];
// Determine sepsets to work on
size_t col_node = state.adj_compact[row_node * state.p + blockIdx.y]; // get actual id
int row_neighbours = row_count - 1; // get number of neighbours && exclude col_node
size_t row_test_count = binomialCoeff(row_neighbours, kLvlSizeSmall);
for (size_t test_index = threadIdx.x; test_index < row_test_count;
test_index += blockDim.x)
{
ithCombination(sepset_nodes, test_index, kLvlSizeSmall,
row_neighbours);
for (int ind = 0; ind < kLvlSizeSmall; ++ind)
{
if (sepset_nodes[ind] - 1 >= blockIdx.y)
{
sepset_nodes[ind] =
state.adj_compact[row_node * state.p + sepset_nodes[ind]];
}
else
{
sepset_nodes[ind] =
state.adj_compact[row_node * state.p + sepset_nodes[ind] - 1];
}
}
for (int i = 0; i < lvlSize; ++i)
{
// set diagonal
Submat[i][i] = 1;
}
Submat[0][1] = Submat[1][0] = state.cor[row_node * state.p + col_node];
for (int j = 2; j < lvlSize; ++j)
{
// set correlations of X
Submat[0][j] = Submat[j][0] =
state.cor[row_node * state.p + sepset_nodes[j - 2]];
// set correlations of Y
Submat[1][j] = Submat[j][1] =
state.cor[col_node * state.p + sepset_nodes[j - 2]];
}
for (int i = 2; i < lvlSize; ++i)
{
for (int j = i + 1; j < lvlSize; ++j)
{
Submat[i][j] = Submat[j][i] =
state.cor[sepset_nodes[i - 2] * state.p + sepset_nodes[j - 2]];
}
}
if ( state.adj[state.p * row_node + col_node] == 0) {
break;
}
pseudoinverse<lvlSize>(Submat, SubmatPInv, v, rv1, w, res1);
double r = -SubmatPInv[0][1] / sqrt(SubmatPInv[0][0] * SubmatPInv[1][1]);
double pVal = GPU::calcPValue(r, state.observations);
if (pVal >= state.alpha)
{
if (row_node < col_node)
{
if (atomicCAS(&state.lock[(state.p * row_node) + col_node], 0, 1) == 0)
{
state.adj[state.p * row_node + col_node] = 0;
state.adj[state.p * col_node + row_node] = 0;
state.pMax[state.p * row_node + col_node] = pVal;
for (int j = 0; j < kLvlSizeSmall; ++j)
{
state.sepSets[row_node * state.p * state.maxCondSize +
col_node * state.maxCondSize + j] = sepset_nodes[j];
}
}
}
else
{
if (atomicCAS(&state.lock[(state.p * col_node) + row_node], 0, 1) == 0)
{
state.adj[state.p * row_node + col_node] = 0;
state.adj[state.p * col_node + row_node] = 0;
state.pMax[state.p * col_node + row_node] = pVal;
for (int j = 0; j < kLvlSizeSmall; ++j)
{
state.sepSets[col_node * state.p * state.maxCondSize +
row_node * state.maxCondSize + j] = sepset_nodes[j];
}
}
}
}
}
}
}
template __global__ void testRowLN<4,2>(MMState state, int *rows, int start_row, int max_row_count);
template __global__ void testRowLN<5,3>(MMState state, int *rows, int start_row, int max_row_count);
|
71c867e1f77721876b95f7b258a9ef54ebe24ac9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
// Enabled to print a bunch of junk during solving
#define DEBUG_PRINT_SOLVER_INFO 0
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], residual);
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(hipDeviceSynchronize());
timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(hipDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
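// Rough mapping of the textbook PCG steps onto the kernels below (sketch):
// Init: r0 = -J^T F (evalMinusJTFDevice), p0 = M^-1 r0, rDotzOld = <r0, p0>
// Step1: Ap = (J^T J) p (applyJTJDevice), scanAlpha = <p, Ap>
// Step2: alpha = rDotzOld / scanAlpha; delta += alpha*p; r -= alpha*Ap;
// z = M^-1 r; scanBeta = <r, z>
// Step3: beta = scanBeta / rDotzOld; p = z + beta*p; rDotzOld = scanBeta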
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 residuumA;
const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_r[x] = residuum; // store for next iteration
state.d_rA[x] = residuumA; // store for next iteration
const float3 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
const float3 pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1
state.d_pA[x] = pA;
d = dot(residuum, p) + dot(residuumA, pA); // x-th term of the numerator for computing alpha and of the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(N, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( hipMemcpy(&temp, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanAlpha (Init): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 tmpA;
const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
state.d_Ap_A[x] = tmpA; // store for next kernel call
d = dot(state.d_p[x], tmp) + dot(state.d_pA[x], tmpA); // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // take a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // take a descent step
float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + dot(zA, rA); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get the new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update the descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update the descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
cudaSafeCall(hipMemset(state.d_scanBeta, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( hipMemcpy(&temp, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanAlpha (Step): %f\n", temp);
cudaSafeCall( hipMemcpy(&temp, state.d_scanBeta, sizeof(float), hipMemcpyDeviceToHost) );
printf("ScanBeta (Step): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
state.d_a[x] = state.d_a[x] + state.d_deltaA[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(hipDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters)
{
CUDATimer timer;
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
ApplyLinearUpdate(input, state, parameters, timer);
timer.nextIteration();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.evaluate();
return (double)residual;
}
| 71c867e1f77721876b95f7b258a9ef54ebe24ac9.cu | #include <iostream>
// Enabled to print a bunch of junk during solving
#define DEBUG_PRINT_SOLVER_INFO 0
#include "WarpingSolverParameters.h"
#include "WarpingSolverState.h"
#include "WarpingSolverUtil.h"
#include "WarpingSolverEquations.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "CUDATimer.h"
#ifdef _WIN32
#include <conio.h>
#endif
#ifdef _WIN32
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
#define WARP_SIZE 32u
#define WARP_MASK (WARP_SIZE-1u)
/////////////////////////////////////////////////////////////////////////
// Eval Residual
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N)
{
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
if (laneid == 0) {
atomicAdd(&state.d_sumResidual[0], residual);
}
}
float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
float residual = 0.0f;
const unsigned int N = input.N; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
cudaSafeCall(cudaDeviceSynchronize());
timer.startEvent("EvalResidual");
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize());
residual = state.getSumResidual();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
return residual;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 residuumA;
const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_r[x] = residuum; // store for next iteration
state.d_rA[x] = residuumA; // store for next iteration
const float3 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1
state.d_p[x] = p;
const float3 pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1
state.d_pA[x] = pA;
d = dot(residuum, p) + dot(residuumA, pA); // x-th term of the numerator for computing alpha and of the denominator for computing beta
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_rDotzOld[x] = state.d_scanAlpha[0];
state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f);
}
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGInit_Kernel1");
PCGInit_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
timer.startEvent("PCGInit_Kernel2");
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(N, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanAlpha (Init): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x < N)
{
float3 tmpA;
const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k
state.d_Ap_X[x] = tmp; // store for next kernel call
state.d_Ap_A[x] = tmpA; // store for next kernel call
d = dot(state.d_p[x], tmp) + dot(state.d_pA[x], tmpA); // x-th term of denominator of alpha
}
d = warpReduce(d);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // take a descent step
state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // take a descent step
float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum
state.d_r[x] = r; // store for next kernel call
float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum
state.d_rA[x] = rA; // store for next kernel call
float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1
state.d_z[x] = z; // save for next kernel call
float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1
state.d_zA[x] = zA; // save for next kernel call
b = dot(z, r) + dot(zA, rA); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if ((threadIdx.x & WARP_MASK) == 0) {
atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute the numerator of beta inside this block
}
}
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.N;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float rDotzNew = state.d_scanBeta[0]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update descent direction
state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update descent direction
}
}
void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const int shmem_size = sizeof(float)*THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel1");
PCGStep_Kernel1 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state, parameters);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float)));
timer.startEvent("PCGStep_Kernel2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
timer.startEvent("PCGStep_Kernel3");
PCGStep_Kernel3 << <blocksPerGrid, THREADS_PER_BLOCK, shmem_size >> >(input, state);
timer.endEvent();
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
#if DEBUG_PRINT_SOLVER_INFO
float temp;
cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanAlpha (Step): %f\n", temp);
cudaSafeCall( cudaMemcpy(&temp, state.d_scanBeta, sizeof(float), cudaMemcpyDeviceToHost) );
printf("ScanBeta (Step): %f\n", temp);
#endif
}
/////////////////////////////////////////////////////////////////////////
// Apply Update
/////////////////////////////////////////////////////////////////////////
__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.N; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
state.d_x[x] = state.d_x[x] + state.d_delta[x];
state.d_a[x] = state.d_a[x] + state.d_deltaA[x];
}
}
void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer)
{
const unsigned int N = input.N; // Number of block variables
timer.startEvent("ApplyLinearUpdateDevice");
ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
timer.endEvent();
cudaSafeCall(cudaDeviceSynchronize()); // Hm
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters)
{
CUDATimer timer;
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
float residual = EvalResidual(input, state, parameters, timer);
printf("%i: cost: %f\n", nIter, residual);
Initialization(input, state, parameters, timer);
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) {
PCGIteration(input, state, parameters, timer);
}
ApplyLinearUpdate(input, state, parameters, timer);
timer.nextIteration();
}
float residual = EvalResidual(input, state, parameters, timer);
printf("final cost: %f\n", residual);
timer.evaluate();
return (double)residual;
}
|
64187fc95abe0453a724c4cc777a7a2d2de3472e.hip | // !!! This is a file automatically generated by hipify!!!
//#include <stdio.h>
#include "book.h"
int main( void ) {
hipDeviceProp_t prop;
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( hipGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
| 64187fc95abe0453a724c4cc777a7a2d2de3472e.cu | //#include <stdio.h>
#include "book.h"
int main( void ) {
cudaDeviceProp prop;
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
518bc3c1e77eaf3fe8de712306c75b1defb2c8b2.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
__global__ void race (int* A, int* B)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
A[idx] = temp;
temp = B[idx + 1];
B[idx] = temp;
} | 518bc3c1e77eaf3fe8de712306c75b1defb2c8b2.cu | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
__global__ void race (int* A, int* B)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
A[idx] = temp;
temp = B[idx + 1];
B[idx] = temp;
} |
7e2e6dbec81c06ca842c163f76f2acaac8c38ac3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "../debug.h"
#define N ( 1 << 27 )
#define FLOATTYPE_T float
int main()
{
FLOATTYPE_T *h_in, h_out, good_out;
FLOATTYPE_T *d_in, *d_out;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( &d_in, memBytes ) );
checkCUDA( hipMalloc( &d_out, sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_out = 0.0;
good_out = 0.0;
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, h_in, memBytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_out, 0, sizeof(FLOATTYPE_T) ) );
/* initialize the CUB temp storage */
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
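/* The reduction below follows CUB's two-phase pattern: the first call, made with
d_temp_storage == NULL, only writes the required scratch size into
temp_storage_bytes; after allocating, the same call is issued again to perform
the reduction. One possible way to fill in the FIXMEs (a sketch, assuming the
standard hipcub signature) is:
hipcub::DeviceReduce::Sum( d_temp_storage, temp_storage_bytes, d_in, d_out, size );
*/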
hipcub::DeviceReduce::Sum( FIXME );
printf("temp storage is %ld\n", temp_storage_bytes );
checkCUDA( hipMalloc( &d_temp_storage, temp_storage_bytes ) );
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipcub::DeviceReduce::Sum( FIXME );
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( hipMemcpy( &h_out, d_out, sizeof(FLOATTYPE_T),
hipMemcpyDeviceToHost ) );
checkCUDA( hipEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
good_out += h_in[i];
} /* end for */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( good_out - h_out );
if( diff / h_out < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_out );
printf("GPU result is %f, CPU result is %f\n",h_out, good_out );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_out ) );
checkCUDA( hipFree( d_temp_storage ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| 7e2e6dbec81c06ca842c163f76f2acaac8c38ac3.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <cub/cub.cuh>
#include "../debug.h"
#define N ( 1 << 27 )
#define FLOATTYPE_T float
int main()
{
FLOATTYPE_T *h_in, h_out, good_out;
FLOATTYPE_T *d_in, *d_out;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( &d_in, memBytes ) );
checkCUDA( cudaMalloc( &d_out, sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_out = 0.0;
good_out = 0.0;
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, h_in, memBytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_out, 0, sizeof(FLOATTYPE_T) ) );
/* initialize the CUB temp storage */
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
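/* The reduction below follows CUB's two-phase pattern: the first call, made with
d_temp_storage == NULL, only writes the required scratch size into
temp_storage_bytes; after allocating, the same call is issued again to perform
the reduction. One possible way to fill in the FIXMEs (a sketch, assuming the
standard CUB signature) is:
cub::DeviceReduce::Sum( d_temp_storage, temp_storage_bytes, d_in, d_out, size );
*/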
cub::DeviceReduce::Sum( FIXME );
printf("temp storage is %ld\n", temp_storage_bytes );
checkCUDA( cudaMalloc( &d_temp_storage, temp_storage_bytes ) );
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
cub::DeviceReduce::Sum( FIXME );
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( cudaMemcpy( &h_out, d_out, sizeof(FLOATTYPE_T),
cudaMemcpyDeviceToHost ) );
checkCUDA( cudaEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
good_out += h_in[i];
} /* end for */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( good_out - h_out );
if( diff / h_out < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_out );
printf("GPU result is %f, CPU result is %f\n",h_out, good_out );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_out ) );
checkCUDA( cudaFree( d_temp_storage ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
435c8ab079a07a1985819d188fce87430a6fee08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "VecAdd_kernel.cu"
int main(int argc, char *argv[])
{
int N = 100;
unsigned int size;
float *d_A, *d_B, *d_C;
float *h_A, *h_B, *h_C;
/****************************
* Initialization of memory *
****************************/
size = N * sizeof(float);
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
h_C = (float *) malloc(size);
for (unsigned i=0; i<N; i++) {
h_A[i] = 1.0f;
h_B[i] = 2.0f;
h_C[i] = 0.0f;
}
// YOUR TASKS:
// - Allocate below device arrays d_A, d_B and d_C
// - Transfer array data from host to device arrays
// Insert code below this line.
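// One possible completion of the tasks above (a sketch using the HIP runtime API):
// hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_C, size);
// hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
// hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);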
/****************************
* GPU execution *
****************************/
// YOUR TASK:
// - Define below the number of threads per block and blocks per grid
// Update the two lines below this line.
int threadsPerBlock = 0;
int blocksPerGrid = 0;
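// A typical choice (sketch): threadsPerBlock = 256 and
// blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock,
// rounding up so that every one of the N elements is covered by a thread.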
hipLaunchKernelGGL(( VecAdd_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C,N);
hipDeviceSynchronize();
// YOUR TASK:
// - Transfer data results stored in d_C to host array
// Insert code below this line.
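// Sketch: hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);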
/****************************
* Verification *
****************************/
float sum = 0.0f;
for (unsigned i=0; i<N; i++) {
sum += h_C[i];
}
printf("Vector addition\n");
if (abs(sum-3.0f*(float) N)<=1e-10)
{
printf("PASSED!\n");
}
else
{
printf("FAILED!\n");
}
/****************************
* Cleaning memory *
****************************/
// YOUR TASK:
// - Free device memory for the allocated d_A, d_B and d_C arrays
// Insert code below this line.
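// Sketch: hipFree(d_A); hipFree(d_B); hipFree(d_C);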
free(h_A);
free(h_B);
free(h_C);
return 0;
} | 435c8ab079a07a1985819d188fce87430a6fee08.cu | #include <stdio.h>
#include "VecAdd_kernel.cu"
int main(int argc, char *argv[])
{
int N = 100;
unsigned int size;
float *d_A, *d_B, *d_C;
float *h_A, *h_B, *h_C;
/****************************
* Initialization of memory *
****************************/
size = N * sizeof(float);
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
h_C = (float *) malloc(size);
for (unsigned i=0; i<N; i++) {
h_A[i] = 1.0f;
h_B[i] = 2.0f;
h_C[i] = 0.0f;
}
// YOUR TASKS:
// - Allocate below device arrays d_A, d_B and d_C
// - Transfer array data from host to device arrays
// Insert code below this line.
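// One possible completion of the tasks above (a sketch using the CUDA runtime API):
// cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C, size);
// cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);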
/****************************
* GPU execution *
****************************/
// YOUR TASK:
// - Define below the number of threads per block and blocks per grid
// Update the two lines below this line.
int threadsPerBlock = 0;
int blocksPerGrid = 0;
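// A typical choice (sketch): threadsPerBlock = 256 and
// blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock,
// rounding up so that every one of the N elements is covered by a thread.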
VecAdd_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B,d_C,N);
cudaThreadSynchronize();
// YOUR TASK:
// - Transfer data results stored in d_C to host array
// Insert code below this line.
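// Sketch: cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);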
/****************************
* Verification *
****************************/
float sum = 0.0f;
for (unsigned i=0; i<N; i++) {
sum += h_C[i];
}
printf("Vector addition\n");
if (abs(sum-3.0f*(float) N)<=1e-10)
{
printf("PASSED!\n");
}
else
{
printf("FAILED!\n");
}
/****************************
* Cleaning memory *
****************************/
// YOUR TASK:
// - Free device memory for the allocated d_A, d_B and d_C arrays
// Insert code below this line.
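// Sketch: cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);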
free(h_A);
free(h_B);
free(h_C);
return 0;
} |
57567ede84cf531691fb6f8563003699b2247196.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <string>
#include <algorithm>
#include <numeric> // for std::accumulate used below
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
__global__ void linear_interp_kick(const double * beam_dt,
double * beam_dE,
const double * voltage_array,
const double * bin_centers,
const int n_slices,
const int n_macroparticles,
const double acc_kick)
{
const double inv_bin_width = (n_slices - 1) / (bin_centers[n_slices - 1] - bin_centers[0]);
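// Grid-stride loop: each thread handles several particles, so the kernel is
// correct for any blocks/threads launch configuration.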
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_macroparticles;
i += blockDim.x * gridDim.x)
{
const double a = beam_dt[i];
int ffbin = (int)((a - bin_centers[0]) * inv_bin_width);
double voltageKick;
if ((a < bin_centers[0]) || (a > bin_centers[n_slices - 1]))
voltageKick = 0.;
else
voltageKick = voltage_array[ffbin] +
(a - bin_centers[ffbin]) *
(voltage_array[ffbin + 1] - voltage_array[ffbin])
* inv_bin_width;
beam_dE[i] = beam_dE[i] + voltageKick + acc_kick;
}
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
int n_slices = 1000;
int blocks = 512;
int threads = 512;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) n_slices = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> dE, dt;
vector<double> voltage, edges, bin_centers;
double cut_left, cut_right, acc_kick;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
voltage.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
voltage[i] = d(gen);
}
cut_left = 1.05 * (*min_element(dt.begin(), dt.end()));
cut_right = 0.95 * (*max_element(dt.begin(), dt.end()));
// cut_left = dt[rand() % n_slices];
// cut_right = dt[rand() % n_slices];
acc_kick = 10e6 * d(gen);
if (cut_left > cut_right) swap(cut_left, cut_right);
edges.resize(n_slices);
linspace(cut_left, cut_right, n_slices + 1, edges.data());
bin_centers.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
bin_centers[i] = (edges[i] + edges[i + 1]) / 2.;
}
thrust::device_vector<double> d_dE = dE;
thrust::device_vector<double> d_dt = dt;
thrust::device_vector<double> d_voltage = voltage;
thrust::device_vector<double> d_bin_centers = bin_centers;
double *d_dE_ptr = thrust::raw_pointer_cast(d_dE.data());
double *d_dt_ptr = thrust::raw_pointer_cast(d_dt.data());
double *d_bin_centers_ptr = thrust::raw_pointer_cast(d_bin_centers.data());
double *d_voltage_ptr = thrust::raw_pointer_cast(d_voltage.data());
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
hipLaunchKernelGGL(( linear_interp_kick) , dim3(blocks), dim3(threads), 0, 0,
d_dt_ptr,
d_dE_ptr,
d_voltage_ptr,
d_bin_centers_ptr,
n_slices, n_particles, acc_kick);
hipDeviceSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_dE.begin(), d_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("interp_kick_gpu_v7\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} | 57567ede84cf531691fb6f8563003699b2247196.cu | #include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <string>
#include <algorithm>
#include <numeric> // for std::accumulate used below
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
__global__ void linear_interp_kick(const double * beam_dt,
double * beam_dE,
const double * voltage_array,
const double * bin_centers,
const int n_slices,
const int n_macroparticles,
const double acc_kick)
{
const double inv_bin_width = (n_slices - 1) / (bin_centers[n_slices - 1] - bin_centers[0]);
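// Grid-stride loop: each thread handles several particles, so the kernel is
// correct for any blocks/threads launch configuration.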
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n_macroparticles;
i += blockDim.x * gridDim.x)
{
const double a = beam_dt[i];
int ffbin = (int)((a - bin_centers[0]) * inv_bin_width);
double voltageKick;
if ((a < bin_centers[0]) || (a > bin_centers[n_slices - 1]))
voltageKick = 0.;
else
voltageKick = voltage_array[ffbin] +
(a - bin_centers[ffbin]) *
(voltage_array[ffbin + 1] - voltage_array[ffbin])
* inv_bin_width;
beam_dE[i] = beam_dE[i] + voltageKick + acc_kick;
}
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
int n_slices = 1000;
int blocks = 512;
int threads = 512;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) n_slices = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> dE, dt;
vector<double> voltage, edges, bin_centers;
double cut_left, cut_right, acc_kick;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
voltage.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
voltage[i] = d(gen);
}
cut_left = 1.05 * (*min_element(dt.begin(), dt.end()));
cut_right = 0.95 * (*max_element(dt.begin(), dt.end()));
// cut_left = dt[rand() % n_slices];
// cut_right = dt[rand() % n_slices];
acc_kick = 10e6 * d(gen);
if (cut_left > cut_right) swap(cut_left, cut_right);
edges.resize(n_slices);
linspace(cut_left, cut_right, n_slices + 1, edges.data());
bin_centers.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
bin_centers[i] = (edges[i] + edges[i + 1]) / 2.;
}
thrust::device_vector<double> d_dE = dE;
thrust::device_vector<double> d_dt = dt;
thrust::device_vector<double> d_voltage = voltage;
thrust::device_vector<double> d_bin_centers = bin_centers;
double *d_dE_ptr = thrust::raw_pointer_cast(d_dE.data());
double *d_dt_ptr = thrust::raw_pointer_cast(d_dt.data());
double *d_bin_centers_ptr = thrust::raw_pointer_cast(d_bin_centers.data());
double *d_voltage_ptr = thrust::raw_pointer_cast(d_voltage.data());
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
linear_interp_kick <<< blocks, threads>>>(
d_dt_ptr,
d_dE_ptr,
d_voltage_ptr,
d_bin_centers_ptr,
n_slices, n_particles, acc_kick);
cudaThreadSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_dE.begin(), d_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("interp_kick_gpu_v7\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} |
4de27e1900c2865a56d82f42444f4a8a148ce9ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file FSI.cu
* \author Christopher Minar ([email protected])
* \based on code by Anush Krishnan ([email protected])
* \brief Declaration of the class FSI.
*/
#include <solvers/NavierStokes/oscCylinder/kernels/structure.h>
#include "FSI.h"
#include <solvers/NavierStokes/NavierStokes/kernels/intermediatePressure.h>
#include <sys/stat.h>
/**
* \brief Constructor. Copies the database and information about the computational grid.
*
* \param pDB database that contains all the simulation parameters
* \param dInfo information related to the computational grid
*//*
FSI::FSI(parameterDB *pDB, domain *dInfo)
{
paramDB = pDB;
domInfo = dInfo;
}
void FSI::writeData()
{
parameterDB &db = *NavierStokesSolver::paramDB;
double dt = db["simulation"]["dt"].get<double>();
NavierStokesSolver::logger.startTimer("output");
NavierStokesSolver::writeCommon();
forceFile << timeStep*dt << '\t' << B.forceX << '\t' << B.forceY << std::endl;
logger.stopTimer("output");
}
void FSI::updateSolver()
{
NavierStokesSolver::B.calculateBoundingBoxes(*NavierStokesSolver::paramDB, *NavierStokesSolver::domInfo);
NavierStokesSolver::tagPoints();
NavierStokesSolver::generateLHS1();//is this needed?
NavierStokesSolver::generateLHS2();
NavierStokesSolver::logger.startTimer("Preconditioner");
if (NavierStokesSolver::iterationCount2 > 20)
{
//NavierStokesSolver::PC.update(NavierStokesSolver::LHS1,NavierStokesSolver::LHS2);
//NavierStokesSolver::PC1->update(NavierStokesSolver::LHS1);
//NavierStokesSolver::PC2->update(NavierStokesSolver::LHS2);
}
NavierStokesSolver::logger.stopTimer("Preconditioner");
}
void FSI::moveBody()
{
parameterDB &db = *NavierStokesSolver::paramDB;
NavierStokesSolver::calculateForce();
double *y_r = thrust::raw_pointer_cast( &(NavierStokesSolver::B.y[0]) ),
*vB_r= thrust::raw_pointer_cast( &(NavierStokesSolver::B.vB[0]) );
double Cy = NavierStokesSolver::B.forceY*2.0,
U = NavierStokesSolver::bc[XMINUS][0],
Mred= 2.0,
Ured= db["flow"]["Ured"].get<double>(),
dt = db["simulation"]["dt"].get<double>(),
totalPoints=NavierStokesSolver::B.totalPoints,
vold= NavierStokesSolver::B.centerVelocityV,
yold= NavierStokesSolver::B.midY,
vnew,
ynew;
double a = dt*M_PI*M_PI*4/(Ured*Ured),
b = dt*dt*2*M_PI*M_PI/(Ured*Ured);
//calc updated velocity
vnew = (vold - a*(yold+ dt/2*vold) + dt*Cy/2/Mred)/(1+b);
ynew = yold + dt/2*(vnew + vold);
NavierStokesSolver::B.centerVelocityV = vnew;
NavierStokesSolver::B.midY = ynew;
const int blocksize = 256;
dim3 grid( int( (totalPoints)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
hipLaunchKernelGGL(( kernels::update_body_viv), dim3(grid),dim3(block), 0, 0, y_r, vB_r, ynew-yold, vnew, totalPoints);
}
void FSI::moveBodySC()
{
parameterDB &db = *NavierStokesSolver::paramDB;
NavierStokesSolver::calculateForce();
double *y_r = thrust::raw_pointer_cast( &(NavierStokesSolver::B.y[0]) ),
*vB_r= thrust::raw_pointer_cast( &(NavierStokesSolver::B.vB[0]) );
double Cy = NavierStokesSolver::B.forceY*2.0,
U = NavierStokesSolver::bc[XMINUS][0],
Mred= 2.0,
Ured= db["simulation"]["Ured"].get<double>(),
dt = db["simulation"]["dt"].get<double>(),
totalPoints=NavierStokesSolver::B.totalPoints,
vold= NavierStokesSolver::B.centerVelocityV0,
yold= NavierStokesSolver::B.midY0,
vnew,
ynew,
relaxation_coeff = 0.75;
double a = dt*M_PI*M_PI*4/(Ured*Ured),
b = dt*dt*2*M_PI*M_PI/(Ured*Ured);
//calc updated velocity
vnew = (vold - a*(yold+ dt/2*vold) + dt*Cy/2/Mred)/(1+b);
vnew = relaxation_coeff * vnew + (1-relaxation_coeff) * NavierStokesSolver::B.centerVelocityV; //relax velocity
ynew = yold + dt/2*(vnew + vold);
NavierStokesSolver::B.centerVelocityV = vnew;
NavierStokesSolver::B.midY = ynew;
const int blocksize = 256;
dim3 grid( int( (totalPoints)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
hipLaunchKernelGGL(( kernels::update_body_viv), dim3(grid),dim3(block), 0, 0, y_r, vB_r, ynew-yold, vnew, totalPoints);
}
void FSI::stepTime()
{
LC();
}
void FSI::LC()
{
NavierStokesSolver::generateRHS1();
NavierStokesSolver::solveIntermediateVelocity();
NavierStokesSolver::generateRHS2();
NavierStokesSolver::solvePoisson();
NavierStokesSolver::velocityProjection();
//Release the body after a certain timestep
if (NavierStokesSolver::timeStep >= (*NavierStokesSolver::paramDB)["simulation"]["startStep"].get<int>())
{
std::cout<<"5.1\n\n";
moveBody();
updateSolver();
}
NavierStokesSolver::timeStep++;
if (NavierStokesSolver::timeStep > 140)
{
//arrayprint(rhs1,"rhs1","x");
//arrayprint(uhat,"uhat","x");
//arrayprint(uhat,"vhat","y");
//arrayprint(rhs2,"rhs2","p");
//arrayprint(pressure,"pressure","p");
//arrayprint(u,"u","x");
//arrayprint(u,"v","y");
}
}
void FSI::SC()
{
NavierStokesSolver::B.centerVelocityV0 = NavierStokesSolver::B.centerVelocityV;
NavierStokesSolver::B.midY0 = NavierStokesSolver::B.midY;
int count = 0;
do
{
NavierStokesSolver::B.forceYk[0] = NavierStokesSolver::B.forceY;
NavierStokesSolver::generateRHS1();
NavierStokesSolver::solveIntermediateVelocity();
NavierStokesSolver::generateRHS2();
NavierStokesSolver::solvePoisson();
NavierStokesSolver::velocityProjection();
//Release the body after a certain timestep
if (NavierStokesSolver::timeStep >= (*NavierStokesSolver::paramDB)["simulation"]["startStep"].get<int>())
{
moveBodySC();
updateSolver();
}
count += 1;
}
while (fabs(NavierStokesSolver::B.forceY- NavierStokesSolver::B.forceYk[0]) > 0.0001);
if (count > 1)
std::cout<<count<<"\n";
std::cout<<NavierStokesSolver::timeStep<<"\n";
NavierStokesSolver::timeStep++;
}*/
/*
void FSI::callTest()
{
int nx = NavierStokesSolver::domInfo ->nx,
ny = NavierStokesSolver::domInfo ->ny;
double dt = (*NavierStokesSolver::paramDB)["simulation"]["dt"].get<double>();
const int blocksize = 256;
dim3 grid( int( (nx*ny-0.5)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
double *test_r = thrust::raw_pointer_cast( &(NavierStokesSolver::test[0]) ),
*uhat_r = thrust::raw_pointer_cast( &(NavierStokesSolver::uhat[0]) ),
*pressure_r = thrust::raw_pointer_cast( &(NavierStokesSolver::pressure[0]) ),
*dx_r = thrust::raw_pointer_cast( &(NavierStokesSolver::domInfo->dx[0]) );
kernels::testkernel<<<grid,block>>>(test_r, uhat_r, pressure_r, dx_r, dt, nx, ny);
}*/
| 4de27e1900c2865a56d82f42444f4a8a148ce9ba.cu | /***************************************************************************//**
* \file FSI.cu
* \author Christopher Minar ([email protected])
* \based on code by Anush Krishnan ([email protected])
* \brief Declaration of the class FSI.
*/
#include <solvers/NavierStokes/oscCylinder/kernels/structure.h>
#include "FSI.h"
#include <solvers/NavierStokes/NavierStokes/kernels/intermediatePressure.h>
#include <sys/stat.h>
/**
* \brief Constructor. Copies the database and information about the computational grid.
*
* \param pDB database that contains all the simulation parameters
* \param dInfo information related to the computational grid
*//*
FSI::FSI(parameterDB *pDB, domain *dInfo)
{
paramDB = pDB;
domInfo = dInfo;
}
void FSI::writeData()
{
parameterDB &db = *NavierStokesSolver::paramDB;
double dt = db["simulation"]["dt"].get<double>();
NavierStokesSolver::logger.startTimer("output");
NavierStokesSolver::writeCommon();
forceFile << timeStep*dt << '\t' << B.forceX << '\t' << B.forceY << std::endl;
logger.stopTimer("output");
}
void FSI::updateSolver()
{
NavierStokesSolver::B.calculateBoundingBoxes(*NavierStokesSolver::paramDB, *NavierStokesSolver::domInfo);
NavierStokesSolver::tagPoints();
NavierStokesSolver::generateLHS1();//is this needed?
NavierStokesSolver::generateLHS2();
NavierStokesSolver::logger.startTimer("Preconditioner");
if (NavierStokesSolver::iterationCount2 > 20)
{
//NavierStokesSolver::PC.update(NavierStokesSolver::LHS1,NavierStokesSolver::LHS2);
//NavierStokesSolver::PC1->update(NavierStokesSolver::LHS1);
//NavierStokesSolver::PC2->update(NavierStokesSolver::LHS2);
}
NavierStokesSolver::logger.stopTimer("Preconditioner");
}
void FSI::moveBody()
{
parameterDB &db = *NavierStokesSolver::paramDB;
NavierStokesSolver::calculateForce();
double *y_r = thrust::raw_pointer_cast( &(NavierStokesSolver::B.y[0]) ),
*vB_r= thrust::raw_pointer_cast( &(NavierStokesSolver::B.vB[0]) );
double Cy = NavierStokesSolver::B.forceY*2.0,
U = NavierStokesSolver::bc[XMINUS][0],
Mred= 2.0,
Ured= db["flow"]["Ured"].get<double>(),
dt = db["simulation"]["dt"].get<double>(),
totalPoints=NavierStokesSolver::B.totalPoints,
vold= NavierStokesSolver::B.centerVelocityV,
yold= NavierStokesSolver::B.midY,
vnew,
ynew;
double a = dt*M_PI*M_PI*4/(Ured*Ured),
b = dt*dt*2*M_PI*M_PI/(Ured*Ured);
//calc updated velocity
vnew = (vold - a*(yold+ dt/2*vold) + dt*Cy/2/Mred)/(1+b);
ynew = yold + dt/2*(vnew + vold);
NavierStokesSolver::B.centerVelocityV = vnew;
NavierStokesSolver::B.midY = ynew;
const int blocksize = 256;
dim3 grid( int( (totalPoints)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
kernels::update_body_viv<<<grid,block>>>(y_r, vB_r, ynew-yold, vnew, totalPoints);
}
void FSI::moveBodySC()
{
parameterDB &db = *NavierStokesSolver::paramDB;
NavierStokesSolver::calculateForce();
double *y_r = thrust::raw_pointer_cast( &(NavierStokesSolver::B.y[0]) ),
*vB_r= thrust::raw_pointer_cast( &(NavierStokesSolver::B.vB[0]) );
double Cy = NavierStokesSolver::B.forceY*2.0,
U = NavierStokesSolver::bc[XMINUS][0],
Mred= 2.0,
Ured= db["simulation"]["Ured"].get<double>(),
dt = db["simulation"]["dt"].get<double>(),
totalPoints=NavierStokesSolver::B.totalPoints,
vold= NavierStokesSolver::B.centerVelocityV0,
yold= NavierStokesSolver::B.midY0,
vnew,
ynew,
relaxation_coeff = 0.75;
double a = dt*M_PI*M_PI*4/(Ured*Ured),
b = dt*dt*2*M_PI*M_PI/(Ured*Ured);
//calc updated velocity
vnew = (vold - a*(yold+ dt/2*vold) + dt*Cy/2/Mred)/(1+b);
vnew = relaxation_coeff * vnew + (1-relaxation_coeff) * NavierStokesSolver::B.centerVelocityV; //relax velocity
ynew = yold + dt/2*(vnew + vold);
NavierStokesSolver::B.centerVelocityV = vnew;
NavierStokesSolver::B.midY = ynew;
const int blocksize = 256;
dim3 grid( int( (totalPoints)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
kernels::update_body_viv<<<grid,block>>>(y_r, vB_r, ynew-yold, vnew, totalPoints);
}
void FSI::stepTime()
{
LC();
}
void FSI::LC()
{
NavierStokesSolver::generateRHS1();
NavierStokesSolver::solveIntermediateVelocity();
NavierStokesSolver::generateRHS2();
NavierStokesSolver::solvePoisson();
NavierStokesSolver::velocityProjection();
//Release the body after a certain timestep
if (NavierStokesSolver::timeStep >= (*NavierStokesSolver::paramDB)["simulation"]["startStep"].get<int>())
{
std::cout<<"5.1\n\n";
moveBody();
updateSolver();
}
NavierStokesSolver::timeStep++;
if (NavierStokesSolver::timeStep > 140)
{
//arrayprint(rhs1,"rhs1","x");
//arrayprint(uhat,"uhat","x");
//arrayprint(uhat,"vhat","y");
//arrayprint(rhs2,"rhs2","p");
//arrayprint(pressure,"pressure","p");
//arrayprint(u,"u","x");
//arrayprint(u,"v","y");
}
}
void FSI::SC()
{
NavierStokesSolver::B.centerVelocityV0 = NavierStokesSolver::B.centerVelocityV;
NavierStokesSolver::B.midY0 = NavierStokesSolver::B.midY;
int count = 0;
do
{
NavierStokesSolver::B.forceYk[0] = NavierStokesSolver::B.forceY;
NavierStokesSolver::generateRHS1();
NavierStokesSolver::solveIntermediateVelocity();
NavierStokesSolver::generateRHS2();
NavierStokesSolver::solvePoisson();
NavierStokesSolver::velocityProjection();
//Release the body after a certain timestep
if (NavierStokesSolver::timeStep >= (*NavierStokesSolver::paramDB)["simulation"]["startStep"].get<int>())
{
moveBodySC();
updateSolver();
}
count += 1;
}
while (fabs(NavierStokesSolver::B.forceY- NavierStokesSolver::B.forceYk[0]) > 0.0001);
if (count > 1)
std::cout<<count<<"\n";
std::cout<<NavierStokesSolver::timeStep<<"\n";
NavierStokesSolver::timeStep++;
}*/
/*
void FSI::callTest()
{
int nx = NavierStokesSolver::domInfo ->nx,
ny = NavierStokesSolver::domInfo ->ny;
double dt = (*NavierStokesSolver::paramDB)["simulation"]["dt"].get<double>();
const int blocksize = 256;
dim3 grid( int( (nx*ny-0.5)/blocksize ) +1, 1);
dim3 block(blocksize, 1);
double *test_r = thrust::raw_pointer_cast( &(NavierStokesSolver::test[0]) ),
*uhat_r = thrust::raw_pointer_cast( &(NavierStokesSolver::uhat[0]) ),
*pressure_r = thrust::raw_pointer_cast( &(NavierStokesSolver::pressure[0]) ),
*dx_r = thrust::raw_pointer_cast( &(NavierStokesSolver::domInfo->dx[0]) );
kernels::testkernel<<<grid,block>>>(test_r, uhat_r, pressure_r, dx_r, dt, nx, ny);
}*/
|
f61c304d1936dd1b68b3838071463b6668127441.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NUM_THREADS KEYS_NUM_THREADS
#define BLOCKS_PER_SM KEYS_BLOCKS_PER_SM
#define VALUES_PER_THREAD KEYS_VALUES_PER_THREAD
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS)
#define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD)
#define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD)
////////////////////////////////////////////////////////////////////////////////
// UPSWEEP PASS. Find the sum of all values in the last segment in each block.
// When the first head flag in the block is encountered, write out the sum to
// that point and return. We only need to reduce the last segment to feed sums
// up to the reduction pass.
extern "C" __global__ __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM)
void SegScanUpsweepKeys(const uint* valuesIn_global, const uint* keysIn_global,
uint* blockLast_global, int* headFlagPos_global,
const int2* rangePairs_global) {
uint tid = threadIdx.x;
uint lane = (WARP_SIZE - 1) & tid;
uint warp = tid / WARP_SIZE;
uint block = blockIdx.x;
int2 range = rangePairs_global[block];
const int UpsweepValues = 8;
const int NumValues = UpsweepValues * NUM_THREADS;
// Start at the last tile (NUM_VALUES before the end iterator). Because
// upsweep isn't executed for the last block, we don't have to worry about
// the ending edge case.
int current = range.y - NumValues;
uint threadSum = 0;
uint blockFlags = 0;
// Load the last key in the segment.
uint lastKey = keysIn_global[range.y - 1];
while(current >= range.x) {
uint keys[UpsweepValues];
uint x[UpsweepValues];
#pragma unroll
for(int i = 0; i < UpsweepValues; ++i) {
x[i] = valuesIn_global[current + tid + i * NUM_THREADS];
keys[i] = keysIn_global[current + tid + i * NUM_THREADS];
}
// Add up all the values with a key that matches lastKey. If this thread
// has any key that doesn't match lastKey, mark the prevSeg flag.
bool prevSeg = false;
#pragma unroll
for(int i = 0; i < UpsweepValues; ++i) {
if(keys[i] == lastKey) threadSum += x[i];
else prevSeg = true;
}
// Use ballot to see if any threads in this warp encountered an earlier
// segment.
uint warpFlags = __ballot(prevSeg);
__shared__ volatile uint warpShared[NUM_WARPS];
if(!lane) warpShared[warp] = warpFlags;
__syncthreads();
if(tid < NUM_WARPS) {
warpFlags = warpShared[tid];
warpFlags = __ballot(warpFlags);
warpShared[tid] = warpFlags;
}
__syncthreads();
blockFlags = warpShared[0]; // assign to the outer blockFlags so the head-flag logic after the loop sees it
if(blockFlags) break;
current -= NumValues;
}
// We've either hit the preceding segment or run out of values. Do a
// horizontal sum of the thread values and store to global memory.
uint total = (uint)Reduce<NUM_WARPS>(tid, (int)threadSum, 0);
if(0 == tid) {
blockLast_global[block] = total;
// Prepare the head flag.
uint headFlag = blockFlags;
if(!headFlag && range.x) {
// Load the preceding key.
uint precedingKey = keysIn_global[range.x - 1];
headFlag = precedingKey != lastKey;
}
headFlagPos_global[block] = headFlag;
}
}
////////////////////////////////////////////////////////////////////////////////
// DOWNSWEEP PASS.
extern "C" __global__ __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM)
void SegScanDownsweepKeys(const uint* valuesIn_global,
const uint* keysIn_global, uint* valuesOut_global, const uint* start_global,
const int2* rangePairs_global, int count, int inclusive) {
uint tid = threadIdx.x;
uint lane = (WARP_SIZE - 1) & tid;
uint warp = tid / WARP_SIZE;
uint block = blockIdx.x;
uint index = VALUES_PER_WARP * warp + lane;
int2 range = rangePairs_global[block];
const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1);
__shared__ volatile uint shared[Size];
__shared__ volatile uint blockOffset_shared;
// Use a stride of 33 slots per warp per value to allow conflict-free
// transposes from strided to thread order.
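// (The +1 padding per 32 slots staggers rows across shared-memory banks, so the
// strided accesses of the transpose avoid bank conflicts.)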
volatile uint* warpShared = shared +
warp * VALUES_PER_THREAD * (WARP_SIZE + 1);
volatile uint* threadShared = warpShared + lane;
// Transpose values into thread order.
uint offset = VALUES_PER_THREAD * lane;
offset += offset / WARP_SIZE;
if(!tid) blockOffset_shared = start_global[block];
__shared__ volatile uint precedingKey_shared;
if(!tid)
precedingKey_shared = block ? keysIn_global[range.x - 1] : 0;
while(range.x < range.y) {
// Load values into packed.
uint x[VALUES_PER_THREAD];
uint keys[VALUES_PER_THREAD];
////////////////////////////////////////////////////////////////////////
// Load and transpose values.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint x = valuesIn_global[source];
threadShared[i * (WARP_SIZE + 1)] = x;
}
// Transpose into thread order
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
x[i] = warpShared[offset + i];
////////////////////////////////////////////////////////////////////////
// Load and transpose keys.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint key = keysIn_global[source];
threadShared[i * (WARP_SIZE + 1)] = key;
}
// Transpose into thread order
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
keys[i] = warpShared[offset + i];
__syncthreads();
// Store the last key for each thread in shared memory.
shared[1 + tid] = keys[VALUES_PER_THREAD - 1];
__syncthreads();
// Retrieve the last key for the preceding thread.
uint precedingKey = shared[tid];
if(!tid) {
precedingKey = precedingKey_shared;
precedingKey_shared = shared[NUM_THREADS];
}
////////////////////////////////////////////////////////////////////////
// Compare the adjacent keys in each thread to derive head flags.
uint flags[VALUES_PER_THREAD];
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
if(i) flags[i] = keys[i - 1] != keys[i];
else flags[0] = keys[0] != precedingKey;
}
////////////////////////////////////////////////////////////////////////
// Run downsweep function on values and head flags.
SegScanDownsweep<NUM_WARPS, VALUES_PER_THREAD>(tid, lane, warp, x,
flags, warpShared, threadShared, inclusive, &blockOffset_shared);
// Transpose
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
warpShared[offset + i] = x[i];
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint target = range.x + index + i * WARP_SIZE;
valuesOut_global[target] = threadShared[i * (WARP_SIZE + 1)];
}
range.x += NUM_VALUES;
}
}
#undef NUM_THREADS
#undef NUM_WARPS
#undef LOG_NUM_WARPS
#undef BLOCKS_PER_SM
#undef VALUES_PER_THREAD
#undef VALUES_PER_WARP
#undef NUM_VALUES
| f61c304d1936dd1b68b3838071463b6668127441.cu | #define NUM_THREADS KEYS_NUM_THREADS
#define BLOCKS_PER_SM KEYS_BLOCKS_PER_SM
#define VALUES_PER_THREAD KEYS_VALUES_PER_THREAD
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS)
#define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD)
#define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD)
////////////////////////////////////////////////////////////////////////////////
// UPSWEEP PASS. Find the sum of all values in the last segment in each block.
// When the first head flag in the block is encountered, write out the sum to
// that point and return. We only need to reduce the last segment to feed sums
// up to the reduction pass.
extern "C" __global__ __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM)
void SegScanUpsweepKeys(const uint* valuesIn_global, const uint* keysIn_global,
uint* blockLast_global, int* headFlagPos_global,
const int2* rangePairs_global) {
uint tid = threadIdx.x;
uint lane = (WARP_SIZE - 1) & tid;
uint warp = tid / WARP_SIZE;
uint block = blockIdx.x;
int2 range = rangePairs_global[block];
const int UpsweepValues = 8;
const int NumValues = UpsweepValues * NUM_THREADS;
// Start at the last tile (NUM_VALUES before the end iterator). Because
// upsweep isn't executed for the last block, we don't have to worry about
// the ending edge case.
int current = range.y - NumValues;
uint threadSum = 0;
uint blockFlags = 0;
// Load the last key in the segment.
uint lastKey = keysIn_global[range.y - 1];
while(current >= range.x) {
uint keys[UpsweepValues];
uint x[UpsweepValues];
#pragma unroll
for(int i = 0; i < UpsweepValues; ++i) {
x[i] = valuesIn_global[current + tid + i * NUM_THREADS];
keys[i] = keysIn_global[current + tid + i * NUM_THREADS];
}
// Add up all the values with a key that matches lastKey. If this thread
// has any key that doesn't match lastKey, mark the prevSeg flag.
bool prevSeg = false;
#pragma unroll
for(int i = 0; i < UpsweepValues; ++i) {
if(keys[i] == lastKey) threadSum += x[i];
else prevSeg = true;
}
// Use ballot to see if any threads in this warp encountered an earlier
// segment.
uint warpFlags = __ballot(prevSeg);
__shared__ volatile uint warpShared[NUM_WARPS];
if(!lane) warpShared[warp] = warpFlags;
__syncthreads();
if(tid < NUM_WARPS) {
warpFlags = warpShared[tid];
warpFlags = __ballot(warpFlags);
warpShared[tid] = warpFlags;
}
__syncthreads();
blockFlags = warpShared[0]; // assign to the outer blockFlags so the head-flag logic after the loop sees it
if(blockFlags) break;
current -= NumValues;
}
// We've either hit the preceding segment or run out of values. Do a
// horizontal sum of the thread values and store to global memory.
uint total = (uint)Reduce<NUM_WARPS>(tid, (int)threadSum, 0);
if(0 == tid) {
blockLast_global[block] = total;
// Prepare the head flag.
uint headFlag = blockFlags;
if(!headFlag && range.x) {
// Load the preceding key.
uint precedingKey = keysIn_global[range.x - 1];
headFlag = precedingKey != lastKey;
}
headFlagPos_global[block] = headFlag;
}
}
////////////////////////////////////////////////////////////////////////////////
// DOWNSWEEP PASS.
extern "C" __global__ __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM)
void SegScanDownsweepKeys(const uint* valuesIn_global,
const uint* keysIn_global, uint* valuesOut_global, const uint* start_global,
const int2* rangePairs_global, int count, int inclusive) {
uint tid = threadIdx.x;
uint lane = (WARP_SIZE - 1) & tid;
uint warp = tid / WARP_SIZE;
uint block = blockIdx.x;
uint index = VALUES_PER_WARP * warp + lane;
int2 range = rangePairs_global[block];
const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1);
__shared__ volatile uint shared[Size];
__shared__ volatile uint blockOffset_shared;
// Use a stride of 33 slots per warp per value to allow conflict-free
// transposes from strided to thread order.
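// (The +1 padding per 32 slots staggers rows across shared-memory banks, so the
// strided accesses of the transpose avoid bank conflicts.)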
volatile uint* warpShared = shared +
warp * VALUES_PER_THREAD * (WARP_SIZE + 1);
volatile uint* threadShared = warpShared + lane;
// Transpose values into thread order.
uint offset = VALUES_PER_THREAD * lane;
offset += offset / WARP_SIZE;
if(!tid) blockOffset_shared = start_global[block];
__shared__ volatile uint precedingKey_shared;
if(!tid)
precedingKey_shared = block ? keysIn_global[range.x - 1] : 0;
while(range.x < range.y) {
// Load values into packed.
uint x[VALUES_PER_THREAD];
uint keys[VALUES_PER_THREAD];
////////////////////////////////////////////////////////////////////////
// Load and transpose values.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint x = valuesIn_global[source];
threadShared[i * (WARP_SIZE + 1)] = x;
}
// Transpose into thread order
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
x[i] = warpShared[offset + i];
////////////////////////////////////////////////////////////////////////
// Load and transpose keys.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint key = keysIn_global[source];
threadShared[i * (WARP_SIZE + 1)] = key;
}
// Transpose into thread order
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
keys[i] = warpShared[offset + i];
__syncthreads();
// Store the last key for each thread in shared memory.
shared[1 + tid] = keys[VALUES_PER_THREAD - 1];
__syncthreads();
// Retrieve the last key for the preceding thread.
uint precedingKey = shared[tid];
if(!tid) {
precedingKey = precedingKey_shared;
precedingKey_shared = shared[NUM_THREADS];
}
////////////////////////////////////////////////////////////////////////
// Compare the adjacent keys in each thread to derive head flags.
uint flags[VALUES_PER_THREAD];
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
if(i) flags[i] = keys[i - 1] != keys[i];
else flags[0] = keys[0] != precedingKey;
}
////////////////////////////////////////////////////////////////////////
// Run downsweep function on values and head flags.
SegScanDownsweep<NUM_WARPS, VALUES_PER_THREAD>(tid, lane, warp, x,
flags, warpShared, threadShared, inclusive, &blockOffset_shared);
// Transpose
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i)
warpShared[offset + i] = x[i];
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint target = range.x + index + i * WARP_SIZE;
valuesOut_global[target] = threadShared[i * (WARP_SIZE + 1)];
}
range.x += NUM_VALUES;
}
}
#undef NUM_THREADS
#undef NUM_WARPS
#undef LOG_NUM_WARPS
#undef BLOCKS_PER_SM
#undef VALUES_PER_THREAD
#undef VALUES_PER_WARP
#undef NUM_VALUES
|
91325ebdd1dc1c26c05672bc355f02e8ac25fa2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THZCTensorMath.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include "THZCApply.cuh"
#include "THH/THHReduceApplyUtils.cuh"
// ccx toCcx(cx val) {
// return ccx(crealf(val), cimagf(val));
// }
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t1, IndexType* t1Offset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
// Same as above but using a dynamic number of dimensions.
template <typename IndexType>
struct IndexToScatterGatherOffsets<IndexType, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t1, IndexType* t1Offset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
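// Added worked example (hypothetical sizes, for illustration only): for an
// index tensor of size {3, 4}, dim == 1 and linearId == 7 the loops above
// give curDimIndex = 7 % 4 = 3 at d = 1 (t2 skipped because d == dim) and
// curDimIndex = 1 % 3 = 1 at d = 0, so index and t1 are addressed at (1, 3)
// while t2 only receives the row coordinate - its offset along 'dim' is
// filled in later from the fetched index value.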
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_gatherKernel(
ZTensorInfo<IndexType> tensor,
ZTensorInfo<IndexType> src,
TensorInfo<IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
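    // Added note: the stored indices are presumably 1-based (Torch
    // convention), hence the "- 1" below when converting to a 0-based offset.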
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#define RUN(TYPE, DIMS) \
hipLaunchKernelGGL(( THZCudaTensor_gatherKernel<TYPE, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
tensorinfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THZCudaTensor_gather(THCState* state, THZCudaTensor *tensor, THZCudaTensor *src, int dim, THCudaTensor *index) {
THAssert(THZCudaTensor_checkGPU(state, 3, tensor, src, index));
THArgCheck(THZCudaTensor_nDimension(state, src) == THZCudaTensor_nDimension(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THZCudaTensor_isSameSizeAsZF(state, tensor, index), 4,
"Index tensor must have the same size as output tensor.");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THZCudaTensor_size(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THZC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned int> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1)
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_scatterKernel(
ZTensorInfo<IndexType> tensor,
ZTensorInfo<IndexType> src,
TensorInfo<IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#define RUN(TYPE, DIMS) \
hipLaunchKernelGGL(( THZCudaTensor_scatterKernel<TYPE, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
tensorinfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THZCudaTensor_scatter(THCState* state, THZCudaTensor *tensor, int dim, THCudaTensor *index, THZCudaTensor *src) {
THAssert(THZCudaTensor_checkGPU(state, 3, tensor, src));
THAssert(THCudaTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THZCudaTensor_nDimension(state, src) == THZCudaTensor_nDimension(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
THArgCheck(THZCudaTensor_isSameSizeAsZF(state, src, index), 3,
"Index tensor must have the same size as input tensor.");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THZCudaTensor_size(state, src, d), 4,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THZC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned int> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1)
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_scatterFillKernel(
ZTensorInfo<IndexType> tensor,
TensorInfo<IndexType> index,
ccx value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#define RUN(TYPE, DIMS) \
hipLaunchKernelGGL(( THZCudaTensor_scatterFillKernel<TYPE, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
tensorinfo, indexInfo, toCcx(value), dim, (TYPE)totalElements);
void THZCudaTensor_scatterFill(THCState* state, THZCudaTensor *tensor, int dim, THCudaTensor *index, cx value) {
THAssert(THZCudaTensor_checkGPU(state, 2, tensor));
THAssert(THCudaTensor_checkGPU(state, 2, index));
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, tensor), 3,
"Index tensor must have same dimensions as output tensor");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1);
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
| 91325ebdd1dc1c26c05672bc355f02e8ac25fa2a.cu | #include "THZCTensorMath.h"
#include "THZCGeneral.h"
#include "THZCGeneral.cuh"
#include "THZCApply.cuh"
#include "THC/THCReduceApplyUtils.cuh"
// ccx toCcx(cx val) {
// return ccx(crealf(val), cimagf(val));
// }
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t1, IndexType* t1Offset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
// Same as above but using a dynamic number of dimensions.
template <typename IndexType>
struct IndexToScatterGatherOffsets<IndexType, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t1, IndexType* t1Offset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<IndexType>& index, IndexType* indexOffset,
const ZTensorInfo<IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_gatherKernel(
ZTensorInfo<IndexType> tensor,
ZTensorInfo<IndexType> src,
TensorInfo<IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#define RUN(TYPE, DIMS) \
THZCudaTensor_gatherKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorinfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THZCudaTensor_gather(THCState* state, THZCudaTensor *tensor, THZCudaTensor *src, int dim, THCudaTensor *index) {
THAssert(THZCudaTensor_checkGPU(state, 3, tensor, src, index));
THArgCheck(THZCudaTensor_nDimension(state, src) == THZCudaTensor_nDimension(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THZCudaTensor_isSameSizeAsZF(state, tensor, index), 4,
"Index tensor must have the same size as output tensor.");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THZCudaTensor_size(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THZC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned int> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1)
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_scatterKernel(
ZTensorInfo<IndexType> tensor,
ZTensorInfo<IndexType> src,
TensorInfo<IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#define RUN(TYPE, DIMS) \
THZCudaTensor_scatterKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorinfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THZCudaTensor_scatter(THCState* state, THZCudaTensor *tensor, int dim, THCudaTensor *index, THZCudaTensor *src) {
THAssert(THZCudaTensor_checkGPU(state, 3, tensor, src));
THAssert(THCudaTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THZCudaTensor_nDimension(state, src) == THZCudaTensor_nDimension(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
THArgCheck(THZCudaTensor_isSameSizeAsZF(state, src, index), 3,
"Index tensor must have the same size as input tensor.");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THZCudaTensor_size(state, src, d), 4,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THZC_canUse32BitIndexMath(state, src) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned int> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
ZTensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1)
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
template <typename IndexType, int Dims>
__global__ void THZCudaTensor_scatterFillKernel(
ZTensorInfo<IndexType> tensor,
TensorInfo<IndexType> index,
ccx value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
IndexType indexValue = (IndexType)index.data[indexOffset] - 1;
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#define RUN(TYPE, DIMS) \
THZCudaTensor_scatterFillKernel<TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
tensorinfo, indexInfo, toCcx(value), dim, (TYPE)totalElements);
void THZCudaTensor_scatterFill(THCState* state, THZCudaTensor *tensor, int dim, THCudaTensor *index, cx value) {
THAssert(THZCudaTensor_checkGPU(state, 2, tensor));
THAssert(THCudaTensor_checkGPU(state, 2, index));
THArgCheck(dim >= 0 && dim < THZCudaTensor_nDimension(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaTensor_nDimension(state, index) == THZCudaTensor_nDimension(state, tensor), 3,
"Index tensor must have same dimensions as output tensor");
for (int d = 0; d < THZCudaTensor_nDimension(state, tensor); d++) {
if (d != dim) {
THArgCheck(THZCudaTensor_size(state, tensor, d) == THCudaTensor_size(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
// if (THZCudaTensor_nDimension(state, tensor) > MAX_CUTORCH_DIMS) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
const long totalElements = THCudaTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// if (!getApplyGrid(state, totalElements, grid)) {
// return THArgCheck(false, 1, CUTORCH_DIM_WARNING);
// }
THZCudaTensor* oldTensor = NULL;
if (THZC_overlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THZCudaTensor_newContiguous(state, tensor);
}
if (THZC_canUse32BitIndexMath(state, tensor) &&
THC_canUse32BitIndexMath(state, index)) {
ZTensorInfo<unsigned int> tensorinfo(state, tensor, NoCollapseDims);
TensorInfo<unsigned int> indexInfo(state, index, NoCollapseDims);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1);
break;
case 2:
RUN(unsigned int, 2);
break;
case 3:
RUN(unsigned int, 3);
break;
default:
RUN(unsigned int, -1);
break;
}
} else {
ZTensorInfo<unsigned long> tensorinfo(state, tensor, NoCollapseDims);
TensorInfo<unsigned long> indexInfo(state, index, NoCollapseDims);
RUN(unsigned long, -1);
}
if (oldTensor) {
THZCudaTensor_copyIgnoringOverlaps(state, oldTensor, tensor);
THZCudaTensor_free(state, tensor);
tensor = oldTensor;
}
}
#undef RUN
|
d50019e8f71f910cb598d5072ea228af8e40a5d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=16 --blockDim=64
#include "common.h"
__global__ void
d_boxfilter_x_global(float *id, float *od, int w, int h, int r)
{
__requires(w == 1024);
__requires(h == 1024);
__requires(r == 14);
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_x(&id[y * w], &od[y * w], w, h, r);
}
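// Added note: the __requires clauses appear to be GPUVerify-style
// preconditions, so the kernel is only checked for a 1024x1024 image with
// radius 14; with gridDim=16 and blockDim=64 each of the 1024 threads runs
// the row filter d_boxfilter_x over one full image row y.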
| d50019e8f71f910cb598d5072ea228af8e40a5d6.cu | //pass
//--gridDim=16 --blockDim=64
#include "common.h"
__global__ void
d_boxfilter_x_global(float *id, float *od, int w, int h, int r)
{
__requires(w == 1024);
__requires(h == 1024);
__requires(r == 14);
unsigned int y = blockIdx.x*blockDim.x + threadIdx.x;
d_boxfilter_x(&id[y * w], &od[y * w], w, h, r);
}
|
7bb74e0cf7ed949beaa643cd68fba4a01ed0d594.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/nccl/nccl_gpu_common.h"
namespace paddle {
namespace operators {
using framework::Tensor;
using platform::Communicator;
using framework::LoDTensor;
template <typename Type>
class NCCLTypeWrapper;
template <>
class NCCLTypeWrapper<float> {
public:
static const ncclDataType_t type = ncclFloat;
};
template <>
class NCCLTypeWrapper<double> {
public:
static const ncclDataType_t type = ncclDouble;
};
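// Usage sketch (added for illustration, not part of the original op): the
// wrapper resolves the NCCL dtype at compile time, e.g.
//
//   template <typename T>
//   ncclDataType_t NcclDtypeOf() {        // hypothetical helper name
//     return NCCLTypeWrapper<T>::type;    // ncclFloat for float,
//   }                                     // ncclDouble for double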
template <typename T>
class NCCLAllReduceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto ins = ctx.MultiInput<LoDTensor>("X");
auto outs = ctx.MultiOutput<LoDTensor>("Out");
std::string reduction = ctx.Attr<std::string>("reduction");
ncclRedOp_t reduction_op_ = ncclSum;
if (reduction == "ncclMin") {
reduction_op_ = ncclMin;
} else if (reduction == "ncclMax") {
reduction_op_ = ncclMax;
} else if (reduction == "ncclSum") {
reduction_op_ = ncclSum;
} else if (reduction == "ncclProd") {
reduction_op_ = ncclProd;
} else {
PADDLE_THROW("Invalid reduction. default ncclSum.");
}
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
for (size_t i = 0; i < ins.size(); ++i) {
VLOG(1) << "gpu : "
<< " invoke allreduce. send " << ins[i]->numel() << " recv "
<< outs[i]->numel();
PADDLE_ENFORCE(platform::dynload::ncclAllReduce(
ins[i]->data<T>(), outs[i]->mutable_data<T>(ctx.GetPlace()),
outs[i]->numel(), NCCLTypeWrapper<T>::type, reduction_op_,
comm->comms_[idx], stream));
PADDLE_ENFORCE(hipStreamSynchronize(stream));
VLOG(1) << "gpu : "
<< " finished allreduce. send " << ins[i]->numel() << " recv "
<< outs[i]->numel();
}
}
};
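// Added illustration (not part of the original kernel): with two GPUs holding
// X0 = {1, 2} and X1 = {3, 4}, reduction == "ncclSum" leaves Out = {4, 6} on
// both devices after the ncclAllReduce/stream-synchronize pair above, while
// "ncclMax" would give {3, 4} everywhere.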
template <typename T>
class NCCLReduceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto ins = ctx.MultiInput<LoDTensor>("X"); // x0, x1, x2
auto outs = ctx.MultiOutput<LoDTensor>("Out");
std::string reduction = ctx.Attr<std::string>("reduction");
ncclRedOp_t reduction_op_ = ncclSum;
if (reduction == "ncclMin") {
reduction_op_ = ncclMin;
} else if (reduction == "ncclMax") {
reduction_op_ = ncclMax;
} else if (reduction == "ncclSum") {
reduction_op_ = ncclSum;
} else if (reduction == "ncclProd") {
reduction_op_ = ncclProd;
} else {
PADDLE_THROW("Invalid reduction. default ncclSum.");
}
int root = ctx.Attr<int>("root");
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
auto ins_names = ctx.Inputs("X");
std::hash<std::string> hasher;
for (size_t i = 0; i < ins.size(); ++i) {
if (root == platform::kInvalidGPUId) {
root = hasher(ins_names[i]) % comm->comms_.size();
}
T* recvbuffer = nullptr;
if (root == gpu_id) {
recvbuffer = outs[i]->mutable_data<T>(ctx.GetPlace());
}
VLOG(1) << "gpu : " << gpu_id << " invoke reduce. send "
<< ins[i]->numel() << " recv " << outs[i]->numel();
PADDLE_ENFORCE(platform::dynload::ncclReduce(
ins[i]->data<T>(), recvbuffer, ins[i]->numel(),
NCCLTypeWrapper<T>::type, reduction_op_, root, comm->comms_[idx],
stream));
PADDLE_ENFORCE(hipStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished reduce. send "
<< ins[i]->numel() << " recv " << outs[i]->numel();
}
}
};
template <typename T>
class NCCLBcastKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
int root = ctx.Attr<int>("root");
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
if (idx == root) {
auto ins = ctx.MultiInput<LoDTensor>("X");
for (size_t i = 0; i < ins.size(); ++i) {
VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. send "
<< ins[i]->numel();
VLOG(1) << " before ncclBcast";
PADDLE_ENFORCE(platform::dynload::ncclBcast(
(void*)ins[i]->data<T>(), ins[i]->numel(), NCCLTypeWrapper<T>::type,
root, comm->comms_[idx], stream));
VLOG(1) << " after ncclBcast";
PADDLE_ENFORCE(hipStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished Bcast.";
}
} else {
auto outs = ctx.MultiOutput<LoDTensor>("Out");
for (size_t i = 0; i < outs.size(); ++i) {
VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. recv buffer "
<< framework::product(outs[i]->dims());
PADDLE_ENFORCE(platform::dynload::ncclBcast(
outs[i]->mutable_data<T>(ctx.GetPlace()), outs[i]->numel(),
NCCLTypeWrapper<T>::type, root, comm->comms_[idx], stream));
PADDLE_ENFORCE(hipStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished Bcast. recv "
<< outs[i]->numel();
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(ncclAllReduce, ops::NCCLAllReduceKernel<float>);
REGISTER_OP_GPU_KERNEL(ncclBcast, ops::NCCLBcastKernel<float>);
REGISTER_OP_GPU_KERNEL(ncclReduce, ops::NCCLReduceKernel<float>);
| 7bb74e0cf7ed949beaa643cd68fba4a01ed0d594.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <functional>
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/nccl/nccl_gpu_common.h"
namespace paddle {
namespace operators {
using framework::Tensor;
using platform::Communicator;
using framework::LoDTensor;
template <typename Type>
class NCCLTypeWrapper;
template <>
class NCCLTypeWrapper<float> {
public:
static const ncclDataType_t type = ncclFloat;
};
template <>
class NCCLTypeWrapper<double> {
public:
static const ncclDataType_t type = ncclDouble;
};
template <typename T>
class NCCLAllReduceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto ins = ctx.MultiInput<LoDTensor>("X");
auto outs = ctx.MultiOutput<LoDTensor>("Out");
std::string reduction = ctx.Attr<std::string>("reduction");
ncclRedOp_t reduction_op_ = ncclSum;
if (reduction == "ncclMin") {
reduction_op_ = ncclMin;
} else if (reduction == "ncclMax") {
reduction_op_ = ncclMax;
} else if (reduction == "ncclSum") {
reduction_op_ = ncclSum;
} else if (reduction == "ncclProd") {
reduction_op_ = ncclProd;
} else {
PADDLE_THROW("Invalid reduction. default ncclSum.");
}
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
for (size_t i = 0; i < ins.size(); ++i) {
VLOG(1) << "gpu : "
<< " invoke allreduce. send " << ins[i]->numel() << " recv "
<< outs[i]->numel();
PADDLE_ENFORCE(platform::dynload::ncclAllReduce(
ins[i]->data<T>(), outs[i]->mutable_data<T>(ctx.GetPlace()),
outs[i]->numel(), NCCLTypeWrapper<T>::type, reduction_op_,
comm->comms_[idx], stream));
PADDLE_ENFORCE(cudaStreamSynchronize(stream));
VLOG(1) << "gpu : "
<< " finished allreduce. send " << ins[i]->numel() << " recv "
<< outs[i]->numel();
}
}
};
template <typename T>
class NCCLReduceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto ins = ctx.MultiInput<LoDTensor>("X"); // x0, x1, x2
auto outs = ctx.MultiOutput<LoDTensor>("Out");
std::string reduction = ctx.Attr<std::string>("reduction");
ncclRedOp_t reduction_op_ = ncclSum;
if (reduction == "ncclMin") {
reduction_op_ = ncclMin;
} else if (reduction == "ncclMax") {
reduction_op_ = ncclMax;
} else if (reduction == "ncclSum") {
reduction_op_ = ncclSum;
} else if (reduction == "ncclProd") {
reduction_op_ = ncclProd;
} else {
PADDLE_THROW("Invalid reduction. default ncclSum.");
}
int root = ctx.Attr<int>("root");
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
auto ins_names = ctx.Inputs("X");
std::hash<std::string> hasher;
for (size_t i = 0; i < ins.size(); ++i) {
if (root == platform::kInvalidGPUId) {
root = hasher(ins_names[i]) % comm->comms_.size();
}
T* recvbuffer = nullptr;
if (root == gpu_id) {
recvbuffer = outs[i]->mutable_data<T>(ctx.GetPlace());
}
VLOG(1) << "gpu : " << gpu_id << " invoke reduce. send "
<< ins[i]->numel() << " recv " << outs[i]->numel();
PADDLE_ENFORCE(platform::dynload::ncclReduce(
ins[i]->data<T>(), recvbuffer, ins[i]->numel(),
NCCLTypeWrapper<T>::type, reduction_op_, root, comm->comms_[idx],
stream));
PADDLE_ENFORCE(cudaStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished reduce. send "
<< ins[i]->numel() << " recv " << outs[i]->numel();
}
}
};
template <typename T>
class NCCLBcastKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
int root = ctx.Attr<int>("root");
auto* comm = ctx.Input<Communicator>("Communicator");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
// device id
int gpu_id = boost::get<platform::GPUPlace>(ctx.GetPlace()).GetDeviceId();
int idx = comm->GetCommId(gpu_id);
if (idx == root) {
auto ins = ctx.MultiInput<LoDTensor>("X");
for (size_t i = 0; i < ins.size(); ++i) {
VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. send "
<< ins[i]->numel();
VLOG(1) << " before ncclBcast";
PADDLE_ENFORCE(platform::dynload::ncclBcast(
(void*)ins[i]->data<T>(), ins[i]->numel(), NCCLTypeWrapper<T>::type,
root, comm->comms_[idx], stream));
VLOG(1) << " after ncclBcast";
PADDLE_ENFORCE(cudaStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished Bcast.";
}
} else {
auto outs = ctx.MultiOutput<LoDTensor>("Out");
for (size_t i = 0; i < outs.size(); ++i) {
VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. recv buffer "
<< framework::product(outs[i]->dims());
PADDLE_ENFORCE(platform::dynload::ncclBcast(
outs[i]->mutable_data<T>(ctx.GetPlace()), outs[i]->numel(),
NCCLTypeWrapper<T>::type, root, comm->comms_[idx], stream));
PADDLE_ENFORCE(cudaStreamSynchronize(stream));
VLOG(1) << "gpu : " << gpu_id << " finished Bcast. recv "
<< outs[i]->numel();
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(ncclAllReduce, ops::NCCLAllReduceKernel<float>);
REGISTER_OP_GPU_KERNEL(ncclBcast, ops::NCCLBcastKernel<float>);
REGISTER_OP_GPU_KERNEL(ncclReduce, ops::NCCLReduceKernel<float>);
|
3d76d418a4f2ace1b615972cf91dee059896c2e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"{
#include<stdio.h>
#include "qsort_cuda.h"
}
extern "C"{
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
void MyTestFunct()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
printf("%s\n", a);
}
}
| 3d76d418a4f2ace1b615972cf91dee059896c2e5.cu | extern "C"{
#include<stdio.h>
#include "qsort_cuda.h"
}
extern "C"{
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
void MyTestFunct()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
printf("%s\n", a);
}
}
|
8b97e867f59fecbde3288e1d00a11d6cc0a7114e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace stereobp
{
///////////////////////////////////////////////////////////////
/////////////////////// load constants ////////////////////////
///////////////////////////////////////////////////////////////
__constant__ int cndisp;
__constant__ float cmax_data_term;
__constant__ float cdata_weight;
__constant__ float cmax_disc_term;
__constant__ float cdisc_single_jump;
void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump)
{
cudaSafeCall( hipMemcpyToSymbol(cndisp, &ndisp, sizeof(int )) );
cudaSafeCall( hipMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) );
}
///////////////////////////////////////////////////////////////
////////////////////////// comp data //////////////////////////
///////////////////////////////////////////////////////////////
template <int cn> struct PixDiff;
template <> struct PixDiff<1>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *ls;
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
return ::abs((int)l - *rs);
}
uchar l;
};
template <> struct PixDiff<3>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *((uchar3*)ls);
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
const float tr = 0.299f;
const float tg = 0.587f;
const float tb = 0.114f;
float val = tb * ::abs((int)l.x - rs[0]);
val += tg * ::abs((int)l.y - rs[1]);
val += tr * ::abs((int)l.z - rs[2]);
return val;
}
uchar3 l;
};
template <> struct PixDiff<4>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *((uchar4*)ls);
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
const float tr = 0.299f;
const float tg = 0.587f;
const float tb = 0.114f;
uchar4 r = *((uchar4*)rs);
float val = tb * ::abs((int)l.x - r.x);
val += tg * ::abs((int)l.y - r.y);
val += tr * ::abs((int)l.z - r.z);
return val;
}
uchar4 l;
};
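        // Worked example (added for illustration): for a BGR pixel
        // l = (10, 20, 30) and r = (13, 18, 34) the 3/4-channel cost above is
        //   0.114*|10-13| + 0.587*|20-18| + 0.299*|30-34|
        //   = 0.342 + 1.174 + 1.196 = 2.712,
        // i.e. a luminance-weighted L1 distance; comp_data() then scales it by
        // cdata_weight and clamps it at cdata_weight * cmax_data_term.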
template <int cn, typename D>
__global__ void comp_data(const PtrStepSzb left, const PtrStepb right, PtrStep<D> data)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1)
{
const uchar* ls = left.ptr(y) + x * cn;
const PixDiff<cn> pixDiff(ls);
const uchar* rs = right.ptr(y) + x * cn;
D* ds = data.ptr(y) + x;
const size_t disp_step = data.step * left.rows / sizeof(D);
for (int disp = 0; disp < cndisp; disp++)
{
if (x - disp >= 1)
{
float val = pixDiff(rs - disp * cn);
ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
}
else
{
ds[disp * disp_step] = saturate_cast<D>(cdata_weight * cmax_data_term);
}
}
}
}
template<typename T, typename D>
void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream);
template <> void comp_data_gpu<uchar, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<1, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<1, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<3, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<3, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<4, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
hipLaunchKernelGGL(( comp_data<4, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////
//////////////////////// data step down ///////////////////////
///////////////////////////////////////////////////////////////
template <typename T>
__global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst_cols && y < dst_rows)
{
for (int d = 0; d < cndisp; ++d)
{
float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)];
dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+0)];
dst_reg += src.ptr(d * src_rows + (2*y+0))[(2*x+1)];
dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+1)];
dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg);
}
}
}
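        // Added illustration: for every disparity d the coarse cost at (x, y)
        // is the plain sum of the 2x2 fine-level block it covers, e.g. fine
        // values {1, 2, 3, 4} -> coarse value 10 (saturate_cast'ed to T),
        // which builds the next level of the data-cost pyramid.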
template<typename T>
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(dst_cols, threads.x);
grid.y = divUp(dst_rows, threads.y);
hipLaunchKernelGGL(( data_step_down<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream);
template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////// level up messages ////////////////////////
///////////////////////////////////////////////////////////////
template <typename T>
__global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst_cols && y < dst_rows)
{
const size_t dst_disp_step = dst.step * dst_rows / sizeof(T);
const size_t src_disp_step = src.step * src_rows / sizeof(T);
T* dstr = dst.ptr(y ) + x;
const T* srcr = src.ptr(y/2) + x/2;
for (int d = 0; d < cndisp; ++d)
dstr[d * dst_disp_step] = srcr[d * src_disp_step];
}
}
template <typename T>
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(dst_cols, threads.x);
grid.y = divUp(dst_rows, threads.y);
int src_idx = (dst_idx + 1) & 1;
hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mus[src_idx], (PtrStepSz<T>)mus[dst_idx]);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mds[src_idx], (PtrStepSz<T>)mds[dst_idx]);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mls[src_idx], (PtrStepSz<T>)mls[dst_idx]);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mrs[src_idx], (PtrStepSz<T>)mrs[dst_idx]);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream);
template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////// calc all iterations /////////////////////
///////////////////////////////////////////////////////////////
template <typename T>
__device__ void calc_min_linear_penalty(T* dst, size_t step)
{
float prev = dst[0];
float cur;
for (int disp = 1; disp < cndisp; ++disp)
{
prev += cdisc_single_jump;
cur = dst[step * disp];
if (prev < cur)
{
cur = prev;
dst[step * disp] = saturate_cast<T>(prev);
}
prev = cur;
}
prev = dst[(cndisp - 1) * step];
for (int disp = cndisp - 2; disp >= 0; disp--)
{
prev += cdisc_single_jump;
cur = dst[step * disp];
if (prev < cur)
{
cur = prev;
dst[step * disp] = saturate_cast<T>(prev);
}
prev = cur;
}
}
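        // Added worked example: the forward/backward sweeps above are the 1-D
        // distance transform for a linear cost with slope cdisc_single_jump,
        // i.e. afterwards dst[i] = min_j ( dst_old[j] + cdisc_single_jump * |i - j| ).
        // With cdisc_single_jump = 1 and dst = {5, 0, 4, 9}:
        //   forward pass  -> {5, 0, 1, 2}
        //   backward pass -> {1, 0, 1, 2}
        // which matches the closed form for every entry.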
template <typename T>
__device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step)
{
float minimum = device::numeric_limits<float>::max();
for(int i = 0; i < cndisp; ++i)
{
float dst_reg = msg1[msg_disp_step * i];
dst_reg += msg2[msg_disp_step * i];
dst_reg += msg3[msg_disp_step * i];
dst_reg += data[data_disp_step * i];
if (dst_reg < minimum)
minimum = dst_reg;
dst[msg_disp_step * i] = saturate_cast<T>(dst_reg);
}
calc_min_linear_penalty(dst, msg_disp_step);
minimum += cmax_disc_term;
float sum = 0;
for(int i = 0; i < cndisp; ++i)
{
float dst_reg = dst[msg_disp_step * i];
if (dst_reg > minimum)
{
dst_reg = minimum;
dst[msg_disp_step * i] = saturate_cast<T>(minimum);
}
sum += dst_reg;
}
sum /= cndisp;
for(int i = 0; i < cndisp; ++i)
dst[msg_disp_step * i] -= sum;
}
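        // Added sketch of the computation above: writing
        //   h(d) = msg1(d) + msg2(d) + msg3(d) + data(d),
        // the outgoing message is
        //   m(d) = min( DT[h](d), min_d' h(d') + cmax_disc_term ),
        // where DT is the linear distance transform from calc_min_linear_penalty,
        // and the final loop subtracts the mean over d so the messages stay
        // zero-mean - the usual truncated-linear smoothness term of loopy BP.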
template <typename T>
__global__ void one_iteration(int t, int elem_step, T* u, T* d, T* l, T* r, const PtrStep<T> data, int cols, int rows)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))
{
T* us = u + y * elem_step + x;
T* ds = d + y * elem_step + x;
T* ls = l + y * elem_step + x;
T* rs = r + y * elem_step + x;
const T* dt = data.ptr(y) + x;
size_t msg_disp_step = elem_step * rows;
size_t data_disp_step = data.step * rows / sizeof(T);
message(us + elem_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step);
message(ds - elem_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step);
message(us + elem_step, ds - elem_step, rs - 1, dt, rs, msg_disp_step, data_disp_step);
message(us + elem_step, ds - elem_step, ls + 1, dt, ls, msg_disp_step, data_disp_step);
}
}
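        // Added note: the ((y + t) & 1) offset interleaves updates in a
        // red/black (checkerboard) pattern, so each sweep relaxes half of the
        // pixels using the messages their four neighbours wrote on the
        // previous sweep; the host loop below therefore launches the kernel
        // 'iters' times with alternating t.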
template <typename T>
void calc_all_iterations_gpu(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d,
const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(cols, threads.x << 1);
grid.y = divUp(rows, threads.y);
int elem_step = u.step/sizeof(T);
for(int t = 0; t < iters; ++t)
{
hipLaunchKernelGGL(( one_iteration<T>), dim3(grid), dim3(threads), 0, stream, t, elem_step, (T*)u.data, (T*)d.data, (T*)l.data, (T*)r.data, (PtrStepSz<T>)data, cols, rows);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream);
template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////////////// output ////////////////////////////
///////////////////////////////////////////////////////////////
template <typename T>
__global__ void output(const int elem_step, const T* u, const T* d, const T* l, const T* r, const T* data,
PtrStepSz<short> disp)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
{
const T* us = u + (y + 1) * elem_step + x;
const T* ds = d + (y - 1) * elem_step + x;
const T* ls = l + y * elem_step + (x + 1);
const T* rs = r + y * elem_step+ (x - 1);
const T* dt = data + y * elem_step + x;
size_t disp_step = disp.rows * elem_step;
int best = 0;
float best_val = numeric_limits<float>::max();
for (int d = 0; d < cndisp; ++d)
{
float val = us[d * disp_step];
val += ds[d * disp_step];
val += ls[d * disp_step];
val += rs[d * disp_step];
val += dt[d * disp_step];
if (val < best_val)
{
best_val = val;
best = d;
}
}
disp.ptr(y)[x] = saturate_cast<short>(best);
}
}
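        // Added note: this is the winner-take-all readout - for each pixel it
        // sums the four incoming messages plus the data cost per disparity and
        // stores the arg-min disparity in 'disp'.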
template <typename T>
void output_gpu(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data,
const PtrStepSz<short>& disp, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x);
grid.y = divUp(disp.rows, threads.y);
int elem_step = static_cast<int>(u.step/sizeof(T));
hipLaunchKernelGGL(( output<T>), dim3(grid), dim3(threads), 0, stream, elem_step, (const T*)u.data, (const T*)d.data, (const T*)l.data, (const T*)r.data, (const T*)data.data, disp);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void output_gpu<short>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, hipStream_t stream);
template void output_gpu<float>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, hipStream_t stream);
} // namespace stereobp
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | 8b97e867f59fecbde3288e1d00a11d6cc0a7114e.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace stereobp
{
///////////////////////////////////////////////////////////////
/////////////////////// load constants ////////////////////////
///////////////////////////////////////////////////////////////
__constant__ int cndisp;
__constant__ float cmax_data_term;
__constant__ float cdata_weight;
__constant__ float cmax_disc_term;
__constant__ float cdisc_single_jump;
void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump)
{
cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int )) );
cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) );
}
///////////////////////////////////////////////////////////////
////////////////////////// comp data //////////////////////////
///////////////////////////////////////////////////////////////
template <int cn> struct PixDiff;
template <> struct PixDiff<1>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *ls;
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
return ::abs((int)l - *rs);
}
uchar l;
};
template <> struct PixDiff<3>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *((uchar3*)ls);
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
const float tr = 0.299f;
const float tg = 0.587f;
const float tb = 0.114f;
float val = tb * ::abs((int)l.x - rs[0]);
val += tg * ::abs((int)l.y - rs[1]);
val += tr * ::abs((int)l.z - rs[2]);
return val;
}
uchar3 l;
};
template <> struct PixDiff<4>
{
__device__ __forceinline__ PixDiff(const uchar* ls)
{
l = *((uchar4*)ls);
}
__device__ __forceinline__ float operator()(const uchar* rs) const
{
const float tr = 0.299f;
const float tg = 0.587f;
const float tb = 0.114f;
uchar4 r = *((uchar4*)rs);
float val = tb * ::abs((int)l.x - r.x);
val += tg * ::abs((int)l.y - r.y);
val += tr * ::abs((int)l.z - r.z);
return val;
}
uchar4 l;
};
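        // comp_data: for every interior pixel of the left image, compute the data cost against
        // the right image for each of the cndisp candidate disparities as a weighted absolute
        // difference truncated at cdata_weight * cmax_data_term; disparities that fall outside
        // the image (x - disp < 1) receive the truncation value.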
template <int cn, typename D>
__global__ void comp_data(const PtrStepSzb left, const PtrStepb right, PtrStep<D> data)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1)
{
const uchar* ls = left.ptr(y) + x * cn;
const PixDiff<cn> pixDiff(ls);
const uchar* rs = right.ptr(y) + x * cn;
D* ds = data.ptr(y) + x;
const size_t disp_step = data.step * left.rows / sizeof(D);
for (int disp = 0; disp < cndisp; disp++)
{
if (x - disp >= 1)
{
float val = pixDiff(rs - disp * cn);
ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
}
else
{
ds[disp * disp_step] = saturate_cast<D>(cdata_weight * cmax_data_term);
}
}
}
}
template<typename T, typename D>
void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream);
template <> void comp_data_gpu<uchar, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<1, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<1, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<3, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar3, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<3, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<4, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <> void comp_data_gpu<uchar4, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(left.cols, threads.x);
grid.y = divUp(left.rows, threads.y);
comp_data<4, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////
//////////////////////// data step down ///////////////////////
///////////////////////////////////////////////////////////////
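        // data_step_down: build the next (coarser) pyramid level by summing each 2x2 block of
        // data costs from the finer level, independently for every disparity.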
template <typename T>
__global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst_cols && y < dst_rows)
{
for (int d = 0; d < cndisp; ++d)
{
float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)];
dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+0)];
dst_reg += src.ptr(d * src_rows + (2*y+0))[(2*x+1)];
dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+1)];
dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg);
}
}
}
template<typename T>
void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(dst_cols, threads.x);
grid.y = divUp(dst_rows, threads.y);
data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////// level up messages ////////////////////////
///////////////////////////////////////////////////////////////
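        // level_up_message: initialise the messages of a finer level by nearest-neighbour
        // upsampling of the coarser level's messages, for every disparity.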
template <typename T>
__global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < dst_cols && y < dst_rows)
{
const size_t dst_disp_step = dst.step * dst_rows / sizeof(T);
const size_t src_disp_step = src.step * src_rows / sizeof(T);
T* dstr = dst.ptr(y ) + x;
const T* srcr = src.ptr(y/2) + x/2;
for (int d = 0; d < cndisp; ++d)
dstr[d * dst_disp_step] = srcr[d * src_disp_step];
}
}
template <typename T>
void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(dst_cols, threads.x);
grid.y = divUp(dst_rows, threads.y);
int src_idx = (dst_idx + 1) & 1;
level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mus[src_idx], (PtrStepSz<T>)mus[dst_idx]);
cudaSafeCall( cudaGetLastError() );
level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mds[src_idx], (PtrStepSz<T>)mds[dst_idx]);
cudaSafeCall( cudaGetLastError() );
level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mls[src_idx], (PtrStepSz<T>)mls[dst_idx]);
cudaSafeCall( cudaGetLastError() );
level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mrs[src_idx], (PtrStepSz<T>)mrs[dst_idx]);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream);
template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream);
///////////////////////////////////////////////////////////////
//////////////////// calc all iterations /////////////////////
///////////////////////////////////////////////////////////////
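        // calc_min_linear_penalty: O(ndisp) lower-envelope (distance-transform) pass for the
        // truncated-linear smoothness cost -- a forward and a backward sweep, each adding
        // cdisc_single_jump per disparity step and keeping the running minimum.
        // message: sums three incoming messages with the data cost, applies the pass above,
        // truncates at (minimum + cmax_disc_term) and normalises the result by subtracting
        // its mean over all disparities.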
template <typename T>
__device__ void calc_min_linear_penalty(T* dst, size_t step)
{
float prev = dst[0];
float cur;
for (int disp = 1; disp < cndisp; ++disp)
{
prev += cdisc_single_jump;
cur = dst[step * disp];
if (prev < cur)
{
cur = prev;
dst[step * disp] = saturate_cast<T>(prev);
}
prev = cur;
}
prev = dst[(cndisp - 1) * step];
for (int disp = cndisp - 2; disp >= 0; disp--)
{
prev += cdisc_single_jump;
cur = dst[step * disp];
if (prev < cur)
{
cur = prev;
dst[step * disp] = saturate_cast<T>(prev);
}
prev = cur;
}
}
template <typename T>
__device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step)
{
float minimum = device::numeric_limits<float>::max();
for(int i = 0; i < cndisp; ++i)
{
float dst_reg = msg1[msg_disp_step * i];
dst_reg += msg2[msg_disp_step * i];
dst_reg += msg3[msg_disp_step * i];
dst_reg += data[data_disp_step * i];
if (dst_reg < minimum)
minimum = dst_reg;
dst[msg_disp_step * i] = saturate_cast<T>(dst_reg);
}
calc_min_linear_penalty(dst, msg_disp_step);
minimum += cmax_disc_term;
float sum = 0;
for(int i = 0; i < cndisp; ++i)
{
float dst_reg = dst[msg_disp_step * i];
if (dst_reg > minimum)
{
dst_reg = minimum;
dst[msg_disp_step * i] = saturate_cast<T>(minimum);
}
sum += dst_reg;
}
sum /= cndisp;
for(int i = 0; i < cndisp; ++i)
dst[msg_disp_step * i] -= sum;
}
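        // one_iteration: updates the four outgoing messages of the pixels on one colour of a
        // red-black (checkerboard) schedule; the colour alternates with the iteration index t.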
template <typename T>
__global__ void one_iteration(int t, int elem_step, T* u, T* d, T* l, T* r, const PtrStep<T> data, int cols, int rows)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1))
{
T* us = u + y * elem_step + x;
T* ds = d + y * elem_step + x;
T* ls = l + y * elem_step + x;
T* rs = r + y * elem_step + x;
const T* dt = data.ptr(y) + x;
size_t msg_disp_step = elem_step * rows;
size_t data_disp_step = data.step * rows / sizeof(T);
message(us + elem_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step);
message(ds - elem_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step);
message(us + elem_step, ds - elem_step, rs - 1, dt, rs, msg_disp_step, data_disp_step);
message(us + elem_step, ds - elem_step, ls + 1, dt, ls, msg_disp_step, data_disp_step);
}
}
template <typename T>
void calc_all_iterations_gpu(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d,
const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(cols, threads.x << 1);
grid.y = divUp(rows, threads.y);
int elem_step = u.step/sizeof(T);
for(int t = 0; t < iters; ++t)
{
one_iteration<T><<<grid, threads, 0, stream>>>(t, elem_step, (T*)u.data, (T*)d.data, (T*)l.data, (T*)r.data, (PtrStepSz<T>)data, cols, rows);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream);
template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream);
///////////////////////////////////////////////////////////////
/////////////////////////// output ////////////////////////////
///////////////////////////////////////////////////////////////
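        // For each interior pixel, sum the four messages arriving from its neighbours plus
        // the data cost for every candidate disparity, and write the disparity index with
        // the smallest total to the output map.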
template <typename T>
__global__ void output(const int elem_step, const T* u, const T* d, const T* l, const T* r, const T* data,
PtrStepSz<short> disp)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1)
{
const T* us = u + (y + 1) * elem_step + x;
const T* ds = d + (y - 1) * elem_step + x;
const T* ls = l + y * elem_step + (x + 1);
                const T* rs = r + y * elem_step + (x - 1);
const T* dt = data + y * elem_step + x;
size_t disp_step = disp.rows * elem_step;
int best = 0;
float best_val = numeric_limits<float>::max();
for (int d = 0; d < cndisp; ++d)
{
float val = us[d * disp_step];
val += ds[d * disp_step];
val += ls[d * disp_step];
val += rs[d * disp_step];
val += dt[d * disp_step];
if (val < best_val)
{
best_val = val;
best = d;
}
}
disp.ptr(y)[x] = saturate_cast<short>(best);
}
}
template <typename T>
void output_gpu(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data,
const PtrStepSz<short>& disp, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x);
grid.y = divUp(disp.rows, threads.y);
int elem_step = static_cast<int>(u.step/sizeof(T));
output<T><<<grid, threads, 0, stream>>>(elem_step, (const T*)u.data, (const T*)d.data, (const T*)l.data, (const T*)r.data, (const T*)data.data, disp);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void output_gpu<short>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream);
template void output_gpu<float>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream);
} // namespace stereobp
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |
fb63e6c8a3a0b3269ead9ff70f544e1a1ce74c66.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "scaled_upper_triang_masked_softmax.h"
#include "type_shim.h"
namespace multihead_attn {
namespace fused_softmax {
namespace scaled_upper_triang_masked_softmax {
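// fwd_cuda launches the fused forward kernel: a scaled softmax over the last dimension of a
// [attn_batches, seq_len, seq_len] tensor with the upper triangle masked out (the causal
// attention mask). bwd_cuda computes the matching gradient in place inside the output_grads
// buffer. Both dispatch on fp16 and bf16 element types.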
torch::Tensor fwd_cuda(
torch::Tensor const& input,
float scale_factor)
{
// input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
const int attn_batches = input.size(0);
const int seq_len = input.size(1);
TORCH_INTERNAL_ASSERT(seq_len <= 16384);
// Output
auto act_options = input.options().requires_grad(false);
torch::Tensor softmax_results =
torch::empty({attn_batches, seq_len, seq_len}, act_options);
// Softmax Intermediate Result Ptr
void* input_ptr = static_cast<void*>(input.data_ptr());
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
DISPATCH_HALF_AND_BFLOAT(
input.scalar_type(),
"dispatch_scaled_upper_triang_masked_softmax_forward",
dispatch_scaled_upper_triang_masked_softmax_forward<scalar_t, scalar_t, float>(
reinterpret_cast<scalar_t*>(softmax_results_ptr),
reinterpret_cast<const scalar_t*>(input_ptr),
scale_factor,
seq_len,
seq_len,
attn_batches);
);
return softmax_results;
}
torch::Tensor bwd_cuda(
torch::Tensor const& output_grads_,
torch::Tensor const& softmax_results_,
float scale_factor) {
auto output_grads = output_grads_.contiguous();
auto softmax_results = softmax_results_.contiguous();
//output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
const int attn_batches = output_grads.size(0);
const int seq_len = output_grads.size(1);
TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2));
void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
//Softmax Grad
DISPATCH_HALF_AND_BFLOAT(
output_grads_.scalar_type(),
"dispatch_scaled_upper_triang_masked_softmax_backward",
dispatch_scaled_upper_triang_masked_softmax_backward<scalar_t, scalar_t, float>(
reinterpret_cast<scalar_t*>(output_grads_ptr),
reinterpret_cast<scalar_t*>(output_grads_ptr),
reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
scale_factor,
seq_len,
seq_len,
attn_batches);
);
//backward pass is completely in-place
return output_grads;
}
}
}
}
| fb63e6c8a3a0b3269ead9ff70f544e1a1ce74c66.cu | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "scaled_upper_triang_masked_softmax.h"
#include "type_shim.h"
namespace multihead_attn {
namespace fused_softmax {
namespace scaled_upper_triang_masked_softmax {
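// fwd_cuda launches the fused forward kernel: a scaled softmax over the last dimension of a
// [attn_batches, seq_len, seq_len] tensor with the upper triangle masked out (the causal
// attention mask). bwd_cuda computes the matching gradient in place inside the output_grads
// buffer. Both dispatch on fp16 and bf16 element types.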
torch::Tensor fwd_cuda(
torch::Tensor const& input,
float scale_factor)
{
// input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
const int attn_batches = input.size(0);
const int seq_len = input.size(1);
TORCH_INTERNAL_ASSERT(seq_len <= 16384);
// Output
auto act_options = input.options().requires_grad(false);
torch::Tensor softmax_results =
torch::empty({attn_batches, seq_len, seq_len}, act_options);
// Softmax Intermediate Result Ptr
void* input_ptr = static_cast<void*>(input.data_ptr());
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
DISPATCH_HALF_AND_BFLOAT(
input.scalar_type(),
"dispatch_scaled_upper_triang_masked_softmax_forward",
dispatch_scaled_upper_triang_masked_softmax_forward<scalar_t, scalar_t, float>(
reinterpret_cast<scalar_t*>(softmax_results_ptr),
reinterpret_cast<const scalar_t*>(input_ptr),
scale_factor,
seq_len,
seq_len,
attn_batches);
);
return softmax_results;
}
torch::Tensor bwd_cuda(
torch::Tensor const& output_grads_,
torch::Tensor const& softmax_results_,
float scale_factor) {
auto output_grads = output_grads_.contiguous();
auto softmax_results = softmax_results_.contiguous();
//output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
const int attn_batches = output_grads.size(0);
const int seq_len = output_grads.size(1);
TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2));
void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
//Softmax Grad
DISPATCH_HALF_AND_BFLOAT(
output_grads_.scalar_type(),
"dispatch_scaled_upper_triang_masked_softmax_backward",
dispatch_scaled_upper_triang_masked_softmax_backward<scalar_t, scalar_t, float>(
reinterpret_cast<scalar_t*>(output_grads_ptr),
reinterpret_cast<scalar_t*>(output_grads_ptr),
reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
scale_factor,
seq_len,
seq_len,
attn_batches);
);
//backward pass is completely in-place
return output_grads;
}
}
}
}
|
c5d135500bda86cb3139758a46f5b1110efb35dd.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
/*#include <stdlib.h> // for sleep(uint)
*/
template<int DataLayout>
static void test_full_reductions() {
printf("\n\n\n\n ---- Testing full reductions (CPU) ----\n\n\n\n");
// set number of rows and columns of input tensor
const int N = 64;
// initialize input tensor
Tensor<float, 2, DataLayout> in(N, N);
in.setRandom();
// initialize output tensor
Tensor<float, 0, DataLayout> full_redux;
// perform full sum reduction on CPU
full_redux = in.sum();
printf("\n\n\n\n ---- Testing full reductions (GPU) ----\n\n\n\n");
// CUDA stuff
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
// data type sizes
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = full_redux.size() * sizeof(float);
// allocate floats on GPU
float* gpu_in_ptr = static_cast<float*>(gpu_device.allocate(in_bytes));
float* gpu_out_ptr = static_cast<float*>(gpu_device.allocate(out_bytes));
// copy input tensor data from host to device
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<float, 2, DataLayout> > in_gpu(gpu_in_ptr, N, N);
TensorMap<Tensor<float, 0, DataLayout> > out_gpu(gpu_out_ptr);
// perform full sum reduction on GPU (device)
out_gpu.device(gpu_device) = in_gpu.sum();
// initialize output tensor for gpu computation
Tensor<float, 0, DataLayout> full_redux_gpu;
// copy memory from device to host
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  // wait for the queued reduction and device-to-host copy to finish before reading the result on the host
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
// cleanup memory
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_partial_reductions(){ // jzuern partial reduction
printf("\n\n\n\n ---- Testing partial reductions (CPU) ----\n\n\n\n");
  hipDeviceSetLimit(hipLimitPrintfFifoSize, 1000*1024*1024); // ~1 GB device printf buffer
// set number of rows and columns of input tensor
const int N = 8;
// initialize input tensor
Tensor<float, 4, ColMajor> in(N, N, N, N);
in.setRandom();
// initialize output tensor
Tensor<float, 1, ColMajor> part_redux(N);
// define eigen array of dimensions to reduce along
Eigen::array<int, 3> dims({1,2,3}); // 0,1,2 works. 1,2,3 does not
// 0th dimension is innermost
// nth dimension is outermost
// perform partial sum reduction on CPU
part_redux = in.sum(dims);
printf("\n\n\n\n ---- Testing partial reductions (GPU) ----\n\n\n\n");
// CUDA stuff
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
// data type sizes
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = part_redux.size() * sizeof(float);
// allocate floats on GPU
float* gpu_in_ptr = static_cast<float*>(gpu_device.allocate(in_bytes));
float* gpu_out_ptr = static_cast<float*>(gpu_device.allocate(out_bytes));
// copy input tensor data from host to device
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<float, 4, ColMajor> > in_gpu (gpu_in_ptr, N, N, N, N);
TensorMap<Tensor<float, 1, ColMajor> > out_gpu(gpu_out_ptr,N);
// perform partial sum reduction on GPU (device)
out_gpu.device(gpu_device) = in_gpu.sum(dims);
// initialize output tensor for gpu computation
Tensor<float, 1, ColMajor> part_redux_gpu(N);
// copy memory from device to host
gpu_device.memcpyDeviceToHost(part_redux_gpu.data(), gpu_out_ptr, out_bytes); // version 1
//assert(hipMemcpyAsync(part_redux.data(), gpu_out_ptr, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); //version 2
  // wait for the queued reduction and device-to-host copy to finish before reading the result on the host
gpu_device.synchronize(); // version 1
//assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); // version 2
// cleanup memory
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
void test_cuda_reduction_steiner()
{
Tensor<float, 4> in1(23,6,97,5);
Tensor<float, 2> out(97,5);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
hipMalloc((void**)(&d_in1), in1_bytes);
hipMalloc((void**)(&d_out), out_bytes);
hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 23,6,97,5);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 97,5);
array<int, 2> reduction_axis;
reduction_axis[0] = 0;
reduction_axis[1] = 1;
gpu_out.device(gpu_device) = gpu_in1.sum(reduction_axis);
assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
hipFree(d_in1);
hipFree(d_out);
}
void test_cxx11_tensor_reduction_cuda() {
//CALL_SUBTEST(test_full_reductions<ColMajor>());
CALL_SUBTEST(test_partial_reductions());
//CALL_SUBTEST(test_cuda_reduction_steiner());
}
| c5d135500bda86cb3139758a46f5b1110efb35dd.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_reduction_cuda
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
/*#include <stdlib.h> // for sleep(uint)
*/
template<int DataLayout>
static void test_full_reductions() {
printf("\n\n\n\n ---- Testing full reductions (CPU) ----\n\n\n\n");
// set number of rows and columns of input tensor
const int N = 64;
// initialize input tensor
Tensor<float, 2, DataLayout> in(N, N);
in.setRandom();
// initialize output tensor
Tensor<float, 0, DataLayout> full_redux;
// perform full sum reduction on CPU
full_redux = in.sum();
printf("\n\n\n\n ---- Testing full reductions (GPU) ----\n\n\n\n");
// CUDA stuff
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
// data type sizes
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = full_redux.size() * sizeof(float);
// allocate floats on GPU
float* gpu_in_ptr = static_cast<float*>(gpu_device.allocate(in_bytes));
float* gpu_out_ptr = static_cast<float*>(gpu_device.allocate(out_bytes));
// copy input tensor data from host to device
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<float, 2, DataLayout> > in_gpu(gpu_in_ptr, N, N);
TensorMap<Tensor<float, 0, DataLayout> > out_gpu(gpu_out_ptr);
// perform full sum reduction on GPU (device)
out_gpu.device(gpu_device) = in_gpu.sum();
// initialize output tensor for gpu computation
Tensor<float, 0, DataLayout> full_redux_gpu;
// copy memory from device to host
gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes);
  // wait for the queued reduction and device-to-host copy to finish before reading the result on the host
gpu_device.synchronize();
// Check that the CPU and GPU reductions return the same result.
VERIFY_IS_APPROX(full_redux(), full_redux_gpu());
// cleanup memory
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
static void test_partial_reductions(){ // jzuern partial reduction
printf("\n\n\n\n ---- Testing partial reductions (CPU) ----\n\n\n\n");
  cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1000*1024*1024); // ~1 GB device printf buffer
// set number of rows and columns of input tensor
const int N = 8;
// initialize input tensor
Tensor<float, 4, ColMajor> in(N, N, N, N);
in.setRandom();
// initialize output tensor
Tensor<float, 1, ColMajor> part_redux(N);
// define eigen array of dimensions to reduce along
Eigen::array<int, 3> dims({1,2,3}); // 0,1,2 works. 1,2,3 does not
// 0th dimension is innermost
// nth dimension is outermost
// perform partial sum reduction on CPU
part_redux = in.sum(dims);
printf("\n\n\n\n ---- Testing partial reductions (GPU) ----\n\n\n\n");
// CUDA stuff
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
// data type sizes
std::size_t in_bytes = in.size() * sizeof(float);
std::size_t out_bytes = part_redux.size() * sizeof(float);
// allocate floats on GPU
float* gpu_in_ptr = static_cast<float*>(gpu_device.allocate(in_bytes));
float* gpu_out_ptr = static_cast<float*>(gpu_device.allocate(out_bytes));
// copy input tensor data from host to device
gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes);
TensorMap<Tensor<float, 4, ColMajor> > in_gpu (gpu_in_ptr, N, N, N, N);
TensorMap<Tensor<float, 1, ColMajor> > out_gpu(gpu_out_ptr,N);
// perform partial sum reduction on GPU (device)
out_gpu.device(gpu_device) = in_gpu.sum(dims);
// initialize output tensor for gpu computation
Tensor<float, 1, ColMajor> part_redux_gpu(N);
// copy memory from device to host
gpu_device.memcpyDeviceToHost(part_redux_gpu.data(), gpu_out_ptr, out_bytes); // version 1
//assert(cudaMemcpyAsync(part_redux.data(), gpu_out_ptr, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); //version 2
  // wait for the queued reduction and device-to-host copy to finish before reading the result on the host
gpu_device.synchronize(); // version 1
//assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); // version 2
// cleanup memory
gpu_device.deallocate(gpu_in_ptr);
gpu_device.deallocate(gpu_out_ptr);
}
void test_cuda_reduction_steiner()
{
Tensor<float, 4> in1(23,6,97,5);
Tensor<float, 2> out(97,5);
in1.setRandom();
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_out;
cudaMalloc((void**)(&d_in1), in1_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 23,6,97,5);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 97,5);
array<int, 2> reduction_axis;
reduction_axis[0] = 0;
reduction_axis[1] = 1;
gpu_out.device(gpu_device) = gpu_in1.sum(reduction_axis);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
cudaFree(d_in1);
cudaFree(d_out);
}
void test_cxx11_tensor_reduction_cuda() {
//CALL_SUBTEST(test_full_reductions<ColMajor>());
CALL_SUBTEST(test_partial_reductions());
//CALL_SUBTEST(test_cuda_reduction_steiner());
}
|
ca214b83c2910beb29e8b7c6919776b197112177.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
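// Initialise a, b and c in three independent non-default (blocking) streams, then run the
// addition on the legacy default stream, which waits for those streams to drain first. The
// unified-memory buffers are prefetched to the GPU up front, and c back to the CPU before
// verification, to avoid on-demand page migration inside the kernels.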
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
hipStream_t streamA, streamB, streamC;
hipStreamCreate(&streamA);
hipStreamCreate(&streamB);
hipStreamCreate(&streamC);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, streamA, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, streamB, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, streamC, 0, c, N);
hipStreamDestroy(streamA);
hipStreamDestroy(streamB);
hipStreamDestroy(streamC);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| ca214b83c2910beb29e8b7c6919776b197112177.cu | #include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
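// Initialise a, b and c in three independent non-default (blocking) streams, then run the
// addition on the legacy default stream, which waits for those streams to drain first. The
// unified-memory buffers are prefetched to the GPU up front, and c back to the CPU before
// verification, to avoid on-demand page migration inside the kernels.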
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
cudaStream_t streamA, streamB, streamC;
cudaStreamCreate(&streamA);
cudaStreamCreate(&streamB);
cudaStreamCreate(&streamC);
initWith<<<numberOfBlocks, threadsPerBlock, 0, streamA>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, streamB>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, streamC>>>(0, c, N);
cudaStreamDestroy(streamA);
cudaStreamDestroy(streamB);
cudaStreamDestroy(streamC);
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
815909ccd4ae18e67a278ffef5a5099c8c7b1e89.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2023 by XGBoost contributors
*/
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t
#include <vector> // std::vector
#include "../common/linalg_op.h" // ElementWiseKernel,cbegin,cend
#include "../common/quantile_loss_utils.h" // QuantileLossParam
#include "../common/stats.h" // Quantile,WeightedQuantile
#include "adaptive.h" // UpdateTreeLeaf
#include "dmlc/parameter.h" // DMLC_DECLARE_PARAMETER
#include "init_estimation.h" // CheckInitInputs
#include "xgboost/base.h" // GradientPair,XGBOOST_DEVICE,bst_target_t
#include "xgboost/data.h" // MetaInfo
#include "xgboost/host_device_vector.h" // HostDeviceVector
#include "xgboost/json.h" // Json,String,ToJson,FromJson
#include "xgboost/linalg.h" // Tensor,MakeTensorView,MakeVec
#include "xgboost/objective.h" // ObjFunction
#include "xgboost/parameter.h" // XGBoostParameter
#if defined(XGBOOST_USE_CUDA)
#include "../common/linalg_op.cuh" // ElementWiseKernel
#include "../common/stats.cuh" // SegmentedQuantile
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
class QuantileRegression : public ObjFunction {
common::QuantileLossParam param_;
HostDeviceVector<float> alpha_;
bst_target_t Targets(MetaInfo const& info) const override {
auto const& alpha = param_.quantile_alpha.Get();
CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured.";
CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target is not yet supported by the quantile loss.";
CHECK(!alpha.empty());
// We have some placeholders for multi-target in the quantile loss. But it's not
// supported as the gbtree doesn't know how to slice the gradient and there's no 3-dim
// model shape in general.
    auto n_y = std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
return alpha_.Size() * n_y;
}
public:
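  // Pinball (quantile) loss gradient: for residual d = predt - label and sample weight w,
  // the gradient is (1 - alpha) * w when d >= 0 and -alpha * w otherwise; the Hessian slot
  // is filled with w, since the loss itself is piecewise linear.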
void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info, std::int32_t iter,
HostDeviceVector<GradientPair>* out_gpair) override {
if (iter == 0) {
CheckInitInputs(info);
}
CHECK_EQ(param_.quantile_alpha.Get().size(), alpha_.Size());
using SizeT = decltype(info.num_row_);
SizeT n_targets = this->Targets(info);
SizeT n_alphas = alpha_.Size();
CHECK_NE(n_alphas, 0);
CHECK_GE(n_targets, n_alphas);
CHECK_EQ(preds.Size(), info.num_row_ * n_targets);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(n_targets * info.num_row_);
auto gpair =
linalg::MakeTensorView(ctx_->IsCPU() ? out_gpair->HostSpan() : out_gpair->DeviceSpan(),
{info.num_row_, n_alphas, n_targets / n_alphas}, ctx_->gpu_id);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
auto n_samples = info.num_row_;
alpha_.SetDevice(ctx_->gpu_id);
auto alpha = ctx_->IsCPU() ? alpha_.ConstHostSpan() : alpha_.ConstDeviceSpan();
linalg::ElementWiseKernel(
ctx_, gpair, [=] XGBOOST_DEVICE(std::size_t i, GradientPair const&) mutable {
auto idx = linalg::UnravelIndex(static_cast<std::size_t>(i),
{static_cast<std::size_t>(n_samples),
static_cast<std::size_t>(alpha.size()),
static_cast<std::size_t>(n_targets / alpha.size())});
// std::tie is not available for cuda kernel.
std::size_t sample_id = std::get<0>(idx);
std::size_t quantile_id = std::get<1>(idx);
std::size_t target_id = std::get<2>(idx);
auto d = predt(i) - labels(sample_id, target_id);
auto h = weight[sample_id];
if (d >= 0) {
auto g = (1.0f - alpha[quantile_id]) * weight[sample_id];
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
} else {
auto g = (-alpha[quantile_id] * weight[sample_id]);
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
}
});
}
void InitEstimation(MetaInfo const& info, linalg::Vector<float>* base_score) const override {
CHECK(!alpha_.Empty());
auto n_targets = this->Targets(info);
base_score->SetDevice(ctx_->gpu_id);
base_score->Reshape(n_targets);
double sw{0};
if (ctx_->IsCPU()) {
auto quantiles = base_score->HostView();
auto h_weights = info.weights_.ConstHostVector();
if (info.weights_.Empty()) {
sw = info.num_row_;
} else {
sw = std::accumulate(std::cbegin(h_weights), std::cend(h_weights), 0.0);
}
for (bst_target_t t{0}; t < n_targets; ++t) {
auto alpha = param_.quantile_alpha[t];
auto h_labels = info.labels.HostView();
if (h_weights.empty()) {
quantiles(t) =
common::Quantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels));
} else {
CHECK_EQ(h_weights.size(), h_labels.Size());
quantiles(t) = common::WeightedQuantile(ctx_, alpha, linalg::cbegin(h_labels),
linalg::cend(h_labels), std::cbegin(h_weights));
}
}
} else {
#if defined(XGBOOST_USE_CUDA)
alpha_.SetDevice(ctx_->gpu_id);
auto d_alpha = alpha_.ConstDeviceSpan();
auto d_labels = info.labels.View(ctx_->gpu_id);
auto seg_it = dh::MakeTransformIterator<std::size_t>(
thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) { return i * d_labels.Shape(0); });
CHECK_EQ(d_labels.Shape(1), 1);
auto val_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i % d_labels.Shape(0);
return d_labels(sample_idx, 0);
});
auto n = d_labels.Size() * d_alpha.size();
CHECK_EQ(base_score->Size(), d_alpha.size());
if (info.weights_.Empty()) {
common::SegmentedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it,
val_it + n, base_score->Data());
sw = info.num_row_;
} else {
info.weights_.SetDevice(ctx_->gpu_id);
auto d_weights = info.weights_.ConstDeviceSpan();
auto weight_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i % d_labels.Shape(0);
return d_weights[sample_idx];
});
common::SegmentedWeightedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1,
val_it, val_it + n, weight_it, weight_it + n,
base_score->Data());
sw = dh::Reduce(ctx_->CUDACtx()->CTP(), dh::tcbegin(d_weights), dh::tcend(d_weights), 0.0,
thrust::plus<double>{});
}
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
// For multiple quantiles, we should extend the base score to a vector instead of
// computing the average. For now, this is a workaround.
linalg::Vector<float> temp;
common::Mean(ctx_, *base_score, &temp);
double meanq = temp(0) * sw;
collective::Allreduce<collective::Operation::kSum>(&meanq, 1);
collective::Allreduce<collective::Operation::kSum>(&sw, 1);
meanq /= (sw + kRtEps);
base_score->Reshape(1);
base_score->Data()->Fill(meanq);
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, std::int32_t group_idx,
RegTree* p_tree) const override {
auto alpha = param_.quantile_alpha[group_idx];
::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, prediction, alpha, p_tree);
}
void Configure(Args const& args) override {
param_.UpdateAllowUnknown(args);
param_.Validate();
this->alpha_.HostVector() = param_.quantile_alpha.Get();
}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
static char const* Name() { return "reg:quantileerror"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Name());
out["quantile_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
CHECK_EQ(get<String const>(in["name"]), Name());
FromJson(in["quantile_loss_param"], ¶m_);
alpha_.HostVector() = param_.quantile_alpha.Get();
}
const char* DefaultEvalMetric() const override { return "quantile"; }
Json DefaultMetricConfig() const override {
CHECK(param_.GetInitialised());
Json config{Object{}};
config["name"] = String{this->DefaultEvalMetric()};
config["quantile_loss_param"] = ToJson(param_);
return config;
}
};
XGBOOST_REGISTER_OBJECTIVE(QuantileRegression, QuantileRegression::Name())
.describe("Regression with quantile loss.")
.set_body([]() { return new QuantileRegression(); });
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(quantile_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
} // namespace obj
} // namespace xgboost
| 815909ccd4ae18e67a278ffef5a5099c8c7b1e89.cu | /**
* Copyright 2023 by XGBoost contributors
*/
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t
#include <vector> // std::vector
#include "../common/linalg_op.h" // ElementWiseKernel,cbegin,cend
#include "../common/quantile_loss_utils.h" // QuantileLossParam
#include "../common/stats.h" // Quantile,WeightedQuantile
#include "adaptive.h" // UpdateTreeLeaf
#include "dmlc/parameter.h" // DMLC_DECLARE_PARAMETER
#include "init_estimation.h" // CheckInitInputs
#include "xgboost/base.h" // GradientPair,XGBOOST_DEVICE,bst_target_t
#include "xgboost/data.h" // MetaInfo
#include "xgboost/host_device_vector.h" // HostDeviceVector
#include "xgboost/json.h" // Json,String,ToJson,FromJson
#include "xgboost/linalg.h" // Tensor,MakeTensorView,MakeVec
#include "xgboost/objective.h" // ObjFunction
#include "xgboost/parameter.h" // XGBoostParameter
#if defined(XGBOOST_USE_CUDA)
#include "../common/linalg_op.cuh" // ElementWiseKernel
#include "../common/stats.cuh" // SegmentedQuantile
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
class QuantileRegression : public ObjFunction {
common::QuantileLossParam param_;
HostDeviceVector<float> alpha_;
bst_target_t Targets(MetaInfo const& info) const override {
auto const& alpha = param_.quantile_alpha.Get();
CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured.";
CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target is not yet supported by the quantile loss.";
CHECK(!alpha.empty());
// We have some placeholders for multi-target in the quantile loss. But it's not
// supported as the gbtree doesn't know how to slice the gradient and there's no 3-dim
// model shape in general.
auto n_y = std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
return alpha_.Size() * n_y;
}
public:
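  // Pinball (quantile) loss gradient: for residual d = predt - label and sample weight w,
  // the gradient is (1 - alpha) * w when d >= 0 and -alpha * w otherwise; the Hessian slot
  // is filled with w, since the loss itself is piecewise linear.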
void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info, std::int32_t iter,
HostDeviceVector<GradientPair>* out_gpair) override {
if (iter == 0) {
CheckInitInputs(info);
}
CHECK_EQ(param_.quantile_alpha.Get().size(), alpha_.Size());
using SizeT = decltype(info.num_row_);
SizeT n_targets = this->Targets(info);
SizeT n_alphas = alpha_.Size();
CHECK_NE(n_alphas, 0);
CHECK_GE(n_targets, n_alphas);
CHECK_EQ(preds.Size(), info.num_row_ * n_targets);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(n_targets * info.num_row_);
auto gpair =
linalg::MakeTensorView(ctx_->IsCPU() ? out_gpair->HostSpan() : out_gpair->DeviceSpan(),
{info.num_row_, n_alphas, n_targets / n_alphas}, ctx_->gpu_id);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
auto n_samples = info.num_row_;
alpha_.SetDevice(ctx_->gpu_id);
auto alpha = ctx_->IsCPU() ? alpha_.ConstHostSpan() : alpha_.ConstDeviceSpan();
linalg::ElementWiseKernel(
ctx_, gpair, [=] XGBOOST_DEVICE(std::size_t i, GradientPair const&) mutable {
auto idx = linalg::UnravelIndex(static_cast<std::size_t>(i),
{static_cast<std::size_t>(n_samples),
static_cast<std::size_t>(alpha.size()),
static_cast<std::size_t>(n_targets / alpha.size())});
// std::tie is not available for cuda kernel.
std::size_t sample_id = std::get<0>(idx);
std::size_t quantile_id = std::get<1>(idx);
std::size_t target_id = std::get<2>(idx);
auto d = predt(i) - labels(sample_id, target_id);
auto h = weight[sample_id];
if (d >= 0) {
auto g = (1.0f - alpha[quantile_id]) * weight[sample_id];
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
} else {
auto g = (-alpha[quantile_id] * weight[sample_id]);
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
}
});
}
void InitEstimation(MetaInfo const& info, linalg::Vector<float>* base_score) const override {
CHECK(!alpha_.Empty());
auto n_targets = this->Targets(info);
base_score->SetDevice(ctx_->gpu_id);
base_score->Reshape(n_targets);
double sw{0};
if (ctx_->IsCPU()) {
auto quantiles = base_score->HostView();
auto h_weights = info.weights_.ConstHostVector();
if (info.weights_.Empty()) {
sw = info.num_row_;
} else {
sw = std::accumulate(std::cbegin(h_weights), std::cend(h_weights), 0.0);
}
for (bst_target_t t{0}; t < n_targets; ++t) {
auto alpha = param_.quantile_alpha[t];
auto h_labels = info.labels.HostView();
if (h_weights.empty()) {
quantiles(t) =
common::Quantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels));
} else {
CHECK_EQ(h_weights.size(), h_labels.Size());
quantiles(t) = common::WeightedQuantile(ctx_, alpha, linalg::cbegin(h_labels),
linalg::cend(h_labels), std::cbegin(h_weights));
}
}
} else {
#if defined(XGBOOST_USE_CUDA)
alpha_.SetDevice(ctx_->gpu_id);
auto d_alpha = alpha_.ConstDeviceSpan();
auto d_labels = info.labels.View(ctx_->gpu_id);
auto seg_it = dh::MakeTransformIterator<std::size_t>(
thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) { return i * d_labels.Shape(0); });
CHECK_EQ(d_labels.Shape(1), 1);
auto val_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i % d_labels.Shape(0);
return d_labels(sample_idx, 0);
});
auto n = d_labels.Size() * d_alpha.size();
CHECK_EQ(base_score->Size(), d_alpha.size());
if (info.weights_.Empty()) {
common::SegmentedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it,
val_it + n, base_score->Data());
sw = info.num_row_;
} else {
info.weights_.SetDevice(ctx_->gpu_id);
auto d_weights = info.weights_.ConstDeviceSpan();
auto weight_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul),
[=] XGBOOST_DEVICE(std::size_t i) {
auto sample_idx = i % d_labels.Shape(0);
return d_weights[sample_idx];
});
common::SegmentedWeightedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1,
val_it, val_it + n, weight_it, weight_it + n,
base_score->Data());
sw = dh::Reduce(ctx_->CUDACtx()->CTP(), dh::tcbegin(d_weights), dh::tcend(d_weights), 0.0,
thrust::plus<double>{});
}
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
// For multiple quantiles, we should extend the base score to a vector instead of
// computing the average. For now, this is a workaround.
linalg::Vector<float> temp;
common::Mean(ctx_, *base_score, &temp);
double meanq = temp(0) * sw;
collective::Allreduce<collective::Operation::kSum>(&meanq, 1);
collective::Allreduce<collective::Operation::kSum>(&sw, 1);
meanq /= (sw + kRtEps);
base_score->Reshape(1);
base_score->Data()->Fill(meanq);
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, std::int32_t group_idx,
RegTree* p_tree) const override {
auto alpha = param_.quantile_alpha[group_idx];
::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, prediction, alpha, p_tree);
}
void Configure(Args const& args) override {
param_.UpdateAllowUnknown(args);
param_.Validate();
this->alpha_.HostVector() = param_.quantile_alpha.Get();
}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
static char const* Name() { return "reg:quantileerror"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Name());
out["quantile_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
CHECK_EQ(get<String const>(in["name"]), Name());
FromJson(in["quantile_loss_param"], ¶m_);
alpha_.HostVector() = param_.quantile_alpha.Get();
}
const char* DefaultEvalMetric() const override { return "quantile"; }
Json DefaultMetricConfig() const override {
CHECK(param_.GetInitialised());
Json config{Object{}};
config["name"] = String{this->DefaultEvalMetric()};
config["quantile_loss_param"] = ToJson(param_);
return config;
}
};
XGBOOST_REGISTER_OBJECTIVE(QuantileRegression, QuantileRegression::Name())
.describe("Regression with quantile loss.")
.set_body([]() { return new QuantileRegression(); });
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(quantile_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
} // namespace obj
} // namespace xgboost
|
59fcca95e6407b98655819972096fd09d233d7ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "count3D.h"
#include "readfile.h"
#include "utility.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include "camera.h"
#include "render.h"
#include "render2D.h"
#include "plane.h"
#include "cuda_check_error.h"
#define TX 32
#define TY 32
using namespace std;
extern int divUp(int a, int b);
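// main2D: read the "yxz.txt" point cloud, compute its bounding box and centroid, place a
// camera and a cutting plane through the centroid, fill a per-pixel density buffer on the
// GPU with renderPlaneKernel, then write the densities out as a heat-mapped PPM image and
// a CSV table.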
int main2D(int ns)
{
std::vector<float3> pc = read_yxz("yxz.txt");
int x0=15;
int y0=15;
int z0=15;
aabb box=point_cloud_bounds(pc);
int m=(int)(box.max().x-box.min().x)/x0+1;
int n=(int)(box.max().y-box.min().y)/y0+1;
int p=(int)(box.max().z-box.min().z)/z0+1;
box.print();
float3 centroid = make_float3(0.5*(box.min().x+box.max().x),
0.5*(box.min().y+box.max().y),
0.5*(box.min().z+box.max().z));
//float3 origin = make_float3(-2200,1098,2210);
//float3 origin=make_float3(500,2300,2210);
float3 unitY = make_float3(0,1,0);
plane aPlane(make_float3(0,0,centroid.z),make_float3(0.0,0.0,1));
fprintf(stderr,"Centroid: (%f,%f,%f)\n",centroid.x,centroid.y,centroid.z);
int nx=400;
int ny=400;
float radius = 100.0;
setupSeeds(64);
float3 origin=make_float3(300,1500,4300);
camera cam(origin,centroid,unitY,45,(float)nx/(float)ny,0,1000);
//float max_density;
//max_density=1.0f;
ofstream pic;
pic.open("pic.ppm");
pic << "P3\n" << nx << " " << ny << "\n255\n";
int ir,ig,ib;
float *densities = (float *)calloc(nx*ny,sizeof(float));
float3* d_pc;
int len=pc.size();
CudaSafeCall(hipMallocManaged(&d_pc, len*sizeof(float3)));
CudaSafeCall(hipMemcpy(d_pc, &pc[0], len*sizeof(float3),hipMemcpyHostToDevice));
float* d_pixels;
CudaSafeCall(hipMallocManaged(&d_pixels, nx*ny*sizeof(float)));
CudaSafeCall(hipMemset(d_pixels,0,nx*ny*sizeof(float)));
const dim3 blockSize(TX,TY);
const dim3 gridSize(divUp(nx*ny,TX),divUp(len,TY));
int *d_mutex=0;
CudaSafeCall(hipMallocManaged((void**)&d_mutex, nx*ny*sizeof(int)));
CudaSafeCall(hipMemset(d_mutex,0,nx*ny*sizeof(int)));
camera *d_cam;
CudaSafeCall(hipMallocManaged(&d_cam, sizeof(camera)));
CudaSafeCall(hipMemcpy(d_cam, &cam, sizeof(camera),hipMemcpyHostToDevice));
setupPlaneSeeds(TX);
plane *d_plane;
CudaSafeCall(hipMallocManaged(&d_plane, sizeof(plane)));
CudaSafeCall(hipMemcpy(d_plane, &aPlane, sizeof(plane),hipMemcpyHostToDevice));
float * d_max_density;
CudaSafeCall(hipMallocManaged(&d_max_density, sizeof(float)));
CudaSafeCall(hipMemset(d_max_density,0,sizeof(float)));
hipLaunchKernelGGL(( renderPlaneKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_pixels,nx,ny,d_pc,len,d_plane,d_max_density,
d_cam,radius,d_mutex,ns,devStates);
CudaCheckError();
CudaSafeCall(hipDeviceSynchronize());
printf("Max density before: %f\n",*d_max_density);
#if 1
for(int j=ny-1;j>=0;j--)
for(int i=0;i<nx;i++)
{
if(d_pixels[i+j*nx]>*d_max_density)
*d_max_density=d_pixels[i+j*nx];
}
printf("Max density after: %f\n",*d_max_density);
#endif
ofstream csv;
csv.open("pic.csv");
csv<<"i,j,density,r,g,b"<<endl;
for(int j=ny-1;j>=0;j--)
for(int i=0;i<nx;i++)
{
float3 color=heat_color(d_pixels[i+j*nx],*d_max_density);
ir=int(255.99*color.x);
ig=int(255.99*color.y);
ib=int(255.99*color.z);
pic << ir<<" " << ig<<" " << ib<<"\n";
csv<<i<<","<<j<<","<<d_pixels[i+j*nx]<<","<<ir<<","<<ig<<","<<ib<<endl;
}
//csv
csv.close();
free(densities);
pic.close();
//fprintf(stderr,"Max density: %f\n", max_density);
//CudaSafeCall(hipFree(cells));
CudaSafeCall(hipFree(d_pc));
CudaSafeCall(hipFree(d_pixels));
CudaSafeCall(hipFree(d_max_density));
CudaSafeCall(hipFree(d_mutex));
CudaSafeCall(hipFree(d_cam));
CudaSafeCall(hipFree(d_plane));
return 0;
}
| 59fcca95e6407b98655819972096fd09d233d7ec.cu | #include "count3D.h"
#include "readfile.h"
#include "utility.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include "camera.h"
#include "render.h"
#include "render2D.h"
#include "plane.h"
#include "cuda_check_error.h"
#define TX 32
#define TY 32
using namespace std;
extern int divUp(int a, int b);
int main2D(int ns)
{
std::vector<float3> pc = read_yxz("yxz.txt");
int x0=15;
int y0=15;
int z0=15;
aabb box=point_cloud_bounds(pc);
int m=(int)(box.max().x-box.min().x)/x0+1;
int n=(int)(box.max().y-box.min().y)/y0+1;
int p=(int)(box.max().z-box.min().z)/z0+1;
box.print();
float3 centroid = make_float3(0.5*(box.min().x+box.max().x),
0.5*(box.min().y+box.max().y),
0.5*(box.min().z+box.max().z));
//float3 origin = make_float3(-2200,1098,2210);
//float3 origin=make_float3(500,2300,2210);
float3 unitY = make_float3(0,1,0);
plane aPlane(make_float3(0,0,centroid.z),make_float3(0.0,0.0,1));
fprintf(stderr,"Centroid: (%f,%f,%f)\n",centroid.x,centroid.y,centroid.z);
int nx=400;
int ny=400;
float radius = 100.0;
setupSeeds(64);
float3 origin=make_float3(300,1500,4300);
camera cam(origin,centroid,unitY,45,(float)nx/(float)ny,0,1000);
//float max_density;
//max_density=1.0f;
ofstream pic;
pic.open("pic.ppm");
pic << "P3\n" << nx << " " << ny << "\n255\n";
int ir,ig,ib;
float *densities = (float *)calloc(nx*ny,sizeof(float));
float3* d_pc;
int len=pc.size();
CudaSafeCall(cudaMallocManaged(&d_pc, len*sizeof(float3)));
CudaSafeCall(cudaMemcpy(d_pc, &pc[0], len*sizeof(float3),cudaMemcpyHostToDevice));
float* d_pixels;
CudaSafeCall(cudaMallocManaged(&d_pixels, nx*ny*sizeof(float)));
CudaSafeCall(cudaMemset(d_pixels,0,nx*ny*sizeof(float)));
const dim3 blockSize(TX,TY);
const dim3 gridSize(divUp(nx*ny,TX),divUp(len,TY));
int *d_mutex=0;
CudaSafeCall(cudaMallocManaged((void**)&d_mutex, nx*ny*sizeof(int)));
CudaSafeCall(cudaMemset(d_mutex,0,nx*ny*sizeof(int)));
camera *d_cam;
CudaSafeCall(cudaMallocManaged(&d_cam, sizeof(camera)));
CudaSafeCall(cudaMemcpy(d_cam, &cam, sizeof(camera),cudaMemcpyHostToDevice));
setupPlaneSeeds(TX);
plane *d_plane;
CudaSafeCall(cudaMallocManaged(&d_plane, sizeof(plane)));
CudaSafeCall(cudaMemcpy(d_plane, &aPlane, sizeof(plane),cudaMemcpyHostToDevice));
float * d_max_density;
CudaSafeCall(cudaMallocManaged(&d_max_density, sizeof(float)));
CudaSafeCall(cudaMemset(d_max_density,0,sizeof(float)));
renderPlaneKernel<<<gridSize, blockSize>>>(d_pixels,nx,ny,d_pc,len,d_plane,d_max_density,
d_cam,radius,d_mutex,ns,devStates);
CudaCheckError();
CudaSafeCall(cudaDeviceSynchronize());
printf("Max density before: %f\n",*d_max_density);
#if 1
for(int j=ny-1;j>=0;j--)
for(int i=0;i<nx;i++)
{
if(d_pixels[i+j*nx]>*d_max_density)
*d_max_density=d_pixels[i+j*nx];
}
printf("Max density after: %f\n",*d_max_density);
#endif
ofstream csv;
csv.open("pic.csv");
csv<<"i,j,density,r,g,b"<<endl;
for(int j=ny-1;j>=0;j--)
for(int i=0;i<nx;i++)
{
float3 color=heat_color(d_pixels[i+j*nx],*d_max_density);
ir=int(255.99*color.x);
ig=int(255.99*color.y);
ib=int(255.99*color.z);
pic << ir<<" " << ig<<" " << ib<<"\n";
csv<<i<<","<<j<<","<<d_pixels[i+j*nx]<<","<<ir<<","<<ig<<","<<ib<<endl;
}
//csv
csv.close();
free(densities);
pic.close();
//fprintf(stderr,"Max density: %f\n", max_density);
//CudaSafeCall(cudaFree(cells));
CudaSafeCall(cudaFree(d_pc));
CudaSafeCall(cudaFree(d_pixels));
CudaSafeCall(cudaFree(d_max_density));
CudaSafeCall(cudaFree(d_mutex));
CudaSafeCall(cudaFree(d_cam));
CudaSafeCall(cudaFree(d_plane));
return 0;
}
|
a0eb55cac0b639390f2c2f0dece5ed0d4375cb24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Developed by: Hector Augusto Velasco-Perez
@ CHAOS Lab
@ Georgia Institute of Technology
August 07/10/2019
Special thanks to:
Dr. Flavio Fenton
Dr. Claire Yanyan Ji
Dr. Abouzar Kaboudian
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "typedef3V-FK.h"
//#include "globalVariables.cuh"
#include "hostPrototypes.h"
#include "devicePrototypes.cuh"
#include "./common/CudaSafeCall.h"
// Weight constants
extern __constant__ REAL dt_d, rx_d, ry_d, rz_d;
extern __constant__ REAL rxyc_d, rxzc_d, ryzc_d, rxyzf_d;
extern __constant__ REAL rCxyz_d, rwe_d, rsn_d, rbt_d;
extern __constant__ REAL rxy_d, rbx_d, rby_d;
// Miscellaneous constants
extern __constant__ REAL expTau_vp_d, expTau_wp_d, expTau_wn_d;
extern __constant__ REAL invdx_d, invdy_d, invdz_d;
extern __device__ vec3dyn dev_data1[NN];
extern __device__ vec6dyn dev_data2[NN];
extern __device__ int dev_count;
/*========================================================================
* Main Entry of the Kernel
*========================================================================
*/
__global__ void FK_3V_kernel(stateVar g_out, stateVar g_in, conductionVar r,
REAL *J_d) {
/*------------------------------------------------------------------------
 * Getting i, j and k global indices
*------------------------------------------------------------------------
*/
const int i = threadIdx.x;
const int j = blockIdx.x*blockDim.y + threadIdx.y;
const int k = blockIdx.y;
/*------------------------------------------------------------------------
* return if we are outside the domain
*------------------------------------------------------------------------
*/
if( i >= nx && j>=ny && k >= nz) {
return ;
}
/*------------------------------------------------------------------------
* Converting global index into matrix indices assuming
* the column major structure of the matlab matrices
*------------------------------------------------------------------------
*/
const int nxy = nx*ny;
const int i3d = k * nxy + j * nx + i;
/*------------------------------------------------------------------------
* Setting local variables
*------------------------------------------------------------------------
*/
#ifdef DOUBLE_PRECISION
REAL u = g_in.u[i3d] ;
REAL v = g_in.v[i3d] ;
REAL w = g_in.w[i3d] ;
/*------------------------------------------------------------------------
* Additional heaviside functions
*------------------------------------------------------------------------
*/
REAL p = ( u > theta_c ) ? 1.0:0.0 ;
REAL q = ( u > theta_v ) ? 1.0:0.0 ;
/*------------------------------------------------------------------------
 * Calculating dependent taus
*------------------------------------------------------------------------
*/
REAL tau_vnu = q*tau_v1n + (1.0-q)*tau_v2n;
g_out.v[i3d] =
( u > theta_c ) ? v*expTau_vp_d : 1.0-(1.0-v)*exp(-dt_d/tau_vnu);
g_out.w[i3d] =
( u > theta_c ) ? w*expTau_wp_d : 1.0-(1.0-w)*expTau_wn_d;
/*
REAL dv2dt = (1.0-p)*(1.0-v)/tau_vnu - p*v/tau_vp;
v += dv2dt*dt_d ;
g_out.v[i3d] = v ;
REAL dw2dt = (1.0-p)*(1.0-w)/tau_wn - p*w/tau_wp;
w += dw2dt*dt_d ;
g_out.w[i3d] = w ;
*/
/*------------------------------------------------------------------------
* I_sum
*------------------------------------------------------------------------
*/
//Fast inward (Sodium)
REAL J_fi = -p*v*(1.0-u)*(u-theta_c)/tau_d;
//Slow outward (Potassium)
REAL J_so = (1.0-p)*u/tau_o + p/tau_r;
//Slow inward (Calcium)
REAL J_si = -w*(1.0 + tanh(K*(u-u_csi)))/(2.0*tau_si);
REAL I_sum = J_fi + J_so + J_si ;
J_d[i3d] = I_sum;
/*------------------------------------------------------------------------
* Laplacian Calculation
*
* No flux boundary condition is applied on all boundaries through
* the Laplacian operator definition
*------------------------------------------------------------------------
*/
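// Out-of-domain neighbours are replaced by their interior mirror (e.g. W maps to i+1 when i==0),
// so the centred difference across a boundary face vanishes and the zero-flux condition is
// enforced without extra branching inside the stencil.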
int S = ( j> 0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int W = ( i> 0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i+1,j,k) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
//////////////////////
int SWxy = (i>0 && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i==0 && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i>0 && j==0)? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j+1,k) ) ) ;
int SExy = (i<(nx-1) && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i==(nx-1) && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i<(nx-1) && j==0)? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j+1,k) ) ) ;
int NWxy = (i>0 && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i==0 && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i>0 && j==(ny-1))? I3D(nx,nxy,i-1,j-1,k) : I3D(nx,nxy,i+1,j-1,k) ) ) ;
int NExy = (i<(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i==(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i<(nx-1) && j==(ny-1))? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j-1,k) ) ) ;
#ifdef PERIODIC_Z // In the z direction
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,nz-1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,0) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,nz-1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,nz-1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,0) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,0) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,nz-1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,nz-1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,0) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,0) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#else
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,k-1) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,k-1) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#endif
#ifdef ANISOTROPIC_TISSUE
/*------------------------------------------------------------------------
* Anisotropic Laplacian
*------------------------------------------------------------------------
*/
REAL rx = r.x[k];
REAL ry = r.y[k];
REAL rz = r.z[k];
REAL rbx = r.bx[k];
REAL rby = r.by[k];
REAL du2dt = ( rCxyz_d * (rx + ry + rz)*u
+ rwe_d * (4.0*rx - ry - rz)*(g_in.u[W] + g_in.u[E])
+ rsn_d * (4.0*ry - rx - rz)*(g_in.u[N] + g_in.u[S])
+ rbt_d * (4.0*rz - ry - rx)*(g_in.u[T] + g_in.u[B])
+ rxyc_d * (rx + ry)*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d * (rx + rz)*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d * (ry + rz)*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz );
du2dt -= dt_d*I_sum/C_m ;
*/
// Correction to NSWE boundary conditions
REAL b_S = (j > 0 )? 0.0:
((j==0 && (i==0 || i==(nx-1)))? 0.0:
rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)])) ;
REAL b_N = (j < (ny-1))? 0.0:
((j==(ny-1) && (i==0 || i==(nx-1)))? 0.0:
-rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)])) ;
REAL b_W = (i > 0 )? 0.0:
((i==0 && (j==0 || j==(ny-1)))? 0.0:
rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)])) ;
REAL b_E = (i < (nx-1))? 0.0:
((i==(nx-1) && (j==0 || j==(ny-1)))? 0.0:
-rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)])) ;
du2dt += (
( b_S + b_N )*ry
+ ( b_W + b_E )*rx );
// Correction to SW SE NW NE boundary conditions
REAL b_SW = (i>0 && j>0)? 0.0 :
((i==0 && j>1)? rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i>1 && j==0)? rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0)) ;
REAL b_SE = (i<(nx-1) && j>0)? 0.0 :
((i==(nx-1) && j>1)? -rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i<(nx-2) && j==0)? rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0)) ;
REAL b_NW = (i>0 && j<(ny-1))? 0.0 :
((i==0 && j<(ny-2))? rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i>1 && j==(ny-1))? -rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0)) ;
REAL b_NE = (i<(nx-1) && j<(ny-1))? 0.0 :
((i==(nx-1) && j<(ny-2))? -rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i<(nx-2) && j==(ny-1))? -rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0)) ;
du2dt += ( r.xy[k]*( (g_in.u[SWxy] + b_SW) +
(g_in.u[NExy] + b_NE) -
(g_in.u[SExy] + b_SE) -
(g_in.u[NWxy] + b_NW) ) );
#else
/*------------------------------------------------------------------------
* Isotropic Laplacian
*------------------------------------------------------------------------
*/
REAL du2dt = ( rCxyz_d*u
+ rwe_d*(g_in.u[W] + g_in.u[E])
+ rsn_d*(g_in.u[N] + g_in.u[S])
+ rbt_d*(g_in.u[T] + g_in.u[B])
+ rxyc_d*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
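/* The membrane current is applied half at the node (0.5*I_sum) and half through a weighted
   average of the six face-neighbour currents stored in J_d; rxyzf_d is assumed to carry that
   neighbour weight, precomputed on the host together with rCxyz_d, rwe_d, rsn_d and rbt_d. */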
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx_d
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry_d
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz_d );
du2dt -= dt_d*I_sum/C_m ;
*/
#endif
/*------------------------------------------------------------------------
* Time integration
*------------------------------------------------------------------------
*/
u += du2dt ;
g_out.u[i3d] = u ;
/*------------------------------------------------------------------------
* Single precision
*------------------------------------------------------------------------
*/
#else
REAL u = g_in.u[i3d] ;
REAL v = g_in.v[i3d] ;
REAL w = g_in.w[i3d] ;
/*------------------------------------------------------------------------
* Additional heaviside functions
*------------------------------------------------------------------------
*/
REAL p = ( u > theta_c ) ? 1.0:0.0 ;
REAL q = ( u > theta_v ) ? 1.0:0.0 ;
/*------------------------------------------------------------------------
 * Calculating dependent taus
*------------------------------------------------------------------------
*/
REAL tau_vnu = q*tau_v1n + (1.f-q)*tau_v2n;
g_out.v[i3d] =
( u > theta_c ) ? v*expTau_vp_d : 1.0f-(1.0f-v)*expf(-dt_d/tau_vnu);
g_out.w[i3d] =
( u > theta_c ) ? w*expTau_wp_d : 1.0f-(1.0f-w)*expTau_wn_d;
/*
REAL dv2dt = (1.f-p)*(1.f-v)/tau_vnu - p*v/tau_vp;
v += dv2dt*dt_d ;
g_out.v[i3d] = v ;
REAL dw2dt = (1.f-p)*(1.f-w)/tau_wn - p*w/tau_wp;
w += dw2dt*dt_d ;
g_out.w[i3d] = w ;
*/
/*------------------------------------------------------------------------
* I_sum
*------------------------------------------------------------------------
*/
//Fast inward (Sodium)
REAL J_fi = -p*v*(1.f-u)*(u-theta_c)/tau_d;
//Slow outward (Potassium)
REAL J_so = (1.f-p)*u/tau_o + p/tau_r;
//Slow inward (Calcium)
REAL J_si = -w*(1.f + tanhf(K*(u-u_csi)))/(2.f*tau_si);
REAL I_sum = J_fi + J_so + J_si ;
J_d[i3d] = I_sum;
/*------------------------------------------------------------------------
* Laplacian Calculation
*
* No flux boundary condition is applied on all boundaries through
* the Laplacian operator definition
*------------------------------------------------------------------------
*/
int S = ( j> 0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int W = ( i> 0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i+1,j,k) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int SWxy = (i>0 && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i==0 && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i>0 && j==0)? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j+1,k) ) ) ;
int SExy = (i<(nx-1) && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i==(nx-1) && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i<(nx-1) && j==0)? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j+1,k) ) ) ;
int NWxy = (i>0 && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i==0 && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i>0 && j==(ny-1))? I3D(nx,nxy,i-1,j-1,k) : I3D(nx,nxy,i+1,j-1,k) ) ) ;
int NExy = (i<(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i==(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i<(nx-1) && j==(ny-1))? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j-1,k) ) ) ;
#ifdef PERIODIC_Z // In the z direction
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,nz-1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,0) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,nz-1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,nz-1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,0) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,0) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,nz-1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,nz-1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,0) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,0) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#else
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,k-1) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,k-1) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#endif
#ifdef ANISOTROPIC_TISSUE
/*------------------------------------------------------------------------
* Anisotropic Laplacian
*-------------------------------------------------------------------------
*/
REAL rx = r.x[k];
REAL ry = r.y[k];
REAL rz = r.z[k];
REAL rbx = r.bx[k];
REAL rby = r.by[k];
REAL du2dt = ( rCxyz_d * (rx + ry + rz)*u
+ rwe_d * (4.0*rx - ry - rz)*(g_in.u[W] + g_in.u[E])
+ rsn_d * (4.0*ry - rx - rz)*(g_in.u[N] + g_in.u[S])
+ rbt_d * (4.0*rz - ry - rx)*(g_in.u[T] + g_in.u[B])
+ rxyc_d * (rx + ry)*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d * (rx + rz)*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d * (ry + rz)*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz );
du2dt -= dt_d*I_sum/C_m ;
*/
// Correction to NSWE boundary conditions
REAL b_S = (j > 0 ) ? 0.f :
((j==0 && (i==0 || i==(nx-1)))? 0.f:
rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)]));
REAL b_N = (j < (ny-1)) ? 0.f :
((j==(ny-1) && (i==0 || i==(nx-1)))? 0.f:
-rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)]));
REAL b_W = (i > 0 ) ? 0.f :
((i==0 && (j==0 || j==(ny-1)))? 0.f:
rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)]));
REAL b_E = (i < (nx-1)) ? 0.f :
((i==(nx-1) && (j==0 || j==(ny-1)))? 0.f:
-rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)]));
du2dt += (
( b_S + b_N )*ry
+ ( b_W + b_E )*rx );
// Correction to SW SE NW NE boundary conditions
REAL b_SW = (i>0 && j>0) ? 0.0f :
((i==0 && j>1) ? rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i>1 && j==0) ? rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0f));
REAL b_SE = (i<(nx-1) && j>0) ? 0.0f :
((i==(nx-1) && j>1) ? - rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i<(nx-2) && j==0) ? rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0f));
REAL b_NW = (i>0 && j<(ny-1)) ? 0.0f :
((i==0 && j<(ny-2)) ? rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i>1 && j==(ny-1)) ? - rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0f));
REAL b_NE = (i<(nx-1) && j<(ny-1)) ? 0.0f :
((i==(nx-1) && j<(ny-2)) ? - rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i<(nx-2) && j==(ny-1)) ? - rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0f));
du2dt += ( r.xy[k]*( (g_in.u[SWxy] + b_SW) +
(g_in.u[NExy] + b_NE) -
(g_in.u[SExy] + b_SE) -
(g_in.u[NWxy] + b_NW) ) );
#else
/*------------------------------------------------------------------------
* Isotropic Laplacian
*------------------------------------------------------------------------
*/
REAL du2dt = ( rCxyz_d*u
+ rwe_d*(g_in.u[W] + g_in.u[E])
+ rsn_d*(g_in.u[N] + g_in.u[S])
+ rbt_d*(g_in.u[T] + g_in.u[B])
+ rxyc_d*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5f*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx_d
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry_d
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz_d );
du2dt -= dt_d*I_sum/C_m ;
*/
#endif
/*------------------------------------------------------------------------
* Time integration
*------------------------------------------------------------------------
*/
u += du2dt ;
g_out.u[i3d] = u ;
#endif
}
void FK_3V_wrapper(dim3 grid3D, dim3 block3D, stateVar gOut_d, stateVar gIn_d,
conductionVar r_d, REAL *J_current_d) {
/*
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
hipEventRecord(start,0);
*/
hipLaunchKernelGGL(( FK_3V_kernel), dim3(grid3D), dim3(block3D), 0, 0, gOut_d, gIn_d, r_d, J_current_d);
CudaCheckError();
/*
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("3V kernel took: %f ms\n", elapsedTime);
//t += 2.0*dt;
*/
/*
SIM_2V_kernel<<<grid3D, block3D>>>(gIn_d, gOut_d, r_d, J_current_d);
CudaCheckError();
*/
//swapSoA(&gIn_d, &gOut_d);
}
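/*
  Illustrative launch configuration (an assumption, not taken from this file): the kernel maps
  threadIdx.x -> i, blockIdx.x*blockDim.y + threadIdx.y -> j and blockIdx.y -> k, so blockDim.x
  must cover nx while the grid covers ny and nz, e.g.

    const int BDIMY = 4; // hypothetical number of j-rows per block
    dim3 block3D(nx, BDIMY);
    dim3 grid3D((ny + BDIMY - 1)/BDIMY, nz);
    FK_3V_wrapper(grid3D, block3D, gOut_d, gIn_d, r_d, J_current_d);
*/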
// This function launches all functions that need to be processed at every frame
// No graphics functions are launched from here
void animation(dim3 grid3D, dim3 block3D,
stateVar g_h, stateVar gOut_d, stateVar gIn_d, REAL *J_current_d,
conductionVar r_d, paramVar param, REAL *pt_h, REAL *pt_d,
std::vector<electrodeVar> &electrode,
bool initConditionFlag) {
#pragma unroll
for (int i=0;i<(ITPERFRAME);i++) {
FK_3V_wrapper(grid3D,block3D,gOut_d,gIn_d,r_d,J_current_d);
swapSoA(&gIn_d, &gOut_d);
}
// Single point time tracking
singlePoint(gIn_d,pt_h,pt_d,param.singlePointPixel,electrode);
}
__global__ void singlePoint_kernel(stateVar g_in, REAL *pt_d,
int singlePointPixel) {
pt_d[0] = g_in.u[singlePointPixel];
pt_d[1] = g_in.v[singlePointPixel];
pt_d[2] = g_in.w[singlePointPixel];
}
void singlePoint(stateVar gIn_d, REAL *pt_h, REAL *pt_d,
int singlePointPixel, std::vector<electrodeVar> &electrode) {
hipLaunchKernelGGL(( singlePoint_kernel), dim3(1),dim3(1), 0, 0, gIn_d, pt_d, singlePointPixel);
CudaCheckError();
CudaSafeCall(hipMemcpy(pt_h, pt_d, 3*sizeof(REAL),hipMemcpyDeviceToHost));
electrodeVar data = {
pt_h[0],
pt_h[1],
pt_h[2],
};
electrode.push_back(data);
}
__global__ void copyRender_kernel(int totpoints, stateVar g_in,
VolumeType *h_volume) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < totpoints; i += stride) {
h_volume[i] = (unsigned char)255.f*(float)g_in.u[i]*0.9f;
}
}
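/* Grid-stride loop: each thread steps through the flat voxel array by the total thread count,
   mapping the normalized voltage u (roughly in [0,1]) to a byte intensity scaled by 0.9 for the
   volume renderer. */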
// Convert numerical values of the PDE solution to colors (char)
//extern "C"
void copyRender(dim3 grid1D, dim3 block1D, int totpoints,
stateVar gIn_d, VolumeType *h_volume) {
/*
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
hipEventRecord(start,0);
*/
hipLaunchKernelGGL(( copyRender_kernel), dim3(grid1D), dim3(block1D), 0, 0, totpoints, gIn_d, h_volume);
CudaCheckError();
/*
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Time: %f ms\n", elapsedTime);
*/
}
__global__ void spiralTip_kernel(REAL *g_past, stateVar g_present,
VolumeType *h_vol) {
/*------------------------------------------------------------------------
 * Getting i, j and k global indices
*------------------------------------------------------------------------
*/
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int k = threadIdx.y;
/*------------------------------------------------------------------------
* Return if we are outside the domain
*------------------------------------------------------------------------
*/
if( i >= nx && j>=ny && k >= nz) {
return ;
}
const int nxy = nx*ny;
int s0 = I3D(nx,nxy,i,j,k);
int sx = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i,j,k);
int sy = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j,k);
int sz = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k);
int sxy = ( (j<(ny-1)) && (i<(nx-1) ) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i,j,k);
int sxz = ( (k<(nz-1)) && (i<(nx-1) ) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i,j,k);
int syz = ( (j<(ny-1)) && (k<(nz-1) ) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j,k);
#ifdef SPIRALTIP_INTERPOLATION
/*------------------------------------------------------------------------
* Calculate pixel position of filament
*------------------------------------------------------------------------
*/
REAL x1, x2, x3, x4, y1, y2, y3, y4;
REAL x3y1, x4y1, x3y2, x4y2, x1y3, x2y3, x1y4, x2y4, x2y1, x1y2, x4y3, x3y4;
REAL den1, den2, ctn1, ctn2, disc, xtip, ytip, px, py, sroot1, sroot2;
REAL gx, gy, gz, gx1, gx2, gx3, gx4, gy1, gy2, gy3, gy4, gz1, gz2, gz3, gz4;
/*------------------------------------------------------------------------
* Calculate pixel position of filament and plot
*------------------------------------------------------------------------
*/
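/* Within a unit cell the four corner values of u at the present step (x1..x4) and at the previous
   step (y1..y4) define two bilinear interpolants; the tip is taken where both equal Uth.
   Eliminating one coordinate gives a quadratic whose discriminant is disc, and the two roots
   (px +/- sroot1, py +/- sroot2) are the candidate intersections; a candidate is accepted only if
   it falls strictly inside the cell and disc > 0. */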
int S = ( j>0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int Sx = ( (j>0) && (i<(nx-1)) ) ? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j+1,k) ;
int Sy = I3D(nx,nxy,i,j,k) ;
int Sxy = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Sz = ( j>0 ) ? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int Sxz = ( j>0 ) ? I3D(nx,nxy,i+1,j-1,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int Syz = ( j>0 ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int Nx = ( (i<(nx-1)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j-1,k) ;
int Ny = ( j<(ny-2) ) ? I3D(nx,nxy,i,j+2,k) : (( j==(ny-2) ) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i,j-1,k)) ;
int Nxy = ( (i<(nx-1)) && (j<(ny-2)) ) ? I3D(nx,nxy,i+1,j+2,k) : ((j==(ny-2)) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i-1,j-1,k)) ;
int Nz = ( (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k-1) ;
int Nxz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int Nyz = ( (j<(ny-2)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+2,k+1) : ((k==(nz-2)) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j-1,k-1) );
int W = ( i>0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Wx = I3D(nx,nxy,i,j,k) ;
int Wy = ( (i>0) && (j<(ny-1)) ) ? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j-1,k) ;
int Wxy = ( (j<(ny-1)) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i-1,j,k) ;
int Wz = ( (i>0) && (k<(nz-1)) ) ? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k-1) ;
int Wxz = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int Wyz = ( (i>0) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i-1,j+1,k+1) : I3D(nx,nxy,i+1,j-1,k-1) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Ex = ( i<(nx-2) ) ? I3D(nx,nxy,i+2,j,k) : ((i==(nx-2)) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i-1,j,k));
int Ey = ( (i<(nx-1)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j-1,k) ;
int Exy = ( (i<(nx-2)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+2,j+1,k) : ( (i==(nx-2)) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i-1,j-1,k)) ;
int Ez = ( (i<(nx-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k-1) ;
int Exz = ( (i<(nx-2)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+2,j,k+1) : ( (i==(nx-2)) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i-1,j,k-1) );
int Eyz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int B = ( k>0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int Bx = ( (k>0) && (i<(nx-1)) ) ? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k+1) ;
int By = ( (k>0) && (j<(ny-1)) ) ? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k+1) ;
int Bxy = ( (i<(nx-1)) && (j<(ny-1)) && (k>0) ) ? I3D(nx,nxy,i+1,j+1,k-1) : I3D(nx,nxy,i-1,j-1,k+1) ;
int Bz = I3D(nx,nxy,i,j,k);
int Bxz = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Byz = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int Tx = ( (i<(nx-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k-1) ;
int Ty = ( (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k-1) ;
int Txy = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int Tz = ( k<(nz-2) ) ? I3D(nx,nxy,i,j,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i,j,k-1));
int Txz = ( (i<(nx-1)) && k<(nz-2) ) ? I3D(nx,nxy,i+1,j,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i-1,j,k-1));
int Tyz = ( (j<(ny-1)) && (k<(nz-2)) ) ? I3D(nx,nxy,i,j+1,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j-1,k-1) );
/*------------------------------------------------------------------------
* XY plane
*------------------------------------------------------------------------
*/
x1 = g_present.u[s0];
x2 = g_present.u[sx];
x4 = g_present.u[sy];
x3 = g_present.u[sxy];
y1 = g_past[s0];
y2 = g_past[sx];
y4 = g_past[sy];
y3 = g_past[sxy];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* XY plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy3 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz3 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx4 = (g_present.u[Exy] - g_present.u[Wxy])*invdx_d;
gy4 = (g_present.u[Nxy] - g_present.u[Sxy])*invdy_d;
gz4 = (g_present.u[Txy] - g_present.u[Bxy])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy3 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz3 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx4 = (g_present.u[Exy] - g_present.u[Wxy])*invdx_d;
gy4 = (g_present.u[Nxy] - g_present.u[Sxy])*invdy_d;
gz4 = (g_present.u[Txy] - g_present.u[Bxy])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* XZ plane
*------------------------------------------------------------------------
*/
x1 = g_present.u[s0];
x2 = g_present.u[sx];
x3 = g_present.u[sxz];
x4 = g_present.u[sz];
y1 = g_past[s0];
y2 = g_past[sx];
y3 = g_past[sxz];
y4 = g_past[sz];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* XZ plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Exz] - g_present.u[Wxz])*invdx_d;
gy4 = (g_present.u[Nxz] - g_present.u[Sxz])*invdy_d;
gz4 = (g_present.u[Txz] - g_present.u[Bxz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Exz] - g_present.u[Wxz])*invdx_d;
gy4 = (g_present.u[Nxz] - g_present.u[Sxz])*invdy_d;
gz4 = (g_present.u[Txz] - g_present.u[Bxz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
 * YZ plane
*------------------------------------------------------------------------
*/
x1 = g_present.u[s0];
x2 = g_present.u[sy];
x3 = g_present.u[syz];
x4 = g_present.u[sz];
y1 = g_past[s0];
y2 = g_past[sy];
y3 = g_past[syz];
y4 = g_past[sz];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* YZ plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy2 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz2 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Eyz] - g_present.u[Wyz])*invdx_d;
gy4 = (g_present.u[Nyz] - g_present.u[Syz])*invdy_d;
gz4 = (g_present.u[Tyz] - g_present.u[Byz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy2 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz2 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Eyz] - g_present.u[Wyz])*invdx_d;
gy4 = (g_present.u[Nyz] - g_present.u[Syz])*invdy_d;
gz4 = (g_present.u[Tyz] - g_present.u[Byz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
#else
/*------------------------------------------------------------------------
* Calculate tip for visualization
*------------------------------------------------------------------------
*/
int sxyz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1) ) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i,j,k);
if ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) {
h_vol[I3D(nx,nxy,i,j,k)] = 255*(unsigned char)(filament(s0,sx,sy,sz,sxy,sxz,syz,sxyz,g_past,g_present));
}
else {
h_vol[I3D(nx,nxy,i,j,k)] = 0;
}
#endif
}
__device__ int tip_push_back1(vec3dyn & mt) {
int insert_pt = atomicAdd(&dev_count, 1);
if (insert_pt < NN){
dev_data1[insert_pt] = mt;
return insert_pt;}
else return -1;
}
__device__ int tip_push_back2(vec6dyn & mt) {
int insert_pt = dev_count;//atomicAdd(&dev_count, 1);
if (insert_pt < NN){
dev_data2[insert_pt] = mt;
return insert_pt;}
else return -1;
}
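/* tip_push_back1 reserves its slot with atomicAdd, whereas tip_push_back2 reads the counter
   as-is, so its entry is stored at whatever index dev_count holds at that moment rather than at
   the slot reserved by the preceding tip_push_back1 call. */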
__device__ bool filament(int s0, int sx, int sy, int sz, int sxy, int sxz, int syz, int sxyz,
REAL *g_past, stateVar g_present) {
REAL v0, vx, vy, vz, vxy, vxz, vyz, vxyz;
REAL d0, dx, dy, dz, dxy, dxz, dyz, dxyz;
REAL f0, fx, fy, fz, fxy, fxz, fyz, fxyz;
REAL s;
bool bv, bdv;
v0 = g_present.u[s0];
vx = g_present.u[sx];
vy = g_present.u[sy];
vz = g_present.u[sz];
vxy = g_present.u[sxy];
vxz = g_present.u[sxz];
vyz = g_present.u[syz];
vxyz = g_present.u[sxyz];
f0 = v0 - Uth;
fx = vx - Uth;
fy = vy - Uth;
fz = vz - Uth;
fxy = vxy - Uth;
fyz = vyz - Uth;
fxz = vxz - Uth;
fxyz = vxyz - Uth;
s = STEP(0.0, f0 )
+ STEP(0.0, fx )
+ STEP(0.0, fy )
+ STEP(0.0, fz )
+ STEP(0.0, fxy )
+ STEP(0.0, fyz )
+ STEP(0.0, fxz )
+ STEP(0.0, fxyz);
bv = ( s>0.5 ) && ( s<7.5 );
d0 = v0 - g_past[s0];
dx = vx - g_past[sx];
dy = vy - g_past[sy];
dz = vz - g_past[sz];
dxy = vxy - g_past[sxy];
dxz = vxz - g_past[sxz];
dyz = vyz - g_past[syz];
dxyz = vxyz - g_past[sxyz];
s = STEP(0.0, d0 )
+ STEP(0.0, dx )
+ STEP(0.0, dy )
+ STEP(0.0, dz )
+ STEP(0.0, dxy )
+ STEP(0.0, dyz )
+ STEP(0.0, dxz )
+ STEP(0.0, dxyz);
bdv = ( s>0.5 ) && ( s<7.5 );
return ( bdv && bv );
}
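/* A voxel is flagged as part of a filament when both the u = Uth isosurface (bv) and the surface
   of zero temporal change, u - u_past = 0 (bdv), cut through it, i.e. each set of eight corner
   tests contains values on both sides of the threshold; the filament is the intersection of the
   two surfaces. */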
// Spiral tip tracking (not precise)
VolumeType *spiralTip(dim3 grid3Dz, dim3 block3Dz, REAL *v_past_d,
stateVar gIn_d, VolumeType *h_volume) {
/*
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
hipEventRecord(start,0);
*/
hipLaunchKernelGGL(( spiralTip_kernel), dim3(grid3Dz),dim3(block3Dz), 0, 0, v_past_d, gIn_d, h_volume);
CudaCheckError();
/*
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("Time: %f ms\n", elapsedTime);
*/
return h_volume;
}
// Set voltage to zero on certain regions of the domain (conduction block) to
// initialize a spiral wave
void cutVoltage(paramVar p, stateVar g_h, stateVar g_present_d) {
int i, j, k, idx;
CudaSafeCall(hipMemcpy(g_h.u, g_present_d.u, p.memSize,
hipMemcpyDeviceToHost));
if (p.counterclock) {
for (k=0;k<nz;k++) {
for (j=0;j<ny;j++) {
for (i=nx/2;i<nx;i++) {
idx = i + nx * (j + ny * k);
g_h.u[idx] = 0.0;
}
}
}
}
if (p.clock) {
for (k=0;k<nz;k++) {
for (j=0;j<ny;j++) {
for (i=0;i<nx/2;i++) {
idx = i + nx * (j + ny * k);
g_h.u[idx] = 0.0;
}
}
}
}
CudaSafeCall(hipMemcpy(g_present_d.u, g_h.u, p.memSize,
hipMemcpyHostToDevice));
}
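/* Blanking half of the domain behind a travelling wave is the usual cross-field way of starting a
   spiral/scroll wave; by the flag names, p.counterclock and p.clock select which half is zeroed
   and hence the intended rotation sense. */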
// Stimulate with voltage certain regions of the domain
void stimulateV(int memSize, stateVar g_h, stateVar g_present_d) {
int i, j, k, idx;
CudaSafeCall(hipMemcpy(g_h.u, g_present_d.u, memSize,
hipMemcpyDeviceToHost));
for (k=(int)floor(0);k<(int)floor(nz);k++) {
for (j=(int)floor(0);j<(int)floor(ny/8);j++) {
for (i=(int)floor(0);i<(int)floor(nx);i++) {
idx = i + nx*j + nx*ny*k;
g_h.u[idx] = 1.0f;
}
}
}
CudaSafeCall(hipMemcpy(g_present_d.u, g_h.u, memSize,
hipMemcpyHostToDevice));
}
| a0eb55cac0b639390f2c2f0dece5ed0d4375cb24.cu |
/*
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Developed by: Hector Augusto Velasco-Perez
@ CHAOS Lab
@ Georgia Institute of Technology
August 07/10/2019
Special thanks to:
Dr. Flavio Fenton
Dr. Claire Yanyan Ji
Dr. Abouzar Kaboudian
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include "typedef3V-FK.h"
//#include "globalVariables.cuh"
#include "hostPrototypes.h"
#include "devicePrototypes.cuh"
#include "./common/CudaSafeCall.h"
// Weight constants
extern __constant__ REAL dt_d, rx_d, ry_d, rz_d;
extern __constant__ REAL rxyc_d, rxzc_d, ryzc_d, rxyzf_d;
extern __constant__ REAL rCxyz_d, rwe_d, rsn_d, rbt_d;
extern __constant__ REAL rxy_d, rbx_d, rby_d;
// Miscellaneous constants
extern __constant__ REAL expTau_vp_d, expTau_wp_d, expTau_wn_d;
extern __constant__ REAL invdx_d, invdy_d, invdz_d;
extern __device__ vec3dyn dev_data1[NN];
extern __device__ vec6dyn dev_data2[NN];
extern __device__ int dev_count;
/*========================================================================
* Main Entry of the Kernel
*========================================================================
*/
__global__ void FK_3V_kernel(stateVar g_out, stateVar g_in, conductionVar r,
REAL *J_d) {
/*------------------------------------------------------------------------
 * Getting i, j and k global indices
*------------------------------------------------------------------------
*/
const int i = threadIdx.x;
const int j = blockIdx.x*blockDim.y + threadIdx.y;
const int k = blockIdx.y;
/*------------------------------------------------------------------------
* return if we are outside the domain
*------------------------------------------------------------------------
*/
if( i >= nx && j>=ny && k >= nz) {
return ;
}
/*------------------------------------------------------------------------
* Converting global index into matrix indices assuming
* the column major structure of the matlab matrices
*------------------------------------------------------------------------
*/
const int nxy = nx*ny;
const int i3d = k * nxy + j * nx + i;
/*------------------------------------------------------------------------
* Setting local variables
*------------------------------------------------------------------------
*/
#ifdef DOUBLE_PRECISION
REAL u = g_in.u[i3d] ;
REAL v = g_in.v[i3d] ;
REAL w = g_in.w[i3d] ;
/*------------------------------------------------------------------------
* Additional heaviside functions
*------------------------------------------------------------------------
*/
REAL p = ( u > theta_c ) ? 1.0:0.0 ;
REAL q = ( u > theta_v ) ? 1.0:0.0 ;
/*------------------------------------------------------------------------
* Calculating dependant tau's
*------------------------------------------------------------------------
*/
REAL tau_vnu = q*tau_v1n + (1.0-q)*tau_v2n;
g_out.v[i3d] =
( u > theta_c ) ? v*expTau_vp_d : 1.0-(1.0-v)*exp(-dt_d/tau_vnu);
g_out.w[i3d] =
( u > theta_c ) ? w*expTau_wp_d : 1.0-(1.0-w)*expTau_wn_d;
/*
REAL dv2dt = (1.0-p)*(1.0-v)/tau_vnu - p*v/tau_vp;
v += dv2dt*dt_d ;
g_out.v[i3d] = v ;
REAL dw2dt = (1.0-p)*(1.0-w)/tau_wn - p*w/tau_wp;
w += dw2dt*dt_d ;
g_out.w[i3d] = w ;
*/
/*------------------------------------------------------------------------
* I_sum
*------------------------------------------------------------------------
*/
//Fast inward (Sodium)
REAL J_fi = -p*v*(1.0-u)*(u-theta_c)/tau_d;
//Slow outward (Potassium)
REAL J_so = (1.0-p)*u/tau_o + p/tau_r;
//Slow inward (Calcium)
REAL J_si = -w*(1.0 + tanh(K*(u-u_csi)))/(2.0*tau_si);
REAL I_sum = J_fi + J_so + J_si ;
J_d[i3d] = I_sum;
/*------------------------------------------------------------------------
* Laplacian Calculation
*
* No flux boundary condition is applied on all boundaries through
* the Laplacian operator definition
*------------------------------------------------------------------------
*/
int S = ( j> 0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int W = ( i> 0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i+1,j,k) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
//////////////////////
int SWxy = (i>0 && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i==0 && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i>0 && j==0)? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j+1,k) ) ) ;
int SExy = (i<(nx-1) && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i==(nx-1) && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i<(nx-1) && j==0)? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j+1,k) ) ) ;
int NWxy = (i>0 && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i==0 && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i>0 && j==(ny-1))? I3D(nx,nxy,i-1,j-1,k) : I3D(nx,nxy,i+1,j-1,k) ) ) ;
int NExy = (i<(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i==(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i<(nx-1) && j==(ny-1))? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j-1,k) ) ) ;
#ifdef PERIODIC_Z // In the z direction
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,nz-1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,0) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,nz-1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,nz-1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,0) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,0) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,nz-1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,nz-1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,0) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,0) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#else
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,k-1) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,k-1) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#endif
#ifdef ANISOTROPIC_TISSUE
/*------------------------------------------------------------------------
* Anisotropic Laplacian
*------------------------------------------------------------------------
*/
REAL rx = r.x[k];
REAL ry = r.y[k];
REAL rz = r.z[k];
REAL rbx = r.bx[k];
REAL rby = r.by[k];
REAL du2dt = ( rCxyz_d * (rx + ry + rz)*u
+ rwe_d * (4.0*rx - ry - rz)*(g_in.u[W] + g_in.u[E])
+ rsn_d * (4.0*ry - rx - rz)*(g_in.u[N] + g_in.u[S])
+ rbt_d * (4.0*rz - ry - rx)*(g_in.u[T] + g_in.u[B])
+ rxyc_d * (rx + ry)*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d * (rx + rz)*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d * (ry + rz)*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz );
du2dt -= dt_d*I_sum/C_m ;
*/
// Correction to NSWE boundary conditions
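// (The b_* terms below appear to reinstate the cross-derivative, off-diagonal
// conductivity contribution at the domain faces, which the mirrored stencil by itself
// would drop.)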
REAL b_S = (j > 0 )? 0.0:
((j==0 && (i==0 || i==(nx-1)))? 0.0:
rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)])) ;
REAL b_N = (j < (ny-1))? 0.0:
((j==(ny-1) && (i==0 || i==(nx-1)))? 0.0:
-rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)])) ;
REAL b_W = (i > 0 )? 0.0:
((i==0 && (j==0 || j==(ny-1)))? 0.0:
rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)])) ;
REAL b_E = (i < (nx-1))? 0.0:
((i==(nx-1) && (j==0 || j==(ny-1)))? 0.0:
-rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)])) ;
du2dt += (
( b_S + b_N )*ry
+ ( b_W + b_E )*rx );
// Correction to SW SE NW NE boundary conditions
REAL b_SW = (i>0 && j>0)? 0.0 :
((i==0 && j>1)? rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i>1 && j==0)? rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0)) ;
REAL b_SE = (i<(nx-1) && j>0)? 0.0 :
((i==(nx-1) && j>1)? -rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i<(nx-2) && j==0)? rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0)) ;
REAL b_NW = (i>0 && j<(ny-1))? 0.0 :
((i==0 && j<(ny-2))? rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i>1 && j==(ny-1))? -rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0)) ;
REAL b_NE = (i<(nx-1) && j<(ny-1))? 0.0 :
((i==(nx-1) && j<(ny-2))? -rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i<(nx-2) && j==(ny-1))? -rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0)) ;
du2dt += ( r.xy[k]*( (g_in.u[SWxy] + b_SW) +
(g_in.u[NExy] + b_NE) -
(g_in.u[SExy] + b_SE) -
(g_in.u[NWxy] + b_NW) ) );
#else
/*------------------------------------------------------------------------
* Isotropic Laplacian
*------------------------------------------------------------------------
*/
REAL du2dt = ( rCxyz_d*u
+ rwe_d*(g_in.u[W] + g_in.u[E])
+ rsn_d*(g_in.u[N] + g_in.u[S])
+ rbt_d*(g_in.u[T] + g_in.u[B])
+ rxyc_d*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx_d
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry_d
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz_d );
du2dt -= dt_d*I_sum/C_m ;
*/
#endif
/*------------------------------------------------------------------------
* Time integration
*------------------------------------------------------------------------
*/
u += du2dt ;
g_out.u[i3d] = u ;
/*------------------------------------------------------------------------
* Single precision
*------------------------------------------------------------------------
*/
#else
REAL u = g_in.u[i3d] ;
REAL v = g_in.v[i3d] ;
REAL w = g_in.w[i3d] ;
/*------------------------------------------------------------------------
* Additional heaviside functions
*------------------------------------------------------------------------
*/
REAL p = ( u > theta_c ) ? 1.0:0.0 ;
REAL q = ( u > theta_v ) ? 1.0:0.0 ;
/*------------------------------------------------------------------------
 * Calculating dependent tau's
*------------------------------------------------------------------------
*/
REAL tau_vnu = q*tau_v1n + (1.f-q)*tau_v2n;
g_out.v[i3d] =
( u > theta_c ) ? v*expTau_vp_d : 1.0f-(1.0f-v)*expf(-dt_d/tau_vnu);
g_out.w[i3d] =
( u > theta_c ) ? w*expTau_wp_d : 1.0f-(1.0f-w)*expTau_wn_d;
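// v and w are advanced with an exact exponential (Rush-Larsen style) update of their
// gate equations; the commented block below is the equivalent forward-Euler form.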
/*
REAL dv2dt = (1.f-p)*(1.f-v)/tau_vnu - p*v/tau_vp;
v += dv2dt*dt_d ;
g_out.v[i3d] = v ;
REAL dw2dt = (1.f-p)*(1.f-w)/tau_wn - p*w/tau_wp;
w += dw2dt*dt_d ;
g_out.w[i3d] = w ;
*/
/*------------------------------------------------------------------------
* I_sum
*------------------------------------------------------------------------
*/
//Fast inward (Sodium)
REAL J_fi = -p*v*(1.f-u)*(u-theta_c)/tau_d;
//Slow outward (Potassium)
REAL J_so = (1.f-p)*u/tau_o + p/tau_r;
//Slow inward (Calcium)
REAL J_si = -w*(1.f + tanhf(K*(u-u_csi)))/(2.f*tau_si);
REAL I_sum = J_fi + J_so + J_si ;
J_d[i3d] = I_sum;
/*------------------------------------------------------------------------
* Laplacian Calculation
*
* No flux boundary condition is applied on all boundaries through
* the Laplacian operator definition
*------------------------------------------------------------------------
*/
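// Same mirrored-index construction as in the double-precision branch: reflecting each
// out-of-range neighbour yields zero normal flux at every face.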
int S = ( j> 0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int W = ( i> 0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i+1,j,k) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int SWxy = (i>0 && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i==0 && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i>0 && j==0)? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j+1,k) ) ) ;
int SExy = (i<(nx-1) && j>0) ? I3D(nx,nxy,i+1,j-1,k) :
((i==(nx-1) && j>0) ? I3D(nx,nxy,i-1,j-1,k) :
((i<(nx-1) && j==0)? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j+1,k) ) ) ;
int NWxy = (i>0 && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i==0 && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i>0 && j==(ny-1))? I3D(nx,nxy,i-1,j-1,k) : I3D(nx,nxy,i+1,j-1,k) ) ) ;
int NExy = (i<(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i+1,j+1,k) :
((i==(nx-1) && j<(ny-1)) ? I3D(nx,nxy,i-1,j+1,k) :
((i<(nx-1) && j==(ny-1))? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j-1,k) ) ) ;
#ifdef PERIODIC_Z // In the z direction
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,nz-1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,0) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,nz-1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,nz-1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,0) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,0) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,nz-1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,nz-1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,0) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,0) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#else
int B = ( k> 0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int SWxz = (i>0 && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i==0 && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i>0 && k==0)? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k+1) ) ) ;
int SExz = (i<(nx-1) && k>0) ? I3D(nx,nxy,i+1,j,k-1) :
((i==(nx-1) && k>0) ? I3D(nx,nxy,i-1,j,k-1) :
((i<(nx-1) && k==0)? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k+1) ) ) ;
int NWxz = (i>0 && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i==0 && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i>0 && k==(nz-1))? I3D(nx,nxy,i-1,j,k-1) : I3D(nx,nxy,i+1,j,k-1) ) ) ;
int NExz = (i<(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i+1,j,k+1) :
((i==(nx-1) && k<(nz-1)) ? I3D(nx,nxy,i-1,j,k+1) :
((i<(nx-1) && k==(nz-1))? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k-1) ) ) ;
//////////////////////////
int SWyz = (j>0 && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j==0 && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j>0 && k==0)? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+1,k+1) ) ) ;
int SEyz = (j<(ny-1) && k>0) ? I3D(nx,nxy,i,j+1,k-1) :
((j==(ny-1) && k>0) ? I3D(nx,nxy,i,j-1,k-1) :
((j<(ny-1) && k==0)? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k+1) ) ) ;
int NWyz = (j>0 && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j==0 && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j>0 && k==(nz-1))? I3D(nx,nxy,i,j-1,k-1) : I3D(nx,nxy,i,j+1,k-1) ) ) ;
int NEyz = (j<(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j+1,k+1) :
((j==(ny-1) && k<(nz-1)) ? I3D(nx,nxy,i,j-1,k+1) :
((j<(ny-1) && k==(nz-1))? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k-1) ) ) ;
#endif
#ifdef ANISOTROPIC_TISSUE
/*------------------------------------------------------------------------
* Anisotropic Laplacian
*-------------------------------------------------------------------------
*/
REAL rx = r.x[k];
REAL ry = r.y[k];
REAL rz = r.z[k];
REAL rbx = r.bx[k];
REAL rby = r.by[k];
REAL du2dt = ( rCxyz_d * (rx + ry + rz)*u
+ rwe_d * (4.0*rx - ry - rz)*(g_in.u[W] + g_in.u[E])
+ rsn_d * (4.0*ry - rx - rz)*(g_in.u[N] + g_in.u[S])
+ rbt_d * (4.0*rz - ry - rx)*(g_in.u[T] + g_in.u[B])
+ rxyc_d * (rx + ry)*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d * (rx + rz)*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d * (ry + rz)*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz );
du2dt -= dt_d*I_sum/C_m ;
*/
// Correction to NSWE boundary conditions
REAL b_S = (j > 0 ) ? 0.f :
((j==0 && (i==0 || i==(nx-1)))? 0.f:
rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)]));
REAL b_N = (j < (ny-1)) ? 0.f :
((j==(ny-1) && (i==0 || i==(nx-1)))? 0.f:
-rby*(g_in.u[I3D(nx,nxy,i+1,j,k)] - g_in.u[I3D(nx,nxy,i-1,j,k)]));
REAL b_W = (i > 0 ) ? 0.f :
((i==0 && (j==0 || j==(ny-1)))? 0.f:
rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)]));
REAL b_E = (i < (nx-1)) ? 0.f :
((i==(nx-1) && (j==0 || j==(ny-1)))? 0.f:
-rbx*(g_in.u[I3D(nx,nxy,i,j+1,k)] - g_in.u[I3D(nx,nxy,i,j-1,k)]));
du2dt += (
( b_S + b_N )*ry
+ ( b_W + b_E )*rx );
// Correction to SW SE NW NE boundary conditions
REAL b_SW = (i>0 && j>0) ? 0.0f :
((i==0 && j>1) ? rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i>1 && j==0) ? rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0f));
REAL b_SE = (i<(nx-1) && j>0) ? 0.0f :
((i==(nx-1) && j>1) ? - rbx*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i,j-2,k)]) :
((i<(nx-2) && j==0) ? rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0f));
REAL b_NW = (i>0 && j<(ny-1)) ? 0.0f :
((i==0 && j<(ny-2)) ? rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i>1 && j==(ny-1)) ? - rby*(g_in.u[i3d] - g_in.u[I3D(nx,nxy,i-2,j,k)]) : 0.0f));
REAL b_NE = (i<(nx-1) && j<(ny-1)) ? 0.0f :
((i==(nx-1) && j<(ny-2)) ? - rbx*(g_in.u[I3D(nx,nxy,i,j+2,k)] - g_in.u[i3d]) :
((i<(nx-2) && j==(ny-1)) ? - rby*(g_in.u[I3D(nx,nxy,i+2,j,k)] - g_in.u[i3d]) : 0.0f));
du2dt += ( r.xy[k]*( (g_in.u[SWxy] + b_SW) +
(g_in.u[NExy] + b_NE) -
(g_in.u[SExy] + b_SE) -
(g_in.u[NWxy] + b_NW) ) );
#else
/*------------------------------------------------------------------------
* Isotropic Laplacian
*------------------------------------------------------------------------
*/
REAL du2dt = ( rCxyz_d*u
+ rwe_d*(g_in.u[W] + g_in.u[E])
+ rsn_d*(g_in.u[N] + g_in.u[S])
+ rbt_d*(g_in.u[T] + g_in.u[B])
+ rxyc_d*( g_in.u[SWxy] +
g_in.u[SExy] +
g_in.u[NWxy] +
g_in.u[NExy] )
+ rxzc_d*( g_in.u[SWxz] +
g_in.u[SExz] +
g_in.u[NWxz] +
g_in.u[NExz] )
+ ryzc_d*( g_in.u[SWyz] +
g_in.u[SEyz] +
g_in.u[NWyz] +
g_in.u[NEyz] ) ) ;
du2dt -= ( dt_d*( 0.5f*I_sum
+ rxyzf_d * ( ( J_d[E] + J_d[W] )
+ ( J_d[N] + J_d[S] )
+ ( J_d[B] + J_d[T] ) ) ) / C_m ) ;
/*
REAL du2dt = (
+ ( g_in.u[W] - 2.f*u + g_in.u[E] )*rx_d
+ ( g_in.u[N] - 2.f*u + g_in.u[S] )*ry_d
+ ( g_in.u[T] - 2.f*u + g_in.u[B] )*rz_d );
du2dt -= dt_d*I_sum/C_m ;
*/
#endif
/*------------------------------------------------------------------------
* Time integration
*------------------------------------------------------------------------
*/
u += du2dt ;
g_out.u[i3d] = u ;
#endif
}
void FK_3V_wrapper(dim3 grid3D, dim3 block3D, stateVar gOut_d, stateVar gIn_d,
conductionVar r_d, REAL *J_current_d) {
/*
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
cudaEventRecord(start,0);
*/
FK_3V_kernel<<<grid3D, block3D>>>(gOut_d, gIn_d, r_d, J_current_d);
CudaCheckError();
/*
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("3V kernel took: %f ms\n", elapsedTime);
//t += 2.0*dt;
*/
/*
SIM_2V_kernel<<<grid3D, block3D>>>(gIn_d, gOut_d, r_d, J_current_d);
CudaCheckError();
*/
//swapSoA(&gIn_d, &gOut_d);
}
// This function launches all functions that need to be processed at every frame
// No graphics functions are launched from here
void animation(dim3 grid3D, dim3 block3D,
stateVar g_h, stateVar gOut_d, stateVar gIn_d, REAL *J_current_d,
conductionVar r_d, paramVar param, REAL *pt_h, REAL *pt_d,
std::vector<electrodeVar> &electrode,
bool initConditionFlag) {
#pragma unroll
for (int i=0;i<(ITPERFRAME);i++) {
FK_3V_wrapper(grid3D,block3D,gOut_d,gIn_d,r_d,J_current_d);
swapSoA(&gIn_d, &gOut_d);
}
// Single point time tracking
singlePoint(gIn_d,pt_h,pt_d,param.singlePointPixel,electrode);
}
__global__ void singlePoint_kernel(stateVar g_in, REAL *pt_d,
int singlePointPixel) {
pt_d[0] = g_in.u[singlePointPixel];
pt_d[1] = g_in.v[singlePointPixel];
pt_d[2] = g_in.w[singlePointPixel]; // was g_in.v (a duplicate of pt_d[1]); w looks like the intended third trace
}
void singlePoint(stateVar gIn_d, REAL *pt_h, REAL *pt_d,
int singlePointPixel, std::vector<electrodeVar> &electrode) {
singlePoint_kernel<<<1,1>>>(gIn_d, pt_d, singlePointPixel);
CudaCheckError();
CudaSafeCall(cudaMemcpy(pt_h, pt_d, 3*sizeof(REAL),cudaMemcpyDeviceToHost));
electrodeVar data = {
pt_h[0],
pt_h[1],
pt_h[2],
};
electrode.push_back(data);
}
__global__ void copyRender_kernel(int totpoints, stateVar g_in,
VolumeType *h_volume) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < totpoints; i += stride) {
h_volume[i] = (unsigned char)(255.f*(float)g_in.u[i]*0.9f);
}
}
// Convert numerical values of the PDE solution to colors (char)
//extern "C"
void copyRender(dim3 grid1D, dim3 block1D, int totpoints,
stateVar gIn_d, VolumeType *h_volume) {
/*
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
cudaEventRecord(start,0);
*/
copyRender_kernel<<<grid1D, block1D>>>(totpoints, gIn_d, h_volume);
CudaCheckError();
/*
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Time: %f ms\n", elapsedTime);
*/
}
__global__ void spiralTip_kernel(REAL *g_past, stateVar g_present,
VolumeType *h_vol) {
/*------------------------------------------------------------------------
 * Getting i, j, and k global indices
*------------------------------------------------------------------------
*/
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int k = threadIdx.y;
/*------------------------------------------------------------------------
* Return if we are outside the domain
*------------------------------------------------------------------------
*/
if( i >= nx || j >= ny || k >= nz) { // reject any out-of-range index (the original '&&' only caught threads outside in all three directions at once)
return ;
}
const int nxy = nx*ny;
int s0 = I3D(nx,nxy,i,j,k);
int sx = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i,j,k);
int sy = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j,k);
int sz = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k);
int sxy = ( (j<(ny-1)) && (i<(nx-1) ) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i,j,k);
int sxz = ( (k<(nz-1)) && (i<(nx-1) ) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i,j,k);
int syz = ( (j<(ny-1)) && (k<(nz-1) ) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j,k);
#ifdef SPIRALTIP_INTERPOLATION
/*------------------------------------------------------------------------
* Calculate pixel position of filament
*------------------------------------------------------------------------
*/
REAL x1, x2, x3, x4, y1, y2, y3, y4;
REAL x3y1, x4y1, x3y2, x4y2, x1y3, x2y3, x1y4, x2y4, x2y1, x1y2, x4y3, x3y4;
REAL den1, den2, ctn1, ctn2, disc, xtip, ytip, px, py, sroot1, sroot2;
REAL gx, gy, gz, gx1, gx2, gx3, gx4, gy1, gy2, gy3, gy4, gz1, gz2, gz3, gz4;
/*------------------------------------------------------------------------
* Calculate pixel position of filament and plot
*------------------------------------------------------------------------
*/
int S = ( j>0 ) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j+1,k) ;
int Sx = ( (j>0) && (i<(nx-1)) ) ? I3D(nx,nxy,i+1,j-1,k) : I3D(nx,nxy,i-1,j+1,k) ;
int Sy = I3D(nx,nxy,i,j,k) ;
int Sxy = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Sz = ( j>0 ) ? I3D(nx,nxy,i,j-1,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int Sxz = ( j>0 ) ? I3D(nx,nxy,i+1,j-1,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int Syz = ( j>0 ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j+2,k+1) ;
int N = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int Nx = ( (i<(nx-1)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j-1,k) ;
int Ny = ( j<(ny-2) ) ? I3D(nx,nxy,i,j+2,k) : (( j==(ny-2) ) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i,j-1,k)) ;
int Nxy = ( (i<(nx-1)) && (j<(ny-2)) ) ? I3D(nx,nxy,i+1,j+2,k) : ((j==(ny-2)) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i-1,j-1,k)) ;
int Nz = ( (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k-1) ;
int Nxz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int Nyz = ( (j<(ny-2)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+2,k+1) : ((k==(nz-2)) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j-1,k-1) );
int W = ( i>0 ) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i+1,j,k) ; // else-branch mirrored like S/N/E; the original i-1 read out of bounds at i==0
int Wx = I3D(nx,nxy,i,j,k) ;
int Wy = ( (i>0) && (j<(ny-1)) ) ? I3D(nx,nxy,i-1,j+1,k) : I3D(nx,nxy,i+1,j-1,k) ;
int Wxy = ( (j<(ny-1)) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i-1,j,k) ;
int Wz = ( (i>0) && (k<(nz-1)) ) ? I3D(nx,nxy,i-1,j,k+1) : I3D(nx,nxy,i+1,j,k-1) ;
int Wxz = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int Wyz = ( (i>0) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i-1,j+1,k+1) : I3D(nx,nxy,i+1,j-1,k-1) ;
int E = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Ex = ( i<(nx-2) ) ? I3D(nx,nxy,i+2,j,k) : ((i==(nx-2)) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i-1,j,k));
int Ey = ( (i<(nx-1)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+1,j+1,k) : I3D(nx,nxy,i-1,j-1,k) ;
int Exy = ( (i<(nx-2)) && (j<(ny-1)) ) ? I3D(nx,nxy,i+2,j+1,k) : ( (i==(nx-2)) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i-1,j-1,k)) ;
int Ez = ( (i<(nx-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k-1) ;
int Exz = ( (i<(nx-2)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+2,j,k+1) : ( (i==(nx-2)) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i-1,j,k-1) );
int Eyz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int B = ( k>0 ) ? I3D(nx,nxy,i,j,k-1) : I3D(nx,nxy,i,j,k+1) ;
int Bx = ( (k>0) && (i<(nx-1)) ) ? I3D(nx,nxy,i+1,j,k-1) : I3D(nx,nxy,i-1,j,k+1) ;
int By = ( (k>0) && (j<(ny-1)) ) ? I3D(nx,nxy,i,j+1,k-1) : I3D(nx,nxy,i,j-1,k+1) ;
int Bxy = ( (i<(nx-1)) && (j<(ny-1)) && (k>0) ) ? I3D(nx,nxy,i+1,j+1,k-1) : I3D(nx,nxy,i-1,j-1,k+1) ;
int Bz = I3D(nx,nxy,i,j,k);
int Bxz = ( i<(nx-1) ) ? I3D(nx,nxy,i+1,j,k) : I3D(nx,nxy,i-1,j,k) ;
int Byz = ( j<(ny-1) ) ? I3D(nx,nxy,i,j+1,k) : I3D(nx,nxy,i,j-1,k) ;
int T = ( k<(nz-1) ) ? I3D(nx,nxy,i,j,k+1) : I3D(nx,nxy,i,j,k-1) ;
int Tx = ( (i<(nx-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j,k+1) : I3D(nx,nxy,i-1,j,k-1) ;
int Ty = ( (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i,j+1,k+1) : I3D(nx,nxy,i,j-1,k-1) ;
int Txy = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i-1,j-1,k-1) ;
int Tz = ( k<(nz-2) ) ? I3D(nx,nxy,i,j,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i,j,k) : I3D(nx,nxy,i,j,k-1));
int Txz = ( (i<(nx-1)) && k<(nz-2) ) ? I3D(nx,nxy,i+1,j,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i-1,j,k) : I3D(nx,nxy,i-1,j,k-1));
int Tyz = ( (j<(ny-1)) && (k<(nz-2)) ) ? I3D(nx,nxy,i,j+1,k+2) : ( (k==(nz-2)) ? I3D(nx,nxy,i,j-1,k) : I3D(nx,nxy,i,j-1,k-1) );
/*------------------------------------------------------------------------
* XY plane
*------------------------------------------------------------------------
*/
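// x1..x4 hold u at the four corners of the unit cell (s0, sx, sxy, sy) at the present
// time and y1..y4 the same corners at the stored past time. The algebra below finds the
// intersection of the two bilinear u = Uth isolines; a root with xtip,ytip in (0,1) and
// disc > 0 marks a tip (phase singularity) crossing this cell.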
x1 = g_present.u[s0];
x2 = g_present.u[sx];
x4 = g_present.u[sy];
x3 = g_present.u[sxy];
y1 = g_past[s0];
y2 = g_past[sx];
y4 = g_past[sy];
y3 = g_past[sxy];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* XY plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy3 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz3 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx4 = (g_present.u[Exy] - g_present.u[Wxy])*invdx_d;
gy4 = (g_present.u[Nxy] - g_present.u[Sxy])*invdy_d;
gz4 = (g_present.u[Txy] - g_present.u[Bxy])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy3 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz3 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx4 = (g_present.u[Exy] - g_present.u[Wxy])*invdx_d;
gy4 = (g_present.u[Nxy] - g_present.u[Sxy])*invdy_d;
gz4 = (g_present.u[Txy] - g_present.u[Bxy])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j+ytip, .z = (REAL)k, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* XZ plane
* Clockwise direction
*------------------------------------------------------------------------
*/
x1 = g_present.u[s0];
x2 = g_present.u[sx];
x3 = g_present.u[sxz];
x4 = g_present.u[sz];
y1 = g_past[s0];
y2 = g_past[sx];
y3 = g_past[sxz];
y4 = g_past[sz];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* XZ plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Exz] - g_present.u[Wxz])*invdx_d;
gy4 = (g_present.u[Nxz] - g_present.u[Sxz])*invdy_d;
gz4 = (g_present.u[Txz] - g_present.u[Bxz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ex] - g_present.u[Wx])*invdx_d;
gy2 = (g_present.u[Nx] - g_present.u[Sx])*invdy_d;
gz2 = (g_present.u[Tx] - g_present.u[Bx])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Exz] - g_present.u[Wxz])*invdx_d;
gy4 = (g_present.u[Nxz] - g_present.u[Sxz])*invdy_d;
gz4 = (g_present.u[Txz] - g_present.u[Bxz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i+xtip, .y = (REAL)j, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
 * YZ plane
 * Anticlockwise direction
*------------------------------------------------------------------------
*/
x1 = g_present.u[s0];
x2 = g_present.u[sy];
x3 = g_present.u[syz];
x4 = g_present.u[sz];
y1 = g_past[s0];
y2 = g_past[sy];
y3 = g_past[syz];
y4 = g_past[sz];
x3y1 = x3*y1;
x4y1 = x4*y1;
x3y2 = x3*y2;
x4y2 = x4*y2;
x1y3 = x1*y3;
x2y3 = x2*y3;
x1y4 = x1*y4;
x2y4 = x2*y4;
x2y1 = x2*y1;
x1y2 = x1*y2;
x4y3 = x4*y3;
x3y4 = x3*y4;
den1 = 2.0*(x3y1 - x4y1 - x3y2 + x4y2 - x1y3 + x2y3 + x1y4 - x2y4);
den2 = 2.0*(x2y1 - x3y1 - x1y2 + x4y2 + x1y3 - x4y3 - x2y4 + x3y4);
ctn1 = x1 - x2 + x3 - x4 - y1 + y2 - y3 + y4;
ctn2 = x3y1 - 2.0*x4y1 + x4y2 - x1y3 + 2.0*x1y4 - x2y4;
disc = 4.0 * ( x3y1 - x3y2 - x4y1 + x4y2 - x1y3 + x1y4 + x2y3 - x2y4 )
* (x4y1 - x1y4 + Uth * (x1 - x4 - y1 + y4)) +
( -ctn2 + Uth * ctn1 ) * (-ctn2 + Uth * ctn1 );
px = -(Uth * ctn1 - ctn2)/den1;
py = (Uth * ctn1)/den2 -
(-2.0* x2y1 + x3y1 + 2.0 *x1y2 - x4y2 - x1y3 + x2y4)/den2;
sroot1 = sqrt(disc)/den1;
sroot2 = sqrt(disc)/den2;
/*------------------------------------------------------------------------
* YZ plane
* Clockwise direction
*------------------------------------------------------------------------
*/
xtip = px + sroot1;
ytip = py + sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy2 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz2 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Eyz] - g_present.u[Wyz])*invdx_d;
gy4 = (g_present.u[Nyz] - g_present.u[Syz])*invdy_d;
gz4 = (g_present.u[Tyz] - g_present.u[Byz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
/*------------------------------------------------------------------------
* Anticlockwise direction
*------------------------------------------------------------------------
*/
xtip = px - sroot1;
ytip = py - sroot2;
if ( ( ((xtip > 0.0) && (xtip < 1.0)) +
((ytip > 0.0) && (ytip < 1.0)) +
( disc > 0.0 ) ) > 2 ) {
/*
gx = (g_present.u[E] - g_present.u[W])*invdx_d;
gy = (g_present.u[N] - g_present.u[S])*invdy_d;
gz = (g_present.u[T] - g_present.u[B])*invdz_d;
*/
gx1 = (g_present.u[E] - g_present.u[W])*invdx_d;
gy1 = (g_present.u[N] - g_present.u[S])*invdy_d;
gz1 = (g_present.u[T] - g_present.u[B])*invdz_d;
gx2 = (g_present.u[Ey] - g_present.u[Wy])*invdx_d;
gy2 = (g_present.u[Ny] - g_present.u[Sy])*invdy_d;
gz2 = (g_present.u[Ty] - g_present.u[By])*invdz_d;
gx3 = (g_present.u[Ez] - g_present.u[Wz])*invdx_d;
gy3 = (g_present.u[Nz] - g_present.u[Sz])*invdy_d;
gz3 = (g_present.u[Tz] - g_present.u[Bz])*invdz_d;
gx4 = (g_present.u[Eyz] - g_present.u[Wyz])*invdx_d;
gy4 = (g_present.u[Nyz] - g_present.u[Syz])*invdy_d;
gz4 = (g_present.u[Tyz] - g_present.u[Byz])*invdz_d;
gx = (1.0 - xtip)*(1.0 - ytip)*gx1 +
xtip*(1.0 - ytip)*gx2 + ytip*(1.0 - xtip)*gx3 + xtip*ytip*gx4;
gy = (1.0 - xtip)*(1.0 - ytip)*gy1 +
xtip*(1.0 - ytip)*gy2 + ytip*(1.0 - xtip)*gy3 + xtip*ytip*gy4;
gz = (1.0 - xtip)*(1.0 - ytip)*gz1 +
xtip*(1.0 - ytip)*gz2 + ytip*(1.0 - xtip)*gz3 + xtip*ytip*gz4;
vec3dyn a = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip};
vec6dyn b = { .x = (REAL)i, .y = (REAL)j+xtip, .z = (REAL)k+ytip, .vx = gx, .vy = gy, .vz = gz};
tip_push_back1(a);
tip_push_back2(b);
h_vol[I3D(nx,nxy,i,j,k)] = (unsigned char)255;
}
#else
/*------------------------------------------------------------------------
* Calculate tip for visualization
*------------------------------------------------------------------------
*/
int sxyz = ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1) ) ) ? I3D(nx,nxy,i+1,j+1,k+1) : I3D(nx,nxy,i,j,k);
if ( (i<(nx-1)) && (j<(ny-1)) && (k<(nz-1)) ) {
h_vol[I3D(nx,nxy,i,j,k)] = 255*(unsigned char)(filament(s0,sx,sy,sz,sxy,sxz,syz,sxyz,g_past,g_present));
}
else {
h_vol[I3D(nx,nxy,i,j,k)] = 0;
}
#endif
}
__device__ int tip_push_back1(vec3dyn & mt) {
int insert_pt = atomicAdd(&dev_count, 1);
if (insert_pt < NN){
dev_data1[insert_pt] = mt;
return insert_pt;}
else return -1;
}
__device__ int tip_push_back2(vec6dyn & mt) {
int insert_pt = dev_count;//atomicAdd(&dev_count, 1);
if (insert_pt < NN){
dev_data2[insert_pt] = mt;
return insert_pt;}
else return -1;
}
__device__ bool filament(int s0, int sx, int sy, int sz, int sxy, int sxz, int syz, int sxyz,
REAL *g_past, stateVar g_present) {
REAL v0, vx, vy, vz, vxy, vxz, vyz, vxyz;
REAL d0, dx, dy, dz, dxy, dxz, dyz, dxyz;
REAL f0, fx, fy, fz, fxy, fxz, fyz, fxyz;
REAL s;
bool bv, bdv;
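// bv is true when the voxel straddles the u = Uth isosurface at the present time and
// bdv when it straddles the sign change of (u_present - u_past); a voxel is marked as
// part of the filament only when both surfaces cross it.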
v0 = g_present.u[s0];
vx = g_present.u[sx];
vy = g_present.u[sy];
vz = g_present.u[sz];
vxy = g_present.u[sxy];
vxz = g_present.u[sxz];
vyz = g_present.u[syz];
vxyz = g_present.u[sxyz];
f0 = v0 - Uth;
fx = vx - Uth;
fy = vy - Uth;
fz = vz - Uth;
fxy = vxy - Uth;
fyz = vyz - Uth;
fxz = vxz - Uth;
fxyz = vxyz - Uth;
s = STEP(0.0, f0 )
+ STEP(0.0, fx )
+ STEP(0.0, fy )
+ STEP(0.0, fz )
+ STEP(0.0, fxy )
+ STEP(0.0, fyz )
+ STEP(0.0, fxz )
+ STEP(0.0, fxyz);
bv = ( s>0.5 ) && ( s<7.5 );
d0 = v0 - g_past[s0];
dx = vx - g_past[sx];
dy = vy - g_past[sy];
dz = vz - g_past[sz];
dxy = vxy - g_past[sxy];
dxz = vxz - g_past[sxz];
dyz = vyz - g_past[syz];
dxyz = vxyz - g_past[sxyz];
s = STEP(0.0, d0 )
+ STEP(0.0, dx )
+ STEP(0.0, dy )
+ STEP(0.0, dz )
+ STEP(0.0, dxy )
+ STEP(0.0, dyz )
+ STEP(0.0, dxz )
+ STEP(0.0, dxyz);
bdv = ( s>0.5 ) && ( s<7.5 );
return ( bdv && bv );
}
// Spiral tip tracking (not precise)
VolumeType *spiralTip(dim3 grid3Dz, dim3 block3Dz, REAL *v_past_d,
stateVar gIn_d, VolumeType *h_volume) {
/*
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
cudaEventRecord(start,0);
*/
spiralTip_kernel<<<grid3Dz,block3Dz>>>(v_past_d, gIn_d, h_volume);
CudaCheckError();
/*
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Time: %f ms\n", elapsedTime);
*/
return h_volume;
}
// Set voltage to zero in certain regions of the domain (conduction block) to
// initialize a spiral wave
void cutVoltage(paramVar p, stateVar g_h, stateVar g_present_d) {
int i, j, k, idx;
CudaSafeCall(cudaMemcpy(g_h.u, g_present_d.u, p.memSize,
cudaMemcpyDeviceToHost));
if (p.counterclock) {
for (k=0;k<nz;k++) {
for (j=0;j<ny;j++) {
for (i=nx/2;i<nx;i++) {
idx = i + nx * (j + ny * k);
g_h.u[idx] = 0.0;
}
}
}
}
if (p.clock) {
for (k=0;k<nz;k++) {
for (j=0;j<ny;j++) {
for (i=0;i<nx/2;i++) {
idx = i + nx * (j + ny * k);
g_h.u[idx] = 0.0;
}
}
}
}
CudaSafeCall(cudaMemcpy(g_present_d.u, g_h.u, p.memSize,
cudaMemcpyHostToDevice));
}
// Stimulate certain regions of the domain with voltage
void stimulateV(int memSize, stateVar g_h, stateVar g_present_d) {
int i, j, k, idx;
CudaSafeCall(cudaMemcpy(g_h.u, g_present_d.u, memSize,
cudaMemcpyDeviceToHost));
for (k=(int)floor(0);k<(int)floor(nz);k++) {
for (j=(int)floor(0);j<(int)floor(ny/8);j++) {
for (i=(int)floor(0);i<(int)floor(nx);i++) {
idx = i + nx*j + nx*ny*k;
g_h.u[idx] = 1.0f;
}
}
}
CudaSafeCall(cudaMemcpy(g_present_d.u, g_h.u, memSize,
cudaMemcpyHostToDevice));
}
|
be9ae23301aeac94ad70ccb8933da19fa2ae86e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int count;
hipGetDeviceCount(&count);
printf("Found %d CUDA devices\n", count);
printf("=========================================\n");
for (int device = 0; device < count; device++) {
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
printf("Information for %s (device %d):\n", prop.name, device);
printf(" Total global memory: %zd\n", prop.totalGlobalMem);
printf(" Total const memory: %zd\n", prop.totalConstMem);
printf(" Shared memory per block: %zd\n", prop.sharedMemPerBlock);
printf(" Warp size: %d\n", prop.warpSize);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads dimension: [ %d, %d, %d ]\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf(" Max grid size: [ %d, %d, %d ]\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("-----------------------------------------\n");
}
return 0;
}
| be9ae23301aeac94ad70ccb8933da19fa2ae86e4.cu | #include <cuda.h>
#include <stdio.h>
int main() {
int count;
cudaGetDeviceCount(&count);
printf("Found %d CUDA devices\n", count);
printf("=========================================\n");
for (int device = 0; device < count; device++) {
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
printf("Information for %s (device %d):\n", prop.name, device);
printf(" Total global memory: %zd\n", prop.totalGlobalMem);
printf(" Total const memory: %zd\n", prop.totalConstMem);
printf(" Shared memory per block: %zd\n", prop.sharedMemPerBlock);
printf(" Warp size: %d\n", prop.warpSize);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads dimension: [ %d, %d, %d ]\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf(" Max grid size: [ %d, %d, %d ]\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("-----------------------------------------\n");
}
return 0;
}
|
7ea9d3e77c3789811a2568e50b216d3f2494a935.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <gmp.h>
#include <cassert>
#include "cgbn/cgbn.h"
#include "utility/support.h"
#define TPI 16
#define BITS 1024
#define TPB 128 // threads per block (divisible by 32)
static const uint32_t TPI_ONES=(1ull<<TPI)-1;
// List of GPU params. BI generally stands for big integer.
struct MyGpuParams {
static const int BI_BITS = 1024;
static const int BI_BYTES = 128;
static const int BI_BITS_PER_LIMB = 64;
static const int BI_LIMBS = 16;
static const int BI_TPI = 16; // Threads per instance, this has to match LIMBS per BigInt
};
// Fq really represents a big integer of BI_LIMBS limbs of type uint64_t. But since this
// is CUDA code executed in parallel, each thread's mfq_t value holds a single limb.
typedef uint64_t mfq_t;
__constant__ mfq_t mnt4_modulus_device[16];
// Class represents a big integer vector. But since it uses a GPU, all operations are
// defined on a single big integer which is of a fixed size.
// The basic data type is kept fixed at uint64_t.
typedef struct {
mfq_t x[MyGpuParams::BI_LIMBS];
mfq_t y[MyGpuParams::BI_LIMBS];
} tuple_mfq_ti; // ti for instance, that is full array
typedef struct {
mfq_t a0[MyGpuParams::BI_LIMBS];
mfq_t a1[MyGpuParams::BI_LIMBS];
} mfq2_ti; // ti for instance, that is full array
typedef struct {
mfq2_ti A;
mfq2_ti B;
} mquad_ti;
typedef struct {
uint32_t lane;
uint32_t sync_mask;
uint32_t instance_number;
uint32_t instance_count;
uint32_t warp_number;
uint32_t subwarp_number; // 0 or 1
} thread_context_t;
__device__ void fq2_add(thread_context_t& tc, mfq_t& a, mfq_t& b);
__device__ __forceinline__ static int32_t fast_propagate_add_u64(thread_context_t& tc,
const uint32_t carry, uint64_t &x);
__device__ void compute_context(thread_context_t& t, uint32_t instance_count) {
t.instance_number =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
t.lane = threadIdx.x & TPI-1;
t.warp_number = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
t.instance_count = instance_count;
// two sub warps per warp.
t.subwarp_number = t.instance_number % 2;
t.sync_mask = (t.subwarp_number == 0) ? 0x0000FFFF: 0xFFFF0000;
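// With TPI == 16, every 32-thread warp holds two instances; sync_mask restricts the
// __ballot_sync calls to this instance's half-warp so neighbouring instances do not
// see each other's carry/borrow votes.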
}
__device__ __forceinline__ uint64_t add_cc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("add.cc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__ __forceinline__ uint32_t add_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("add.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t addc_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("addc.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ static int32_t fast_propagate_add_u64(thread_context_t& tc,
const uint32_t carry, uint64_t &x) {
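// Carry resolution across the 16 lanes of one instance: g flags lanes that generated a
// carry, p flags lanes whose limb is all ones (they would propagate an incoming carry).
// The integer sum g+g+p ripples carries through the propagate chain in one add, and
// c != 0 tells this lane that a carry arrived, so it bumps its limb by one.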
//uint32_t warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t lane_mask = 1 << tc.lane;
uint32_t g, p, c;
uint64_t sum;
g=__ballot_sync(tc.sync_mask, carry==1);
p=__ballot_sync(tc.sync_mask, x==0xFFFFFFFFFFFFFFFFull);
g = (tc.subwarp_number == 0) ? g : g >> 16;
p = (tc.subwarp_number == 0) ? p : p >> 16;
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x+(c!=0);
return sum>>16; // -(p==0xFFFFFFFF);
}
__device__
void fq2_add_nomod(thread_context_t& tc, mfq_t& a, mfq_t& b) {
uint64_t sum, carry;
// THIS IS WRONG. FIX ME.
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
fast_propagate_add_u64(tc, carry, sum);
a = sum;
}
__device__
void fq2_add(thread_context_t& tc, mfq_t& a, mfq_t& b) {
// HUGELY WRONG. FIX ME.
fq2_add_nomod(tc, a, b);
}
__device__ __forceinline__ uint32_t sub_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("sub.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ static int32_t fast_propagate_sub_u64(thread_context_t& tc, const uint32_t carry, uint64_t &x) {
// uint32_t sync=0xFFFFFFFF, warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t lane_mask = 1 << tc.lane;
uint32_t g, p, c;
uint64_t sum = 0;
g=__ballot_sync(tc.sync_mask, carry==0xFFFFFFFF);
p=__ballot_sync(tc.sync_mask, x==0);
g = (tc.subwarp_number == 0) ? g : (g >> 16);
p = (tc.subwarp_number == 0) ? p : (p >> 16);
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x-(c!=0);
return (sum>>16); // -(p==0xFFFFFFFF);
}
__device__ __forceinline__ static int32_t fast_propagate_sub(thread_context_t& tc, const uint32_t carry, uint32_t &x) {
// uint32_t sync=0xFFFFFFFF, warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t g, p, c;
uint64_t sum;
uint32_t lane_mask = 1 << tc.lane;
g=__ballot_sync(tc.sync_mask, carry==0xFFFFFFFF);
p=__ballot_sync(tc.sync_mask, x==0);
g = (tc.subwarp_number == 0) ? g : g >> 16;
p = (tc.subwarp_number == 0) ? p : p >> 16;
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x-(c!=0);
return (sum>>32); // -(p==0xFFFFFFFF);
}
__device__ __forceinline__ uint64_t subc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("subc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__ __forceinline__ uint32_t subc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("subc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint64_t sub_cc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("sub.cc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__
int dev_sub(thread_context_t& tc, uint32_t& a, uint32_t& b) {
uint32_t carry;
a = sub_cc(a, b);
carry=subc(0, 0);
return -fast_propagate_sub(tc, carry, a);
}
__device__ __forceinline__
int dev_sub_u64(thread_context_t& tc, uint64_t& a, uint64_t& b) {
uint32_t carry;
a = sub_cc_u64(a, b);
carry=subc(0, 0);
return -fast_propagate_sub_u64(tc, carry, a);
}
// Assuming either a < b, or b < a < 2b, we subtract b
// from a and keep the difference only if the subtraction did not borrow.
__device__ __forceinline__
void one_mod_u64(thread_context_t& tc, uint64_t& a, uint64_t& b) {
uint64_t dummy_a = a;
int which = dev_sub_u64(tc, dummy_a, b);
a = (which == -1) ? a : dummy_a;
}
__device__
int32_t fq_add_nomod(thread_context_t& tc, mfq_t& a, mfq_t& b) {
uint64_t sum, carry;
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
carry = fast_propagate_add_u64(tc, carry, sum);
a = sum;
return carry;
}
__device__
void fq_add_mod(thread_context_t& tc, mfq_t& a, mfq_t& b, mfq_t& m) {
uint64_t sum, carry;
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
fast_propagate_add_u64(tc, carry, sum);
a = sum;
// DO THE MODULUS.
one_mod_u64(tc, a, m);
}
__device__
void fq_sub_mod(thread_context_t& tc, mfq_t& a, mfq_t& b, mfq_t& m) {
int which = dev_sub_u64(tc, a, b);
if (which == -1) {
fq_add_nomod(tc, a, m);
}
}
__global__
void fq2_add_kernel(mquad_ti* instances, uint32_t instance_count) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
// THIS IS WRONG.
fq2_add(tc, instances[tc.instance_number].A.a0[tc.lane],
instances[tc.instance_number].B.a0[tc.lane]);
}
// X - Y
__global__
void fq_sub_kernel(tuple_mfq_ti* instances, uint32_t instance_count, mfq_t modulus[]) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
fq_sub_mod(tc, instances[tc.instance_number].x[tc.lane],
instances[tc.instance_number].y[tc.lane], mnt4_modulus_device[tc.lane]);
}
__global__
void fq_add_kernel(tuple_mfq_ti* instances, uint32_t instance_count, mfq_t modulus[]) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
fq_add_mod(tc, instances[tc.instance_number].x[tc.lane],
instances[tc.instance_number].y[tc.lane], mnt4_modulus_device[tc.lane]);
}
void load_mnt4_modulus() {
hipMemcpyToSymbol(mnt4_modulus_device, mnt4_modulus, bytes_per_elem, 0, hipMemcpyHostToDevice);
}
| 7ea9d3e77c3789811a2568e50b216d3f2494a935.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <cuda.h>
#include <gmp.h>
#include <cassert>
#include "cgbn/cgbn.h"
#include "utility/support.h"
#define TPI 16
#define BITS 1024
#define TPB 128 // threads per block (divisible by 32)
static const uint32_t TPI_ONES=(1ull<<TPI)-1;
// List of GPU params. BI generally stands for big integer.
struct MyGpuParams {
static const int BI_BITS = 1024;
static const int BI_BYTES = 128;
static const int BI_BITS_PER_LIMB = 64;
static const int BI_LIMBS = 16;
static const int BI_TPI = 16; // Threads per instance, this has to match LIMBS per BigInt
};
// Fq really represents a big integer of BI_LIMBS limbs of type uint64_t. But since this
// is CUDA code executed in parallel, each thread's mfq_t value holds a single limb.
typedef uint64_t mfq_t;
__constant__ mfq_t mnt4_modulus_device[16];
// Class represents a big integer vector. But since it uses a GPU, all operations are
// defined on a single big integer which is of a fixed size.
// The basic data type is kept fixed at uint64_t.
typedef struct {
mfq_t x[MyGpuParams::BI_LIMBS];
mfq_t y[MyGpuParams::BI_LIMBS];
} tuple_mfq_ti; // ti for instance, that is full array
typedef struct {
mfq_t a0[MyGpuParams::BI_LIMBS];
mfq_t a1[MyGpuParams::BI_LIMBS];
} mfq2_ti; // ti for instance, that is full array
typedef struct {
mfq2_ti A;
mfq2_ti B;
} mquad_ti;
typedef struct {
uint32_t lane;
uint32_t sync_mask;
uint32_t instance_number;
uint32_t instance_count;
uint32_t warp_number;
uint32_t subwarp_number; // 0 or 1
} thread_context_t;
__device__ void fq2_add(thread_context_t& tc, mfq_t& a, mfq_t& b);
__device__ __forceinline__ static int32_t fast_propagate_add_u64(thread_context_t& tc,
const uint32_t carry, uint64_t &x);
__device__ void compute_context(thread_context_t& t, uint32_t instance_count) {
t.instance_number =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
t.lane = threadIdx.x & TPI-1;
t.warp_number = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
t.instance_count = instance_count;
// two sub warps per warp.
t.subwarp_number = t.instance_number % 2;
t.sync_mask = (t.subwarp_number == 0) ? 0x0000FFFF: 0xFFFF0000;
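// With TPI == 16, every 32-thread warp holds two instances; sync_mask restricts the
// __ballot_sync calls to this instance's half-warp so neighbouring instances do not
// see each other's carry/borrow votes.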
}
__device__ __forceinline__ uint64_t add_cc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("add.cc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__ __forceinline__ uint32_t add_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("add.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint32_t addc_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("addc.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ static int32_t fast_propagate_add_u64(thread_context_t& tc,
const uint32_t carry, uint64_t &x) {
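// Carry resolution across the 16 lanes of one instance: g flags lanes that generated a
// carry, p flags lanes whose limb is all ones (they would propagate an incoming carry).
// The integer sum g+g+p ripples carries through the propagate chain in one add, and
// c != 0 tells this lane that a carry arrived, so it bumps its limb by one.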
//uint32_t warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t lane_mask = 1 << tc.lane;
uint32_t g, p, c;
uint64_t sum;
g=__ballot_sync(tc.sync_mask, carry==1);
p=__ballot_sync(tc.sync_mask, x==0xFFFFFFFFFFFFFFFFull);
g = (tc.subwarp_number == 0) ? g : g >> 16;
p = (tc.subwarp_number == 0) ? p : p >> 16;
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x+(c!=0);
return sum>>16; // -(p==0xFFFFFFFF);
}
__device__
void fq2_add_nomod(thread_context_t& tc, mfq_t& a, mfq_t& b) {
uint64_t sum, carry;
// THIS IS WRONG. FIX ME.
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
fast_propagate_add_u64(tc, carry, sum);
a = sum;
}
__device__
void fq2_add(thread_context_t& tc, mfq_t& a, mfq_t& b) {
// HUGELY WRONG. FIX ME.
fq2_add_nomod(tc, a, b);
}
__device__ __forceinline__ uint32_t sub_cc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("sub.cc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ static int32_t fast_propagate_sub_u64(thread_context_t& tc, const uint32_t carry, uint64_t &x) {
// uint32_t sync=0xFFFFFFFF, warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t lane_mask = 1 << tc.lane;
uint32_t g, p, c;
uint64_t sum = 0;
g=__ballot_sync(tc.sync_mask, carry==0xFFFFFFFF);
p=__ballot_sync(tc.sync_mask, x==0);
g = (tc.subwarp_number == 0) ? g : (g >> 16);
p = (tc.subwarp_number == 0) ? p : (p >> 16);
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x-(c!=0);
return (sum>>16); // -(p==0xFFFFFFFF);
}
__device__ __forceinline__ static int32_t fast_propagate_sub(thread_context_t& tc, const uint32_t carry, uint32_t &x) {
// uint32_t sync=0xFFFFFFFF, warp_thread=threadIdx.x & warpSize-1, lane=1<<warp_thread;
uint32_t g, p, c;
uint64_t sum;
uint32_t lane_mask = 1 << tc.lane;
g=__ballot_sync(tc.sync_mask, carry==0xFFFFFFFF);
p=__ballot_sync(tc.sync_mask, x==0);
g = (tc.subwarp_number == 0) ? g : g >> 16;
p = (tc.subwarp_number == 0) ? p : p >> 16;
sum=(uint64_t)g+(uint64_t)g+(uint64_t)p;
c=lane_mask&(p^sum);
x=x-(c!=0);
return (sum>>32); // -(p==0xFFFFFFFF);
}
__device__ __forceinline__ uint64_t subc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("subc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__ __forceinline__ uint32_t subc(uint32_t a, uint32_t b) {
uint32_t r;
asm volatile ("subc.u32 %0, %1, %2;" : "=r"(r) : "r"(a), "r"(b));
return r;
}
__device__ __forceinline__ uint64_t sub_cc_u64(uint64_t a, uint64_t b) {
uint64_t r;
asm volatile ("sub.cc.u64 %0, %1, %2;" : "=l"(r) : "l"(a), "l"(b));
return r;
}
__device__
int dev_sub(thread_context_t& tc, uint32_t& a, uint32_t& b) {
uint32_t carry;
a = sub_cc(a, b);
carry=subc(0, 0);
return -fast_propagate_sub(tc, carry, a);
}
__device__ __forceinline__
int dev_sub_u64(thread_context_t& tc, uint64_t& a, uint64_t& b) {
uint32_t carry;
a = sub_cc_u64(a, b);
carry=subc(0, 0);
return -fast_propagate_sub_u64(tc, carry, a);
}
// Assuming either a < b, or b < a < 2b, we subtract b
// from a and keep the difference only if the subtraction did not borrow.
__device__ __forceinline__
void one_mod_u64(thread_context_t& tc, uint64_t& a, uint64_t& b) {
uint64_t dummy_a = a;
int which = dev_sub_u64(tc, dummy_a, b);
a = (which == -1) ? a : dummy_a;
}
__device__
int32_t fq_add_nomod(thread_context_t& tc, mfq_t& a, mfq_t& b) {
uint64_t sum, carry;
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
carry = fast_propagate_add_u64(tc, carry, sum);
a = sum;
return carry;
}
__device__
void fq_add_mod(thread_context_t& tc, mfq_t& a, mfq_t& b, mfq_t& m) {
uint64_t sum, carry;
sum = add_cc_u64(a, b);
carry = addc_cc(0, 0);
fast_propagate_add_u64(tc, carry, sum);
a = sum;
// DO THE MODULUS.
one_mod_u64(tc, a, m);
}
__device__
void fq_sub_mod(thread_context_t& tc, mfq_t& a, mfq_t& b, mfq_t& m) {
int which = dev_sub_u64(tc, a, b);
if (which == -1) {
fq_add_nomod(tc, a, m);
}
}
__global__
void fq2_add_kernel(mquad_ti* instances, uint32_t instance_count) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
// THIS IS WRONG.
fq2_add(tc, instances[tc.instance_number].A.a0[tc.lane],
instances[tc.instance_number].B.a0[tc.lane]);
}
// X - Y
__global__
void fq_sub_kernel(tuple_mfq_ti* instances, uint32_t instance_count, mfq_t modulus[]) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
fq_sub_mod(tc, instances[tc.instance_number].x[tc.lane],
instances[tc.instance_number].y[tc.lane], mnt4_modulus_device[tc.lane]);
}
__global__
void fq_add_kernel(tuple_mfq_ti* instances, uint32_t instance_count, mfq_t modulus[]) {
int32_t my_instance =(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
thread_context_t tc;
compute_context(tc, instance_count);
if (tc.instance_number >= instance_count) return;
fq_add_mod(tc, instances[tc.instance_number].x[tc.lane],
instances[tc.instance_number].y[tc.lane], mnt4_modulus_device[tc.lane]);
}
void load_mnt4_modulus() {
cudaMemcpyToSymbol(mnt4_modulus_device, mnt4_modulus, bytes_per_elem, 0, cudaMemcpyHostToDevice);
}
|
e458ed4c9e5eeeddfc2b81c4245cf4aeb180dff1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rgb2gray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *grayImage = NULL;
hipMalloc(&grayImage, XSIZE*YSIZE*sizeof(float)); // buffer sizes are in bytes
float *rgbImage = NULL;
hipMalloc(&rgbImage, XSIZE*YSIZE*sizeof(float));
int channels = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
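// Editor's note on the benchmark pattern below: one initial launch, ten
// warm-up launches, then 1000 timed launches measured with
// std::chrono::steady_clock.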
hipFree(0);hipLaunchKernelGGL((
rgb2gray), dim3(gridBlock),dim3(threadBlock), 0, 0, grayImage,rgbImage,channels,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
rgb2gray), dim3(gridBlock),dim3(threadBlock), 0, 0, grayImage,rgbImage,channels,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
rgb2gray), dim3(gridBlock),dim3(threadBlock), 0, 0, grayImage,rgbImage,channels,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e458ed4c9e5eeeddfc2b81c4245cf4aeb180dff1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rgb2gray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *grayImage = NULL;
cudaMalloc(&grayImage, XSIZE*YSIZE*sizeof(float)); // buffer sizes are in bytes
float *rgbImage = NULL;
cudaMalloc(&rgbImage, XSIZE*YSIZE*sizeof(float));
int channels = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
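// Editor's note on the benchmark pattern below: one initial launch, ten
// warm-up launches, then 1000 timed launches measured with
// std::chrono::steady_clock.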
cudaFree(0);
rgb2gray<<<gridBlock,threadBlock>>>(grayImage,rgbImage,channels,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
rgb2gray<<<gridBlock,threadBlock>>>(grayImage,rgbImage,channels,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
rgb2gray<<<gridBlock,threadBlock>>>(grayImage,rgbImage,channels,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
model_update_kernel_util.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
#include <hipcub/hipcub.hpp>
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, typename G, typename C>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, C* model_copy) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
if (model_copy != nullptr) {
FusedSGDUpdateFunctor<T, G, C>()(model_diff + i, model + i, model_copy + i, scale, l1, l2,
weight_decay, learning_rate_val);
} else {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay,
learning_rate_val);
}
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(float weight_decay, float lr_scale,
const IDX feature_size, const int64_t lower_bound,
const int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model) {
const int64_t n = *num_unique_instance * feature_size;
T lr = *learning_rate;
lr *= lr_scale;
CUDA_1D_KERNEL_LOOP_T(IDX, i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
SGDUpdateFunctor<T, T>()(values + i, model + model_idx, static_cast<T>(1), 0.0, 0.0,
weight_decay, lr);
}
}
}
template<typename T>
__global__ void SumSquares2(int64_t n, const T* src0, T* dst0, const T* src1, T* dst1) {
T t_sum0 = 0;
T t_sum1 = 0;
CUDA_1D_KERNEL_LOOP(i, n) {
t_sum0 += src0[i] * src0[i];
t_sum1 += src1[i] * src1[i];
}
typedef hipcub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage0;
__shared__ typename BlockReduce::TempStorage temp_storage1;
T b_sum0 = BlockReduce(temp_storage0).Sum(t_sum0);
T b_sum1 = BlockReduce(temp_storage1).Sum(t_sum1);
if (threadIdx.x == 0) {
cuda::atomic::Add(dst0, b_sum0);
cuda::atomic::Add(dst1, b_sum1);
}
}
} // namespace
template<typename T, typename G, typename C>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, C> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
C* model_copy);
};
template<typename T, typename G, typename C>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, C>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, C* model_copy) {
hipLaunchKernelGGL(( SGDUpdateGpu<T, G, C>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr,
skip_if, model_diff, model, model_copy);
}
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
float16* model_copy);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, float16* model_copy) {
SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, half>::Update(
stream, n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate,
scale_by_ptr, skip_if, model_diff, model, reinterpret_cast<half*>(model_copy));
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, float16* model_copy);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, float16* model_copy) {
SGDUpdateKernelUtil<DeviceType::kCUDA, T, half, half>::Update(
stream, n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
reinterpret_cast<half*>(model_copy));
}
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, double, double, float16>;
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, float, float, float16>;
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, float, float16, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, float weight_decay, float lr_scale, int64_t num_indices,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, float weight_decay, float lr_scale, int64_t num_indices,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model) {
hipLaunchKernelGGL(( IndexedSlicesSGDUpdateGpu<T, K, IDX>)
, dim3(BlocksNum4ThreadsNum(num_indices * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
weight_decay, lr_scale, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
dampening, nesterov, maximize, weight_decay, learning_rate_val);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, float dampening, bool nesterov,
bool maximize, float weight_decay, float lr_scale,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
T lr = *learning_rate;
lr *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, dampening, nesterov,
maximize, weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta, float dampening,
bool nesterov, bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum) {
hipLaunchKernelGGL(( MomentumUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, beta, dampening, nesterov, maximize, weight_decay, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta, float dampening,
bool nesterov, bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, beta, dampening, nesterov, maximize, weight_decay,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, T beta, float dampening, bool nesterov, bool maximize,
float weight_decay, float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, T beta, float dampening, bool nesterov, bool maximize, float weight_decay,
float lr_scale, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
hipLaunchKernelGGL(( IndexedSlicesMomentumUpdateGpu<T, K, IDX>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
beta, dampening, nesterov, maximize, weight_decay, lr_scale, feature_size, lower_bound,
upper_bound, num_unique_instance, learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA
namespace {
__global__ void BiasCorrectionFactorKernelGpu(float beta, const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float bias_correction_factor = 1.0 - static_cast<float>(pow(beta, exponent));
*out = bias_correction_factor;
}
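// Editor's note: the kernel above computes the Adam-style bias-correction
// factor 1 - beta^(train_step + 1); train_step is zero-based, hence the +1
// in the exponent.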
template<typename T, typename G, typename C>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model,
C* model_copy, T* m, T* v, T* max_v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
if (model_copy != nullptr) {
FusedAdamUpdateFunctor<T, G, C>()(model_diff + i, model + i, model_copy + i, m + i, v + i,
max_v + i, scale, l1, l2, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
} else {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, max_v + i, scale, l1, l2,
beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
}
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(
float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, float lr_scale, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices,
const T* values, T* model, T* m, T* v, T* max_v) {
if (learning_rate != nullptr) { lr = *learning_rate; }
lr *= lr_scale;
float bias_correction1 = 1.0;
float bias_correction2 = 1.0;
if (bias_correction1_ptr != nullptr) { bias_correction1 = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2 = *bias_correction2_ptr; }
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
max_v + i, static_cast<T>(1), 0, 0, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1, bias_correction2, lr);
}
}
}
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* adam_diff, T* model, T* m, T* v,
bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* bias_correction1_ptr,
const float* bias_correction2_ptr) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(model_diff + i, adam_diff + i, model + i, m + i, v + i, scale, l1, l2,
beta1, beta2, epsilon, do_bias_correction, bias_correction1_val,
bias_correction2_val);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate_ptr,
const int64_t* skip_if, const T* w_norm_2, const T* g_norm_2,
const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate_ptr != nullptr) { learning_rate_val = *learning_rate_ptr; }
learning_rate_val *= lr_scale;
const float lr = LambLRFunctor<T>()(learning_rate_val, w_norm_2, g_norm_2);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G, typename C>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, C> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const G* model_diff, T* model, C* model_copy, T* m, T* v, T* max_v);
};
template<typename T, typename G, typename C>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, C>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff,
T* model, C* model_copy, T* m, T* v, T* max_v) {
hipLaunchKernelGGL(( AdamUpdateGpu<T, G, C>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model,
model_copy, m, v, max_v);
}
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const G* model_diff, T* model, float16* model_copy, T* m, T* v, T* max_v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff,
T* model, float16* model_copy, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model,
reinterpret_cast<half*>(model_copy), m, v, max_v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const float16* model_diff, T* model, float16* model_copy, T* m, T* v,
T* max_v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const float16* model_diff,
T* model, float16* model_copy, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kCUDA, T, half, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr,
reinterpret_cast<const half*>(model_diff), model, reinterpret_cast<half*>(model_copy), m, v,
max_v);
}
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, float, float, float16>;
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, double, double, float16>;
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, float, float16, float16>;
template<typename T, typename G>
__global__ void AdagradUpdateGpu(int64_t n, T scale, float l1, float l2, float lr_decay,
float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, int64_t train_step, const float* learning_rate,
const int64_t* train_step_ptr, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* sum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (train_step_ptr != nullptr) {
train_step = *train_step_ptr + 1;
} // train_step_ptr starts from zero.
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val = learning_rate_val * lr_scale / (1 + (train_step - 1) * lr_decay);
CUDA_1D_KERNEL_LOOP(i, n) {
AdagradUpdateFunctor<T, G>()(model_diff + i, model + i, sum + i, scale, l1, l2, epsilon,
weight_decay, learning_rate_val);
}
}
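// Editor's note: in the kernel above the effective step size is
// learning_rate_val * lr_scale / (1 + (train_step - 1) * lr_decay), where
// train_step has already been shifted to a one-based count when it is read
// from train_step_ptr.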
template<typename T, typename G>
struct AdagradUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_decay,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
int64_t train_step, const float* learning_rate, const int64_t* train_step_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* sum);
};
template<typename T, typename G>
void AdagradUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_decay, float epsilon,
float weight_decay, float learning_rate_val, float lr_scale, int64_t train_step,
const float* learning_rate, const int64_t* train_step_ptr, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* sum) {
hipLaunchKernelGGL(( AdagradUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, lr_decay, epsilon, weight_decay, learning_rate_val, lr_scale, train_step,
learning_rate, train_step_ptr, scale_by_ptr, skip_if, model_diff, model, sum);
}
template struct AdagradUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct AdagradUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate_ptr,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
bool do_bias_correction, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate_ptr, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer) {
hipLaunchKernelGGL(( LambGradGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, scale_by_ptr, skip_if, model_diff, adam_diff, model,
m, v, do_bias_correction, bias_correction1_val, bias_correction2_val, bias_correction1_ptr,
bias_correction2_ptr);
T* w_norm_2 = norm_buffer;
T* g_norm_2 = norm_buffer + 1;
Memset<DeviceType::kCUDA>(stream, norm_buffer, 0, 2 * sizeof(T));
hipLaunchKernelGGL(( SumSquares2<T>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, model, w_norm_2, adam_diff, g_norm_2);
hipLaunchKernelGGL(( LambUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, weight_decay, learning_rate_val, lr_scale, learning_rate_ptr, skip_if, w_norm_2, g_norm_2,
adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate_ptr,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
bool do_bias_correction, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate_ptr, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer) {
LambUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate_val, lr_scale,
do_bias_correction, bias_correction1_val, bias_correction2_val, learning_rate_ptr,
bias_correction1_ptr, bias_correction2_ptr, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer);
}
template struct LambUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float lr,
float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model,
T* m, T* v, T* max_v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v,
T* max_v) {
hipLaunchKernelGGL(( IndexedSlicesAdamUpdateGpu<T, K, IDX>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, lr, lr_scale,
feature_size, lower_bound, upper_bound, num_unique_instance, learning_rate,
bias_correction1_ptr, bias_correction2_ptr, indices, values, model, m, v, max_v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA
template<>
struct BiasCorrectionFactorKernelUtil<DeviceType::kCUDA> {
static void BiasCorrectionFactorCompute(ep::Stream* stream, float beta, const int64_t* train_step,
float* out);
};
void BiasCorrectionFactorKernelUtil<DeviceType::kCUDA>::BiasCorrectionFactorCompute(
ep::Stream* stream, float beta, const int64_t* train_step, float* out) {
hipLaunchKernelGGL(( BiasCorrectionFactorKernelGpu), dim3(1), dim3(1), 0, stream->As<ep::CudaStream>()->cuda_stream(),
beta, train_step, out);
}
namespace {
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, learning_rate_val);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* mean_square,
T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* mean_square, T* mean_gradient) {
if (centered) {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, true>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, false>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square,
T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff),
model, mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
namespace {
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, float lr_scale,
T weight_decay, T epsilon, T lars_coefficient,
const int64_t* skip_if, T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
T lars = static_cast<T>(1);
if (*model_norm > 0 && *model_diff_norm > 0) {
lars = lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_norm));
}
T lr = *learning_rate;
lr *= lr_scale;
*local_learning_rate = lr * lars;
}
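// Editor's note: the kernel above turns the squared sums produced by
// SumSquares2 into norms and computes the LARS local learning rate
// lr * lr_scale * lars_coefficient * ||model|| /
// (epsilon + ||model_diff|| + weight_decay * ||model||),
// falling back to lr * lr_scale when either norm is zero.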
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2,
float momentum_beta, float epsilon, float lars_coefficient, float weight_decay,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
hipLaunchKernelGGL(( LarsScaleModelDiffGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
Memset<DeviceType::kCUDA>(stream, data_tmp, 0, 2 * sizeof(T));
hipLaunchKernelGGL(( SumSquares2<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), n, model, model_norm,
model_diff_tmp, model_diff_norm);
hipLaunchKernelGGL(( LarsGetLocalLearningRateGpu<T>), dim3(1), dim3(1), 0, stream->As<ep::CudaStream>()->cuda_stream(),
learning_rate, lr_scale, weight_decay, epsilon, lars_coefficient, skip_if, data_tmp);
hipLaunchKernelGGL(( LarsUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2,
float momentum_beta, float epsilon, float lars_coefficient, float weight_decay,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, lr_scale,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
momentum, data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename G>
__global__ void FtrlUpdateGpu(int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* accumulate, T* z) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
FtrlUpdateFunctor<T, G>()(model_diff + i, model + i, accumulate + i, z + i, scale, l1, l2,
lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val);
}
}
template<typename T, typename G>
struct FtrlUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power,
float lambda1, float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* accumulate, T* z);
};
template<typename T, typename G>
void FtrlUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* accumulate, T* z) {
hipLaunchKernelGGL(( FtrlUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, model_diff, model, accumulate, z);
}
template<typename T>
struct FtrlUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power,
float lambda1, float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* accumulate, T* z);
};
template<typename T>
void FtrlUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* accumulate, T* z) {
FtrlUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff),
model, accumulate, z);
}
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename G>
__global__ void AdadeltaUpdateGpu(int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* square_avgs, T* acc_deltas) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
AdadeltaUpdateFunctor<T, G>()(model_diff + i, model + i, square_avgs + i, acc_deltas + i, scale,
l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val);
}
}
template<typename T, typename G>
struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho,
float epsilon, bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* square_avgs,
T* acc_deltas);
};
template<typename T, typename G>
void AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* square_avgs, T* acc_deltas) {
hipLaunchKernelGGL(( AdadeltaUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
n, scale, l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, model_diff, model, square_avgs, acc_deltas);
}
template<typename T>
struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho,
float epsilon, bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* square_avgs,
T* acc_deltas);
};
template<typename T>
void AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* square_avgs, T* acc_deltas) {
AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
square_avgs, acc_deltas);
}
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
} // namespace oneflow
| model_update_kernel_util.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
#include <cub/cub.cuh>
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, typename G, typename C>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, C* model_copy) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
if (model_copy != nullptr) {
FusedSGDUpdateFunctor<T, G, C>()(model_diff + i, model + i, model_copy + i, scale, l1, l2,
weight_decay, learning_rate_val);
} else {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay,
learning_rate_val);
}
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(float weight_decay, float lr_scale,
const IDX feature_size, const int64_t lower_bound,
const int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model) {
const int64_t n = *num_unique_instance * feature_size;
T lr = *learning_rate;
lr *= lr_scale;
CUDA_1D_KERNEL_LOOP_T(IDX, i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
SGDUpdateFunctor<T, T>()(values + i, model + model_idx, static_cast<T>(1), 0.0, 0.0,
weight_decay, lr);
}
}
}
template<typename T>
__global__ void SumSquares2(int64_t n, const T* src0, T* dst0, const T* src1, T* dst1) {
T t_sum0 = 0;
T t_sum1 = 0;
CUDA_1D_KERNEL_LOOP(i, n) {
t_sum0 += src0[i] * src0[i];
t_sum1 += src1[i] * src1[i];
}
typedef cub::BlockReduce<T, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage0;
__shared__ typename BlockReduce::TempStorage temp_storage1;
T b_sum0 = BlockReduce(temp_storage0).Sum(t_sum0);
T b_sum1 = BlockReduce(temp_storage1).Sum(t_sum1);
if (threadIdx.x == 0) {
cuda::atomic::Add(dst0, b_sum0);
cuda::atomic::Add(dst1, b_sum1);
}
}
} // namespace
template<typename T, typename G, typename C>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, C> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
C* model_copy);
};
template<typename T, typename G, typename C>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, C>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, C* model_copy) {
SGDUpdateGpu<T, G, C><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate, scale_by_ptr,
skip_if, model_diff, model, model_copy);
}
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
float16* model_copy);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, float16* model_copy) {
SGDUpdateKernelUtil<DeviceType::kCUDA, T, G, half>::Update(
stream, n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate,
scale_by_ptr, skip_if, model_diff, model, reinterpret_cast<half*>(model_copy));
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, float16* model_copy);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, float16* model_copy) {
SGDUpdateKernelUtil<DeviceType::kCUDA, T, half, half>::Update(
stream, n, scale, l1, l2, weight_decay, learning_rate_val, lr_scale, learning_rate,
scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
reinterpret_cast<half*>(model_copy));
}
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, double, double, float16>;
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, float, float, float16>;
template struct SGDUpdateKernelUtil<DeviceType::kCUDA, float, float16, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, float weight_decay, float lr_scale, int64_t num_indices,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, float weight_decay, float lr_scale, int64_t num_indices,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model) {
IndexedSlicesSGDUpdateGpu<T, K, IDX>
<<<BlocksNum4ThreadsNum(num_indices * feature_size), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
weight_decay, lr_scale, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_CUDA
namespace {
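// Dense momentum-SGD update. Scalar hyper-parameters can be overridden by their optional
// device-pointer counterparts (learning_rate, scale_by_ptr), and a non-zero *skip_if turns
// the launch into a no-op.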
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* momentum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
dampening, nesterov, maximize, weight_decay, learning_rate_val);
}
}
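// Sparse (indexed-slices) momentum update: each row of `values` is applied to the local
// model shard only when its index falls inside [lower_bound, upper_bound).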
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, float dampening, bool nesterov,
bool maximize, float weight_decay, float lr_scale,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
T lr = *learning_rate;
lr *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, dampening, nesterov,
maximize, weight_decay, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta, float dampening,
bool nesterov, bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* momentum) {
MomentumUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, beta, dampening, nesterov, maximize, weight_decay, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model, momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta,
float dampening, bool nesterov, bool maximize, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta, float dampening,
bool nesterov, bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, beta, dampening, nesterov, maximize, weight_decay,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, T beta, float dampening, bool nesterov, bool maximize,
float weight_decay, float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, T beta, float dampening, bool nesterov, bool maximize, float weight_decay,
float lr_scale, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
IndexedSlicesMomentumUpdateGpu<T, K, IDX>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
beta, dampening, nesterov, maximize, weight_decay, lr_scale, feature_size, lower_bound,
upper_bound, num_unique_instance, learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_CUDA
namespace {
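// Single-thread kernel that computes the Adam bias-correction factor 1 - beta^(t + 1),
// where t = *train_step is zero-based.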
__global__ void BiasCorrectionFactorKernelGpu(float beta, const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float bias_correction_factor = 1.0 - static_cast<float>(pow(beta, exponent));
*out = bias_correction_factor;
}
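// Dense Adam update. When model_copy is non-null, the fused functor also keeps a
// low-precision copy of the model in sync; the learning rate and bias-correction factors
// may be read from device pointers when those are provided.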
template<typename T, typename G, typename C>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const G* model_diff, T* model,
C* model_copy, T* m, T* v, T* max_v) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
if (model_copy != nullptr) {
FusedAdamUpdateFunctor<T, G, C>()(model_diff + i, model + i, model_copy + i, m + i, v + i,
max_v + i, scale, l1, l2, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
} else {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, max_v + i, scale, l1, l2,
beta1, beta2, epsilon, weight_decay, amsgrad, bias_correction1_val,
bias_correction2_val, learning_rate_val);
}
}
}
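// Multiplies the running beta1^t / beta2^t accumulators by beta1 / beta2 once per step
// (skipped when *skip_if != 0).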
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, const int64_t* skip_if, T* beta1_t,
T* beta2_t) {
if (skip_if != nullptr && *skip_if != 0) { return; }
*beta1_t *= beta1;
*beta2_t *= beta2;
}
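// Sparse (indexed-slices) Adam update over the rows whose indices fall inside
// [lower_bound, upper_bound); rows outside the local shard are ignored.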
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(
float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, float lr_scale, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const K* indices,
const T* values, T* model, T* m, T* v, T* max_v) {
if (learning_rate != nullptr) { lr = *learning_rate; }
lr *= lr_scale;
float bias_correction1 = 1.0;
float bias_correction2 = 1.0;
if (bias_correction1_ptr != nullptr) { bias_correction1 = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2 = *bias_correction2_ptr; }
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
max_v + i, static_cast<T>(1), 0, 0, beta1, beta2, epsilon,
weight_decay, amsgrad, bias_correction1, bias_correction2, lr);
}
}
}
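// LAMB step 1: convert the raw gradient into the (optionally bias-corrected) Adam
// direction stored in adam_diff, updating m and v along the way.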
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* adam_diff, T* model, T* m, T* v,
bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* bias_correction1_ptr,
const float* bias_correction2_ptr) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
if (bias_correction1_ptr != nullptr) { bias_correction1_val = *bias_correction1_ptr; }
if (bias_correction2_ptr != nullptr) { bias_correction2_val = *bias_correction2_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(model_diff + i, adam_diff + i, model + i, m + i, v + i, scale, l1, l2,
beta1, beta2, epsilon, do_bias_correction, bias_correction1_val,
bias_correction2_val);
}
}
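// LAMB step 3: compute the final learning rate from the model and Adam-direction norm
// buffers (w_norm_2, g_norm_2) via LambLRFunctor and apply the update to the model.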
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate_ptr,
const int64_t* skip_if, const T* w_norm_2, const T* g_norm_2,
const T* adam_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate_ptr != nullptr) { learning_rate_val = *learning_rate_ptr; }
learning_rate_val *= lr_scale;
const float lr = LambLRFunctor<T>()(learning_rate_val, w_norm_2, g_norm_2);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G, typename C>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, C> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const G* model_diff, T* model, C* model_copy, T* m, T* v, T* max_v);
};
template<typename T, typename G, typename C>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, C>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff,
T* model, C* model_copy, T* m, T* v, T* max_v) {
AdamUpdateGpu<T, G, C><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model,
model_copy, m, v, max_v);
}
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const G* model_diff, T* model, float16* model_copy, T* m, T* v, T* max_v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const G* model_diff,
T* model, float16* model_copy, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kCUDA, T, G, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr, model_diff, model,
reinterpret_cast<half*>(model_copy), m, v, max_v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float learning_rate_val, float lr_scale,
float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const float16* model_diff, T* model, float16* model_copy, T* m, T* v,
T* max_v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kCUDA, T, float16, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, bool amsgrad, bool do_bias_correction,
float learning_rate_val, float lr_scale, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float* bias_correction1_ptr, const float* bias_correction2_ptr, const float16* model_diff,
T* model, float16* model_copy, T* m, T* v, T* max_v) {
AdamUpdateKernelUtil<DeviceType::kCUDA, T, half, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction,
learning_rate_val, lr_scale, bias_correction1_val, bias_correction2_val, learning_rate,
scale_by_ptr, skip_if, bias_correction1_ptr, bias_correction2_ptr,
reinterpret_cast<const half*>(model_diff), model, reinterpret_cast<half*>(model_copy), m, v,
max_v);
}
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, float, float, float16>;
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, double, double, float16>;
template struct AdamUpdateKernelUtil<DeviceType::kCUDA, float, float16, float16>;
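// Dense Adagrad update with learning-rate decay lr / (1 + (train_step - 1) * lr_decay);
// the train step may come from a zero-based device pointer.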
template<typename T, typename G>
__global__ void AdagradUpdateGpu(int64_t n, T scale, float l1, float l2, float lr_decay,
float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, int64_t train_step, const float* learning_rate,
const int64_t* train_step_ptr, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* sum) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
  if (train_step_ptr != nullptr) {
    train_step = *train_step_ptr + 1;  // *train_step_ptr is zero-based.
  }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val = learning_rate_val * lr_scale / (1 + (train_step - 1) * lr_decay);
CUDA_1D_KERNEL_LOOP(i, n) {
AdagradUpdateFunctor<T, G>()(model_diff + i, model + i, sum + i, scale, l1, l2, epsilon,
weight_decay, learning_rate_val);
}
}
template<typename T, typename G>
struct AdagradUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_decay,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
int64_t train_step, const float* learning_rate, const int64_t* train_step_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* sum);
};
template<typename T, typename G>
void AdagradUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_decay, float epsilon,
float weight_decay, float learning_rate_val, float lr_scale, int64_t train_step,
const float* learning_rate, const int64_t* train_step_ptr, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* sum) {
AdagradUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, lr_decay, epsilon, weight_decay, learning_rate_val, lr_scale, train_step,
learning_rate, train_step_ptr, scale_by_ptr, skip_if, model_diff, model, sum);
}
template struct AdagradUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct AdagradUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate_ptr,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
bool do_bias_correction, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate_ptr, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const T* scale_by_ptr, const int64_t* skip_if,
const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer) {
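  // LAMB runs as three launches: compute the Adam direction, reduce the squared norms of
  // the model and of that direction into norm_buffer, then apply the trust-ratio update.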
LambGradGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, scale_by_ptr, skip_if, model_diff, adam_diff, model,
m, v, do_bias_correction, bias_correction1_val, bias_correction2_val, bias_correction1_ptr,
bias_correction2_ptr);
T* w_norm_2 = norm_buffer;
T* g_norm_2 = norm_buffer + 1;
Memset<DeviceType::kCUDA>(stream, norm_buffer, 0, 2 * sizeof(T));
SumSquares2<T>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, model, w_norm_2, adam_diff, g_norm_2);
LambUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, weight_decay, learning_rate_val, lr_scale, learning_rate_ptr, skip_if, w_norm_2, g_norm_2,
adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, float learning_rate_val,
float lr_scale, bool do_bias_correction, float bias_correction1_val,
float bias_correction2_val, const float* learning_rate_ptr,
const float* bias_correction1_ptr, const float* bias_correction2_ptr,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* adam_diff, T* model, T* m, T* v, T* norm_buffer);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, float learning_rate_val, float lr_scale,
bool do_bias_correction, float bias_correction1_val, float bias_correction2_val,
const float* learning_rate_ptr, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer) {
LambUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate_val, lr_scale,
do_bias_correction, bias_correction1_val, bias_correction2_val, learning_rate_ptr,
bias_correction1_ptr, bias_correction2_ptr, scale_by_ptr, skip_if,
reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer);
}
template struct LambUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX> {
static void Update(ep::Stream* stream, float beta1, float beta2, float epsilon,
float weight_decay, bool amsgrad, bool do_bias_correction, float lr,
float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model,
T* m, T* v, T* max_v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kCUDA, T, K, IDX>::Update(
ep::Stream* stream, float beta1, float beta2, float epsilon, float weight_decay, bool amsgrad,
bool do_bias_correction, float lr, float lr_scale, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const float* bias_correction1_ptr,
const float* bias_correction2_ptr, const K* indices, const T* values, T* model, T* m, T* v,
T* max_v) {
IndexedSlicesAdamUpdateGpu<T, K, IDX>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
beta1, beta2, epsilon, weight_decay, amsgrad, do_bias_correction, lr, lr_scale,
feature_size, lower_bound, upper_bound, num_unique_instance, learning_rate,
bias_correction1_ptr, bias_correction2_ptr, indices, values, model, m, v, max_v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kCUDA, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_CUDA
template<>
struct BiasCorrectionFactorKernelUtil<DeviceType::kCUDA> {
static void BiasCorrectionFactorCompute(ep::Stream* stream, float beta, const int64_t* train_step,
float* out);
};
void BiasCorrectionFactorKernelUtil<DeviceType::kCUDA>::BiasCorrectionFactorCompute(
ep::Stream* stream, float beta, const int64_t* train_step, float* out) {
BiasCorrectionFactorKernelGpu<<<1, 1, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
beta, train_step, out);
}
namespace {
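// Dense RMSProp update; the `centered` template parameter selects the centered variant,
// which additionally maintains mean_gradient.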
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, learning_rate_val);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* mean_square,
T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* mean_square, T* mean_gradient) {
if (centered) {
RmsPropUpdateGpu<T, G, true><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model);
} else {
RmsPropUpdateGpu<T, G, false><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate_val, lr_scale, learning_rate, scale_by_ptr, skip_if, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* mean_square,
T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff),
model, mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
namespace {
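// LARS step 1: cast, scale and regularize the raw gradient into model_diff_tmp.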
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* model_diff_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
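// LARS step 2 (single-thread launch): data_tmp[0] and data_tmp[1] arrive holding the
// squared model and gradient norms; the kernel takes their square roots and writes the
// local learning rate (base lr scaled by the LARS trust ratio) into data_tmp[2].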
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, float lr_scale,
T weight_decay, T epsilon, T lars_coefficient,
const int64_t* skip_if, T* data_tmp) {
if (skip_if != nullptr && *skip_if != 0) { return; }
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
T lars = static_cast<T>(1);
if (*model_norm > 0 && *model_diff_norm > 0) {
lars = lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_norm));
}
T lr = *learning_rate;
lr *= lr_scale;
*local_learning_rate = lr * lars;
}
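// LARS step 3: momentum update of the model using the local learning rate from step 2.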
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
const int64_t* skip_if, T* local_learning_rate, T* model_diff_tmp,
T* model) {
if (skip_if != nullptr && *skip_if != 0) { return; }
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2,
float momentum_beta, float epsilon, float lars_coefficient, float weight_decay,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
LarsScaleModelDiffGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, scale_by_ptr, skip_if, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
Memset<DeviceType::kCUDA>(stream, data_tmp, 0, 2 * sizeof(T));
SumSquares2<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(n, model, model_norm,
model_diff_tmp, model_diff_norm);
LarsGetLocalLearningRateGpu<T><<<1, 1, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
learning_rate, lr_scale, weight_decay, epsilon, lars_coefficient, skip_if, data_tmp);
LarsUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, momentum_beta, momentum, weight_decay, skip_if, local_learning_rate, model_diff_tmp,
model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2,
float momentum_beta, float epsilon, float lars_coefficient, float weight_decay,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, lr_scale,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
momentum, data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
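// Dense FTRL update; `accumulate` and `z` hold the per-parameter FTRL state.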
template<typename T, typename G>
__global__ void FtrlUpdateGpu(int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* accumulate, T* z) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
FtrlUpdateFunctor<T, G>()(model_diff + i, model + i, accumulate + i, z + i, scale, l1, l2,
lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val);
}
}
template<typename T, typename G>
struct FtrlUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power,
float lambda1, float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff, T* model,
T* accumulate, T* z);
};
template<typename T, typename G>
void FtrlUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* accumulate, T* z) {
FtrlUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, model_diff, model, accumulate, z);
}
template<typename T>
struct FtrlUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power,
float lambda1, float lambda2, float beta, float weight_decay,
float learning_rate_val, float lr_scale, const float* learning_rate,
const T* scale_by_ptr, const int64_t* skip_if, const float16* model_diff,
T* model, T* accumulate, T* z);
};
template<typename T>
void FtrlUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float lr_power, float lambda1,
float lambda2, float beta, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* accumulate, T* z) {
FtrlUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, lr_power, lambda1, lambda2, beta, weight_decay, learning_rate_val,
lr_scale, learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff),
model, accumulate, z);
}
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct FtrlUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
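// Dense Adadelta update; square_avgs and acc_deltas carry the per-parameter Adadelta state.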
template<typename T, typename G>
__global__ void AdadeltaUpdateGpu(int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model,
T* square_avgs, T* acc_deltas) {
if (skip_if != nullptr && *skip_if != 0) { return; }
if (learning_rate != nullptr) { learning_rate_val = *learning_rate; }
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
learning_rate_val *= lr_scale;
CUDA_1D_KERNEL_LOOP(i, n) {
AdadeltaUpdateFunctor<T, G>()(model_diff + i, model + i, square_avgs + i, acc_deltas + i, scale,
l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val);
}
}
template<typename T, typename G>
struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, G> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho,
float epsilon, bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const G* model_diff, T* model, T* square_avgs,
T* acc_deltas);
};
template<typename T, typename G>
void AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, G>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if, const G* model_diff,
T* model, T* square_avgs, T* acc_deltas) {
AdadeltaUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
n, scale, l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, model_diff, model, square_avgs, acc_deltas);
}
template<typename T>
struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, float16> {
static void Update(ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho,
float epsilon, bool maximize, float weight_decay, float learning_rate_val,
float lr_scale, const float* learning_rate, const T* scale_by_ptr,
const int64_t* skip_if, const float16* model_diff, T* model, T* square_avgs,
T* acc_deltas);
};
template<typename T>
void AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, float16>::Update(
ep::Stream* stream, int64_t n, T scale, float l1, float l2, float rho, float epsilon,
bool maximize, float weight_decay, float learning_rate_val, float lr_scale,
const float* learning_rate, const T* scale_by_ptr, const int64_t* skip_if,
const float16* model_diff, T* model, T* square_avgs, T* acc_deltas) {
AdadeltaUpdateKernelUtil<DeviceType::kCUDA, T, half>::Update(
stream, n, scale, l1, l2, rho, epsilon, maximize, weight_decay, learning_rate_val, lr_scale,
learning_rate, scale_by_ptr, skip_if, reinterpret_cast<const half*>(model_diff), model,
square_avgs, acc_deltas);
}
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, float, float>;
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, double, double>;
template struct AdadeltaUpdateKernelUtil<DeviceType::kCUDA, float, float16>;
} // namespace oneflow
|
32f67ee3bbb3fec854b226ae6ce7bbef77303a48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// Multitask Network Cascade
// Written by Haozhi Qi
// Copyright (c) 2016, Haozhi Qi
// Licensed under The MIT License [see LICENSE for details]
// --------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__device__ void bilinear_interpolate(const Dtype* bottom_data, const int height, const int width, Dtype h, Dtype w, Dtype & maxval, Dtype & maxidx_h, Dtype & maxidx_w) {
  // deal with cases where the inverse-mapped coordinates fall outside the feature map boundary
if (h < -0.5 || h > height - 0.5 || w < -0.5 || w > width - 0.5) {
//empty
return;
}
if (h <= 0) h = 0;
if (w <= 0) w = 0;
int h_low = (int) h;
int w_low = (int) w;
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (Dtype) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (Dtype) w_low;
} else {
w_high = w_low + 1;
}
Dtype lh = h - h_low;
Dtype lw = w - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
// do bilinear interpolation
Dtype v1 = bottom_data[h_low * width + w_low];
Dtype v2 = bottom_data[h_low * width + w_high];
Dtype v3 = bottom_data[h_high * width + w_low];
Dtype v4 = bottom_data[h_high * width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
if (val > maxval) {
maxval = val;
maxidx_h = h;
maxidx_w = w;
}
}
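// Forward pass: each output element bilinearly samples the bottom feature map at its
// bin's source location and records the sampled (h, w) in argmax_data_h/w so that the
// backward pass can route gradients through the same location.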
template <typename Dtype>
__global__ void ROIWarpingForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const Dtype* bottom_rois,
Dtype* top_data, Dtype* argmax_data_h, Dtype* argmax_data_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_level = bottom_rois[0];
Dtype roi_start_w = round(bottom_rois[1] * spatial_scale);
Dtype roi_start_h = round(bottom_rois[2] * spatial_scale);
Dtype roi_end_w = round(bottom_rois[3] * spatial_scale);
Dtype roi_end_h = round(bottom_rois[4] * spatial_scale);
    // Clamp malformed ROIs to non-negative width/height
Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)0.);
Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)0.);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
// Define an empty pooling region to be zero
Dtype maxval = -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backpropagated
Dtype maxidx_h = -1;
Dtype maxidx_w = -1;
bottom_data += (roi_level * channels + c) * height * width;
Dtype ih = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype iw = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
bilinear_interpolate(bottom_data, height, width, ih, iw, maxval, maxidx_h, maxidx_w);
if (maxidx_h == -1 && maxidx_w == -1) maxval = 0;
top_data[index] = maxval;
argmax_data_h[index] = maxidx_h;
argmax_data_w[index] = maxidx_w;
}
}
template <typename Dtype>
void ROIWarpingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* argmax_data_h = max_idx_h_.mutable_gpu_data();
Dtype* argmax_data_w = max_idx_w_.mutable_gpu_data();
int count = top[0]->count();
  hipLaunchKernelGGL(( ROIWarpingForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
      count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
      pooled_width_, bottom_rois, top_data, argmax_data_h, argmax_data_w);
CUDA_POST_KERNEL_CHECK;
}
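// Returns the bilinear weight that links feature-map cell (h, w) to the recorded
// sampling location (argmax_h, argmax_w); used to distribute the top gradient over the
// four neighbouring cells.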
template <typename Dtype>
__device__ Dtype get_feature_gradient(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width)
{
if (argmax_h < -0.5 || argmax_h >(height - 0.5) || argmax_w < -0.5 || argmax_w >(width - 0.5))
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
}
else
argmax_h_high = argmax_h_low + 1;
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
}
else
argmax_w_high = argmax_w_low + 1;
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
}
else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__global__ void ROIWarpingBackwardFeature(const int nthreads, const Dtype* top_diff,
const Dtype* argmax_data_h, const Dtype* argmax_data_w, const int num_rois, const Dtype spatial_scale, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_level = offset_bottom_rois[0];
// Skip if ROI's level doesn't match n
if (n != roi_level) {
continue;
}
Dtype roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
Dtype roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
Dtype roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
Dtype roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= floor(roi_start_w) && w <= ceil(roi_end_w) &&
h >= floor(roi_start_h) && h <= ceil(roi_end_h));
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data_h = argmax_data_h + offset;
const Dtype* offset_argmax_data_w = argmax_data_w + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w+(Dtype)1.0, (Dtype)1.0);
Dtype roi_height = max(roi_end_h - roi_start_h+(Dtype)1.0, (Dtype)1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h - 1) / bin_size_h - 1);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w - 1) / bin_size_w - 1);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
Dtype weight = get_feature_gradient(offset_argmax_data_h[ph * pooled_width + pw],
offset_argmax_data_w[ph * pooled_width + pw], h, w, height, width);
gradient += weight * offset_top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
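// Returns the derivative of the sampled value with respect to one ROI coordinate
// (1: x1, 2: y1, 3: x2, 4: y2), evaluated at sampling point (h, w) by chaining through
// the bilinear interpolation weights.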
template <typename Dtype>
__device__ Dtype get_coordinate_gradient(int coordinate_index, Dtype h, Dtype w,
const Dtype* offset_bottom_data, const Dtype oh, const Dtype ow, const int height, const int width,
const int pooled_height, const int pooled_width) {
int arg_interpolate_h = (int) h;
int arg_interpolate_w = (int) w;
if (arg_interpolate_h + 1 > height - 1 || arg_interpolate_w + 1 > width - 1) {
return 0;
}
Dtype map_ratio_h = static_cast<Dtype>(oh) / static_cast<Dtype>(pooled_height);
Dtype map_ratio_w = static_cast<Dtype>(ow) / static_cast<Dtype>(pooled_width);
Dtype weight = 0;
int corner_ind_1 = arg_interpolate_h * width + arg_interpolate_w;
int corner_ind_2 = arg_interpolate_h * width + (arg_interpolate_w + 1);
int corner_ind_3 = (arg_interpolate_h + 1) * width + arg_interpolate_w;
int corner_ind_4 = (arg_interpolate_h + 1) * width + (arg_interpolate_w + 1);
Dtype dxc = 0.0, dyc = 0.0, dw = 0.0, dh = 0.0;
dxc += (-1.0 * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_1]);
dxc += ( 1.0 * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_2]);
dxc += (-1.0 * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_3]);
dxc += ( 1.0 * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_4]);
dyc += (-1.0 * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_1]);
dyc += (-1.0 * (w - arg_interpolate_w) * offset_bottom_data[corner_ind_2]);
dyc += ( 1.0 * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_3]);
dyc += ( 1.0 * (w - arg_interpolate_w) * offset_bottom_data[corner_ind_4]);
dw += ((0.5 - map_ratio_w) * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_1]);
dw += ((-0.5+map_ratio_w) * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_2]);
dw += ((0.5- map_ratio_w) * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_3]);
dw += ( (-0.5+map_ratio_w) * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_4]);
dh += ((0.5-map_ratio_h) * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_1]);
dh += ((0.5- map_ratio_h) * ( w - arg_interpolate_w) * offset_bottom_data[corner_ind_2]);
dh += ( (-0.5+map_ratio_h) * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_3]);
dh += ( (-0.5+map_ratio_h) * ( w - arg_interpolate_w) * offset_bottom_data[corner_ind_4]);
if (coordinate_index == 1) {
// \par f / \par x1
weight = 0.5 * dxc - dw;
} else if (coordinate_index == 2) {
// \par f / \par y1
weight = 0.5 * dyc - dh;
} else if (coordinate_index == 3) {
// \par f / \par x2
weight = 0.5 * dxc + dw;
} else if (coordinate_index == 4) {
// \par f / \par y2
weight = 0.5 * dyc + dh;
}
return weight;
}
template <typename Dtype>
__global__ void ROIWarpingBackwardCoordinate(const int nthreads, const int pooled_width, const int pooled_height,
const int width, const int height, const int channels, const Dtype spatial_scale, const Dtype* bottom_rois, const Dtype* bottom_data,
const Dtype* argmax_data_h, const Dtype* argmax_data_w, const Dtype* top_diff, Dtype* buffer_data) {
  // index is arranged as (roi_n * 5 + coordinate_index, c, ph, pw)
  // each element in buffer_data holds the derivative of one output feature-map
  // element with respect to one ROI coordinate (matching the bottom_rois layout)
  // coordinate_index == 0: batch/level index (gradient is always 0)
  // coordinate_index == 1: x1 (roi_start_w)
  // coordinate_index == 2: y1 (roi_start_h)
  // coordinate_index == 3: x2 (roi_end_w)
  // coordinate_index == 4: y2 (roi_end_h)
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = (index / pooled_width / pooled_height / channels);
int roi_n = n / 5;
int coordinate_index = n % 5;
Dtype gradient = 0.0;
if (coordinate_index == 0) {
buffer_data[index] = gradient;
}
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
assert(roi_start_h <= roi_end_h);
assert(roi_start_w <= roi_end_w);
const Dtype* offset_bottom_data = bottom_data + ((roi_batch_ind * channels + c) * height * width);
int offset = (((roi_n * channels + c) * pooled_height + ph) * pooled_width) + pw;
    // argmax coordinates recorded by the forward pass
Dtype ih = argmax_data_h[offset];
Dtype iw = argmax_data_w[offset];
    // the forward pass took the max over sampled locations, so recover the
    // (fractional) output-bin coordinates from the recorded argmax positions
    // before computing the coordinate gradient
const Dtype output_h = (ih - roi_start_h) / bin_size_h;
const Dtype output_w = (iw - roi_start_w) / bin_size_w;
Dtype weight = spatial_scale * get_coordinate_gradient(coordinate_index, ih, iw, offset_bottom_data, output_h, output_w, height, width, pooled_height, pooled_width);
buffer_data[index] = weight * top_diff[offset];
}
}
// used as the key functor for thrust::reduce_by_key
// see https://thrust.github.io/doc/group__reductions.html for more detail
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i) {
return i / C;
}
};
template <typename Dtype>
void ROIWarpingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
Dtype* argmax_data_h = max_idx_h_.mutable_gpu_data();
Dtype* argmax_data_w = max_idx_w_.mutable_gpu_data();
const Dtype* top_data = top[0]->gpu_data();
  // backpropagation to the feature map
if (propagate_down[0]) {
hipLaunchKernelGGL(( ROIWarpingBackwardFeature<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
count, top_diff, argmax_data_h, argmax_data_w, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
}
Dtype* bottom_rois_diff = bottom[1]->mutable_gpu_diff();
count = bottom[1]->count();
caffe_gpu_set(count, Dtype(0.), bottom_rois_diff);
  // backpropagation to the ROI coordinates
  // note: every element of the output feature map contributes a derivative w.r.t. its ROI's
  // coordinates, and aggregating all of these gradients inside the CUDA kernel would be very slow.
  // Therefore we pre-compute the coordinate derivative of each output element (stored in buffer_)
  // and then use thrust::reduce_by_key to sum these values per coordinate.
if (propagate_down[1]) {
Dtype* buffer_data = buffer_.mutable_gpu_diff();
const int buffer_count = buffer_.count();
caffe_gpu_set(buffer_count, Dtype(0.), buffer_data);
hipLaunchKernelGGL(( ROIWarpingBackwardCoordinate<Dtype>), dim3(CAFFE_GET_BLOCKS(buffer_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
buffer_count, pooled_width_, pooled_height_, width_, height_, channels_, spatial_scale_, bottom_rois, bottom_data,
argmax_data_h, argmax_data_w, top_diff, buffer_data);
// this is the standard row-sum pattern for thrust::reduce_by_key;
// see https://github.com/thrust/thrust/blob/master/examples/sum_rows.cu for more detail
int R = bottom[1]->num() * 5;
int C = channels_ * pooled_height_ * pooled_width_;
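// buffer_ is viewed as an R x C matrix: row r holds the per-output-element
// gradients w.r.t. coordinate (r % 5) of ROI (r / 5); summing each row gives
// the gradient for that coordinate, in the same layout as bottom_rois_diff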
thrust::device_vector<Dtype> array(R*C);
thrust::copy(buffer_data, buffer_data+buffer_count, array.begin());
thrust::device_vector<Dtype> row_sums(R);
thrust::device_vector<int> row_indices(R);
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<Dtype>());
// copy back the result value to Caffe's blob
thrust::copy(row_sums.begin(), row_sums.end(), bottom_rois_diff);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIWarpingLayer);
} // namespace caffe
| 32f67ee3bbb3fec854b226ae6ce7bbef77303a48.cu | // --------------------------------------------------------
// Multitask Network Cascade
// Written by Haozhi Qi
// Copyright (c) 2016, Haozhi Qi
// Licensed under The MIT License [see LICENSE for details]
// --------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__device__ void bilinear_interpolate(const Dtype* bottom_data, const int height, const int width, Dtype h, Dtype w, Dtype & maxval, Dtype & maxidx_h, Dtype & maxidx_w) {
// handle cases where the sampling position falls outside the feature map boundary
if (h < -0.5 || h > height - 0.5 || w < -0.5 || w > width - 0.5) {
//empty
return;
}
if (h <= 0) h = 0;
if (w <= 0) w = 0;
int h_low = (int) h;
int w_low = (int) w;
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (Dtype) h_low;
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (Dtype) w_low;
} else {
w_high = w_low + 1;
}
Dtype lh = h - h_low;
Dtype lw = w - w_low;
Dtype hh = 1 - lh, hw = 1 - lw;
// do bilinear interpolation
Dtype v1 = bottom_data[h_low * width + w_low];
Dtype v2 = bottom_data[h_low * width + w_high];
Dtype v3 = bottom_data[h_high * width + w_low];
Dtype v4 = bottom_data[h_high * width + w_high];
Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
if (val > maxval) {
maxval = val;
maxidx_h = h;
maxidx_w = w;
}
}
template <typename Dtype>
__global__ void ROIWarpingForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const Dtype* bottom_rois,
Dtype* top_data, Dtype* argmax_data_h, Dtype* argmax_data_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_level = bottom_rois[0];
Dtype roi_start_w = round(bottom_rois[1] * spatial_scale);
Dtype roi_start_h = round(bottom_rois[2] * spatial_scale);
Dtype roi_end_w = round(bottom_rois[3] * spatial_scale);
Dtype roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)0.);
Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)0.);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
// Define an empty pooling region to be zero
Dtype maxval = -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backpropagated
Dtype maxidx_h = -1;
Dtype maxidx_w = -1;
bottom_data += (roi_level * channels + c) * height * width;
Dtype ih = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype iw = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
bilinear_interpolate(bottom_data, height, width, ih, iw, maxval, maxidx_h, maxidx_w);
if (maxidx_h == -1 && maxidx_w == -1) maxval = 0;
top_data[index] = maxval;
argmax_data_h[index] = maxidx_h;
argmax_data_w[index] = maxidx_w;
}
}
template <typename Dtype>
void ROIWarpingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* argmax_data_h = max_idx_h_.mutable_gpu_data();
Dtype* argmax_data_w = max_idx_w_.mutable_gpu_data();
int count = top[0]->count();
ROIWarpingForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >
(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, top_data, argmax_data_h, argmax_data_w);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__device__ Dtype get_feature_gradient(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width)
{
if (argmax_h < -0.5 || argmax_h >(height - 0.5) || argmax_w < -0.5 || argmax_w >(width - 0.5))
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (Dtype)argmax_h_low;
}
else
argmax_h_high = argmax_h_low + 1;
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (Dtype)argmax_w_low;
}
else
argmax_w_high = argmax_w_low + 1;
Dtype weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
}
else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
}
else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename Dtype>
__global__ void ROIWarpingBackwardFeature(const int nthreads, const Dtype* top_diff,
const Dtype* argmax_data_h, const Dtype* argmax_data_w, const int num_rois, const Dtype spatial_scale, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_level = offset_bottom_rois[0];
// Skip if ROI's level doesn't match n
if (n != roi_level) {
continue;
}
Dtype roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
Dtype roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
Dtype roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
Dtype roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= floor(roi_start_w) && w <= ceil(roi_end_w) &&
h >= floor(roi_start_h) && h <= ceil(roi_end_h));
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const Dtype* offset_argmax_data_h = argmax_data_h + offset;
const Dtype* offset_argmax_data_w = argmax_data_w + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w+(Dtype)1.0, (Dtype)1.0);
Dtype roi_height = max(roi_end_h - roi_start_h+(Dtype)1.0, (Dtype)1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h - 1) / bin_size_h - 1);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w - 1) / bin_size_w - 1);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
Dtype weight = get_feature_gradient(offset_argmax_data_h[ph * pooled_width + pw],
offset_argmax_data_w[ph * pooled_width + pw], h, w, height, width);
gradient += weight * offset_top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__device__ Dtype get_coordinate_gradient(int coordinate_index, Dtype h, Dtype w,
const Dtype* offset_bottom_data, const Dtype oh, const Dtype ow, const int height, const int width,
const int pooled_height, const int pooled_width) {
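// dxc/dyc are the derivatives of the bilinearly interpolated value w.r.t. the sampling
// x/y position; dw/dh additionally account for how that position moves when the ROI
// width/height changes (the map_ratio_w / map_ratio_h terms)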
int arg_interpolate_h = (int) h;
int arg_interpolate_w = (int) w;
if (arg_interpolate_h + 1 > height - 1 || arg_interpolate_w + 1 > width - 1) {
return 0;
}
Dtype map_ratio_h = static_cast<Dtype>(oh) / static_cast<Dtype>(pooled_height);
Dtype map_ratio_w = static_cast<Dtype>(ow) / static_cast<Dtype>(pooled_width);
Dtype weight = 0;
int corner_ind_1 = arg_interpolate_h * width + arg_interpolate_w;
int corner_ind_2 = arg_interpolate_h * width + (arg_interpolate_w + 1);
int corner_ind_3 = (arg_interpolate_h + 1) * width + arg_interpolate_w;
int corner_ind_4 = (arg_interpolate_h + 1) * width + (arg_interpolate_w + 1);
Dtype dxc = 0.0, dyc = 0.0, dw = 0.0, dh = 0.0;
dxc += (-1.0 * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_1]);
dxc += ( 1.0 * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_2]);
dxc += (-1.0 * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_3]);
dxc += ( 1.0 * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_4]);
dyc += (-1.0 * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_1]);
dyc += (-1.0 * (w - arg_interpolate_w) * offset_bottom_data[corner_ind_2]);
dyc += ( 1.0 * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_3]);
dyc += ( 1.0 * (w - arg_interpolate_w) * offset_bottom_data[corner_ind_4]);
dw += ((0.5 - map_ratio_w) * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_1]);
dw += ((-0.5+map_ratio_w) * (1.0 - h + arg_interpolate_h) * offset_bottom_data[corner_ind_2]);
dw += ((0.5- map_ratio_w) * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_3]);
dw += ( (-0.5+map_ratio_w) * (h - arg_interpolate_h) * offset_bottom_data[corner_ind_4]);
dh += ((0.5-map_ratio_h) * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_1]);
dh += ((0.5- map_ratio_h) * ( w - arg_interpolate_w) * offset_bottom_data[corner_ind_2]);
dh += ( (-0.5+map_ratio_h) * (1.0 - w + arg_interpolate_w) * offset_bottom_data[corner_ind_3]);
dh += ( (-0.5+map_ratio_h) * ( w - arg_interpolate_w) * offset_bottom_data[corner_ind_4]);
if (coordinate_index == 1) {
// \partial f / \partial x1
weight = 0.5 * dxc - dw;
} else if (coordinate_index == 2) {
// \partial f / \partial y1
weight = 0.5 * dyc - dh;
} else if (coordinate_index == 3) {
// \partial f / \partial x2
weight = 0.5 * dxc + dw;
} else if (coordinate_index == 4) {
// \partial f / \partial y2
weight = 0.5 * dyc + dh;
}
return weight;
}
template <typename Dtype>
__global__ void ROIWarpingBackwardCoordinate(const int nthreads, const int pooled_width, const int pooled_height,
const int width, const int height, const int channels, const Dtype spatial_scale, const Dtype* bottom_rois, const Dtype* bottom_data,
const Dtype* argmax_data_h, const Dtype* argmax_data_w, const Dtype* top_diff, Dtype* buffer_data) {
// index is arranged as (roi_n * 5, c, ph, pw), with pw varying fastest
// each element in buffer_data is the derivative of one output feature map
// element w.r.t. one ROI coordinate
// coordinate_index == 0: w.r.t. the batch index (always 0)
// coordinate_index == 1: w.r.t. x1 (left edge of the ROI)
// coordinate_index == 2: w.r.t. y1 (top edge of the ROI)
// coordinate_index == 3: w.r.t. x2 (right edge of the ROI)
// coordinate_index == 4: w.r.t. y2 (bottom edge of the ROI)
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = (index / pooled_width / pooled_height / channels);
int roi_n = n / 5;
int coordinate_index = n % 5;
Dtype gradient = 0.0;
if (coordinate_index == 0) {
buffer_data[index] = gradient;
}
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
assert(roi_start_h <= roi_end_h);
assert(roi_start_w <= roi_end_w);
const Dtype* offset_bottom_data = bottom_data + ((roi_batch_ind * channels + c) * height * width);
int offset = (((roi_n * channels + c) * pooled_height + ph) * pooled_width) + pw;
// argmax coordinates recorded during the forward pass
Dtype ih = argmax_data_h[offset];
Dtype iw = argmax_data_w[offset];
// the forward pass took the max over a set of sampled positions, so here we
// recover the (fractional) pooled-grid position of that sample from argmax_data
// (output_h from ih, and likewise output_w from iw)
const Dtype output_h = (ih - roi_start_h) / bin_size_h;
const Dtype output_w = (iw - roi_start_w) / bin_size_w;
Dtype weight = spatial_scale * get_coordinate_gradient(coordinate_index, ih, iw, offset_bottom_data, output_h, output_w, height, width, pooled_height, pooled_width);
buffer_data[index] = weight * top_diff[offset];
}
}
// used for thrust::reduce_by_key as key struct
// https://thrust.github.io/doc/group__reductions.html for more detail
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i) {
return i / C;
}
};
template <typename Dtype>
void ROIWarpingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
Dtype* argmax_data_h = max_idx_h_.mutable_gpu_data();
Dtype* argmax_data_w = max_idx_w_.mutable_gpu_data();
const Dtype* top_data = top[0]->gpu_data();
// backpropagation to the feature map
if (propagate_down[0]) {
ROIWarpingBackwardFeature<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >>>
(count, top_diff, argmax_data_h, argmax_data_w, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
}
Dtype* bottom_rois_diff = bottom[1]->mutable_gpu_diff();
count = bottom[1]->count();
caffe_gpu_set(count, Dtype(0.), bottom_rois_diff);
// backpropagation to the ROI coordinates
// note: for each ROI, every element of the output feature map has a derivative w.r.t. its coordinates,
// but aggregating all of these gradients inside the CUDA kernel would be very slow;
// therefore we pre-compute the coordinate derivative of each output element (stored in buffer_)
// and then use thrust::reduce_by_key to sum these values per coordinate
if (propagate_down[1]) {
Dtype* buffer_data = buffer_.mutable_gpu_diff();
const int buffer_count = buffer_.count();
caffe_gpu_set(buffer_count, Dtype(0.), buffer_data);
ROIWarpingBackwardCoordinate<Dtype><<<CAFFE_GET_BLOCKS(buffer_count), CAFFE_CUDA_NUM_THREADS>>>(
buffer_count, pooled_width_, pooled_height_, width_, height_, channels_, spatial_scale_, bottom_rois, bottom_data,
argmax_data_h, argmax_data_w, top_diff, buffer_data);
// this is the standard row-sum pattern for thrust::reduce_by_key;
// see https://github.com/thrust/thrust/blob/master/examples/sum_rows.cu for more detail
int R = bottom[1]->num() * 5;
int C = channels_ * pooled_height_ * pooled_width_;
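// buffer_ is viewed as an R x C matrix: row r holds the per-output-element
// gradients w.r.t. coordinate (r % 5) of ROI (r / 5); summing each row gives
// the gradient for that coordinate, in the same layout as bottom_rois_diff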
thrust::device_vector<Dtype> array(R*C);
thrust::copy(buffer_data, buffer_data+buffer_count, array.begin());
thrust::device_vector<Dtype> row_sums(R);
thrust::device_vector<int> row_indices(R);
thrust::reduce_by_key(
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<Dtype>());
// copy back the result value to Caffe's blob
thrust::copy(row_sums.begin(), row_sums.end(), bottom_rois_diff);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIWarpingLayer);
} // namespace caffe
|
5436fd2447aa80ef32bb0f216e0ae076f2c89654.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "common.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* h_D;
float* h_E;
float* d_A;
float* d_B;
float* d_C;
float* d_D;
float* d_E;
// Functions
void Cleanup(void);
void RandomInit(float*, int);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, float* D, float* E)
{
int i = threadIdx.x;
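// one thread per element; no bounds check is needed because the kernel
// is launched with a single block of exactly N threads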
C[i] = A[i] + B[i];
E[i] = A[i] + B[i] * D[i];
}
// Host code
int main(int argc, char** argv)
{
printf("Simple vector addition\n");
int N = 256;
size_t size = N * sizeof(float);
// Allocate input vectors h_A, h_B and h_C in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup();
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup();
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup();
h_D = (float*)malloc(size);
if (h_D == 0) Cleanup();
h_E = (float*)malloc(size);
if (h_E == 0) Cleanup();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
RandomInit(h_D, N);
// Allocate vectors in device memory
CUDA_SAFE_CALL( hipMalloc((void**)&d_A, size) );
CUDA_SAFE_CALL( hipMalloc((void**)&d_B, size) );
CUDA_SAFE_CALL( hipMalloc((void**)&d_C, size) );
CUDA_SAFE_CALL( hipMalloc((void**)&d_D, size) );
CUDA_SAFE_CALL( hipMalloc((void**)&d_E, size) );
// Copy vectors from host memory to device memory
CUDA_SAFE_CALL( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(d_D, h_D, size, hipMemcpyHostToDevice) );
// Invoke kernel
hipLaunchKernelGGL(( VecAdd), dim3(1), dim3(N), 0, 0, d_A, d_B, d_C, d_D, d_E);
#ifdef _DEBUG
CUDA_SAFE_CALL( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
CUDA_SAFE_CALL( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy(h_E, d_E, size, hipMemcpyDeviceToHost) );
// Verify result
int i = 0;
for (i = 0; i < N; ++i)
{
float sum = h_A[i] + h_B[i];
printf("%f + %f = %f\n", h_A[i], h_B[i], h_C[i]);
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
// TODO: Print out E and verify the result.
for (i = 0; i < N; ++i)
{
float sum = h_A[i] + h_B[i] * h_D[i];
printf("%f + %f * %f = %f\n", h_A[i], h_B[i], h_D[i], h_E[i]);
if (fabs(h_E[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// TODO: Free device memory of D and E.
if (d_D)
hipFree(d_D);
if (d_E)
hipFree(d_E);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
// TODO: Free host memory of D and E.
if (h_D)
free(h_D);
if (h_E)
free(h_E);
CUDA_SAFE_CALL( hipDeviceReset() );
printf("\nPress ENTER to exit...\n");
fflush( stdout);
fflush( stderr);
getchar();
exit(0);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
| 5436fd2447aa80ef32bb0f216e0ae076f2c89654.cu |
// Includes
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include "common.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* h_D;
float* h_E;
float* d_A;
float* d_B;
float* d_C;
float* d_D;
float* d_E;
// Functions
void Cleanup(void);
void RandomInit(float*, int);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, float* D, float* E)
{
int i = threadIdx.x;
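// one thread per element; no bounds check is needed because the kernel
// is launched with a single block of exactly N threads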
C[i] = A[i] + B[i];
E[i] = A[i] + B[i] * D[i];
}
// Host code
int main(int argc, char** argv)
{
printf("Simple vector addition\n");
int N = 256;
size_t size = N * sizeof(float);
// Allocate input vectors h_A, h_B and h_C in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup();
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup();
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup();
h_D = (float*)malloc(size);
if (h_D == 0) Cleanup();
h_E = (float*)malloc(size);
if (h_E == 0) Cleanup();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
RandomInit(h_D, N);
// Allocate vectors in device memory
CUDA_SAFE_CALL( cudaMalloc((void**)&d_A, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&d_B, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&d_C, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&d_D, size) );
CUDA_SAFE_CALL( cudaMalloc((void**)&d_E, size) );
// Copy vectors from host memory to device memory
CUDA_SAFE_CALL( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(d_D, h_D, size, cudaMemcpyHostToDevice) );
// Invoke kernel
VecAdd<<<1, N>>>(d_A, d_B, d_C, d_D, d_E);
#ifdef _DEBUG
CUDA_SAFE_CALL( cudaThreadSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
CUDA_SAFE_CALL( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy(h_E, d_E, size, cudaMemcpyDeviceToHost) );
// Verify result
int i = 0;
for (i = 0; i < N; ++i)
{
float sum = h_A[i] + h_B[i];
printf("%f + %f = %f\n", h_A[i], h_B[i], h_C[i]);
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
// TODO: Print out E and verify the result.
for (i = 0; i < N; ++i)
{
float sum = h_A[i] + h_B[i] * h_D[i];
printf("%f + %f * %f = %f\n", h_A[i], h_B[i], h_D[i], h_E[i]);
if (fabs(h_E[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// TODO: Free device memory of D and E.
if (d_D)
cudaFree(d_D);
if (d_E)
cudaFree(d_E);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
// TODO: Free host memory of D and E.
if (h_D)
free(h_D);
if (h_E)
free(h_E);
CUDA_SAFE_CALL( cudaThreadExit() );
printf("\nPress ENTER to exit...\n");
fflush( stdout);
fflush( stderr);
getchar();
exit(0);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
|
051a034b8396595c9c02fb92bd75c2b14a0617a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaBsrMatrix.h"
#include "CudaDiagBlockMatrix.h"
typedef CudaBsrMatrix::Range Range;
__global__ void CudaBsrMatrix_set(int n, float* ptr, float val)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
ptr[i] = val;
}
__global__ void CudaBsrMatrix_scale_add(int n, float* ptr, float alpha, float beta)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
ptr[i] = alpha * ptr[i] + beta;
}
__global__ void CudaBsrMatrix_scale_add_diag(int nRows, float* ptr,
const int* bsrRowPtr, hipTextureObject_t bsrColIdx,
int blockSize, float alpha, float beta)
{
int iRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iRow >= nRows)
return;
int iBlockRow = iRow / blockSize;
int rowBlockBegin = bsrRowPtr[iBlockRow];
int rowBlockEnd = bsrRowPtr[iBlockRow + 1];
for (int c = rowBlockBegin; c < rowBlockEnd; c++)
{
int iBlockCol = 0;
tex1Dfetch(&iBlockCol, bsrColIdx, c);
if (iBlockCol == iBlockRow)
{
int rowShift = iRow - iBlockRow * blockSize;
int pos = (c * blockSize + rowShift) * blockSize + rowShift;
ptr[pos] = alpha * ptr[pos] + beta;
}
}
}
__global__ void CudaBsrMatrix_fill_increment_1_n(int* data, int n)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
data[i] = i;
}
__global__ void CudaBsrMatrix_setRowFromBlockedCsrRowPtr(const int* csrRowPtr,
int* bsrRowPtr, int blockInRows, int rowsPerBlock, int elementsPerBlock)
{
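// assumes the CSR pattern is block-aligned: the element offset of the first CSR row
// of each block-row, divided by elementsPerBlock, gives that block-row's block offset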
int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow <= blockInRows)
bsrRowPtr[iBlockRow] = csrRowPtr[iBlockRow*rowsPerBlock]/elementsPerBlock;
}
__global__ void CudaBsrMatrix_transpose_fill_value_by_bid(const int* blockIds, const float* srcValues,
float* dstValues, int blockSize_RxC, int blockR, int blockC, int nnz)
{
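// gather pass for the transposed matrix: destination block dBlock reads from source
// block blockIds[dBlock], and entry (dR, dC) of the destination is entry (dC, dR) of the source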
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnz)
{
int dBlock = i / blockSize_RxC;
int dShift = i - dBlock * blockSize_RxC;
int dR = dShift / blockC;
int dC = dShift - dR * blockC;
int sBlock = blockIds[dBlock];
int sShift = dC*blockR + dR;
dstValues[i] = srcValues[sBlock * blockSize_RxC + sShift];
}
}
template<int colsPerBlock>
__global__ void CudaBsrMatrix_Mv(hipTextureObject_t bsrRowPtr,
hipTextureObject_t bsrColIdx, hipTextureObject_t bsrValue,
const float* x, float* y, float alpha, float beta, int nRow,
int rowsPerBlock)
{
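// y = alpha * A * x + beta * y, one thread per output row: walk the nonzero blocks
// of this block-row and accumulate the row slice dotted with the matching segment of x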
int iRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iRow < nRow)
{
int iBlockRow = iRow / rowsPerBlock;
int rowShift = iRow - iBlockRow * rowsPerBlock;
int blockColPosBegin = 0;
tex1Dfetch(&blockColPosBegin, bsrRowPtr, iBlockRow);
int blockColPosEnd = 0;
tex1Dfetch(&blockColPosEnd, bsrRowPtr, iBlockRow + 1);
float sum = 0.f;
for (int bIdx = blockColPosBegin; bIdx < blockColPosEnd; ++bIdx)
{
int iBlockCol = 0;
tex1Dfetch(&iBlockCol, bsrColIdx, bIdx);
iBlockCol *= colsPerBlock;
int valIdx = (bIdx * rowsPerBlock + rowShift) * colsPerBlock;
for (int c = 0; c < colsPerBlock; c++)
{
float val = 0.f;
tex1Dfetch(&val, bsrValue, valIdx + c);
sum += x[iBlockCol + c] * val;
}
}
y[iRow] = alpha * sum + beta * y[iRow];
}
}
template<bool LowerInsteadOfFull, bool Trans>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
hipTextureObject_t leftValue, hipTextureObject_t rightValue)
{
return 0.f;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<false, false>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
hipTextureObject_t leftValue, hipTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * 1;
for (int k = 0; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k*colsPerBlock);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<true, false>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
hipTextureObject_t leftValue, hipTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * 1;
for (int k = colShiftResult; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k*colsPerBlock);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<false, true>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
hipTextureObject_t leftValue, hipTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * colsPerBlock;
for (int k = 0; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<true, true>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
hipTextureObject_t leftValue, hipTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * colsPerBlock;
for (int k = 0; k <= colShiftResult; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k);
sum += valLeft * valRight;
}
return sum;
}
template<bool LowerInsteadOfFull, bool Trans>
__global__ void CudaBsrMatrix_rightMultDiag(
const int* bsrRowPtr, const int* bsrRowPtr_coo,
hipTextureObject_t bsrColIdx, hipTextureObject_t bsrValue,
hipTextureObject_t x, float* y, float alpha, float beta,
int rowsPerBlock, int colsPerBlock, int nnz)
{
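// y = alpha * (A * D) + beta * y with D block-diagonal: one thread per nonzero element
// of the result; the contributing block of the left matrix is located in its block-row
// by binary search on the block-column index below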
int posResult = threadIdx.x + blockIdx.x * blockDim.x;
if (posResult >= nnz)
return;
int elePerBlock = rowsPerBlock * colsPerBlock;
int posResultBlock = posResult / elePerBlock;
int shiftResult = posResult - posResultBlock * elePerBlock;
int rowShiftResult = shiftResult / colsPerBlock;
int colShiftResult = shiftResult - rowShiftResult * colsPerBlock;
int blockRowResult = bsrRowPtr_coo[posResultBlock];
int blockColResult = 0;
tex1Dfetch(&blockColResult, bsrColIdx, posResultBlock);
int blockBeginLeft = bsrRowPtr[blockRowResult];
int blockEndLeft = bsrRowPtr[blockRowResult + 1];
// binary search this block-row of the left matrix for block column blockColResult
while (blockBeginLeft < blockEndLeft)
{
int imid = ((blockBeginLeft + blockEndLeft) >> 1);
int b = 0;
tex1Dfetch(&b, bsrColIdx, imid);
if (b < blockColResult)
blockBeginLeft = imid + 1;
else
blockEndLeft = imid;
}
int b = 0;
tex1Dfetch(&b, bsrColIdx, blockBeginLeft);
float sum = 0.f;
if (b == blockColResult && blockBeginLeft == blockEndLeft)
{
sum = CudaBsrMatrix_rightMultDiag_1<LowerInsteadOfFull, Trans>(
blockBeginLeft, blockColResult, rowsPerBlock, colsPerBlock, elePerBlock,
rowShiftResult, colShiftResult, bsrValue, x);
}
// write the result
y[posResult] = alpha * sum + beta * y[posResult];
}
__global__ void CudaBsrMatrix_Range_multBsrT_value(
const int* bsrRowPtrA, hipTextureObject_t bsrColIdxA, hipTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
const int* bsrRowPtrB, hipTextureObject_t bsrColIdxB, hipTextureObject_t valueB,
int rangeColBeginB, int rangeColEndB,
const int* bsrRowPtrD, hipTextureObject_t bsrColIdxD, hipTextureObject_t valueD,
int rangeColBeginD, int rangeColEndD,
const int* bsrRowPtrC_coo, const int* bsrColIdxC, float* valueC,
int rowsPerBlockA, int colsPerBlockA, int rowsPerBlockB, int nnzC, float alpha, float beta
)
{
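// one thread per scalar entry of C = alpha * A(range) * B(range)^T + beta * D(range);
// the sorted block-column lists of the A row and the B row are merged below to find
// the blocks that match inside the given column ranges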
int innzC = threadIdx.x + blockIdx.x * blockDim.x;
if (innzC >= nnzC)
return;
const int elePerBlockC = rowsPerBlockA * rowsPerBlockB;
int innzBlockC = innzC / elePerBlockC;
int innzShiftC = innzC - innzBlockC * elePerBlockC;
int rowShiftC = innzShiftC / rowsPerBlockB;
int colShiftC = innzShiftC - rowShiftC * rowsPerBlockB;
int rowBlockC = bsrRowPtrC_coo[innzBlockC];
int colBlockC = bsrColIdxC[innzBlockC];
int blockBeginA = bsrRowPtrA[rowBlockC];
int blockEndA = bsrRowPtrA[rowBlockC + 1];
int blockBeginB = bsrRowPtrB[colBlockC];
int blockEndB = bsrRowPtrB[colBlockC + 1];
// A * B^T (restricted to the given column ranges)
float sum = 0.f;
for (int i0 = blockBeginA, i1 = blockBeginB; i0 < blockEndA && i1 < blockEndB;)
{
int colBlockA = 0, colBlockB = 0;
tex1Dfetch(&colBlockA, bsrColIdxA, i0);
tex1Dfetch(&colBlockB, bsrColIdxB, i1);
if (colBlockA >= rangeColEndA || colBlockB >= rangeColEndB)
break;
colBlockA -= rangeColBeginA;
colBlockB -= rangeColBeginB;
if (colBlockA == colBlockB && colBlockA >= 0)
{
int pos0 = (i0*colsPerBlockA + rowShiftC)*rowsPerBlockA;
int pos1 = (i1*rowsPerBlockB + colShiftC)*colsPerBlockA;
for (int k = 0; k < colsPerBlockA; k++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, pos0 + k);
tex1Dfetch(&v2, valueB, pos1 + k);
sum += v1 * v2;
}
i0++;
i1++;
}
i0 += (colBlockA < colBlockB) || (colBlockA < 0);
i1 += (colBlockA > colBlockB) || (colBlockB < 0);
}// i
// D
float D_val = 0.f;
if (bsrRowPtrD)
{
int blockBeginD = bsrRowPtrD[rowBlockC];
int blockEndD = bsrRowPtrD[rowBlockC + 1];
int colBlockD = 0;
for (int c = blockBeginD; c < blockEndD && colBlockD < rangeColEndD; c++)
{
tex1Dfetch(&colBlockD, bsrColIdxD, c);
if (colBlockD - rangeColBeginD == colBlockC)
{
tex1Dfetch(&D_val, valueD, (c * rowsPerBlockA + rowShiftC) * rowsPerBlockB + colShiftC);
break;
}
}
}// end if bsrRowPtrD
valueC[innzC] = alpha * sum + beta * D_val;
}
__global__ void CudaBsrMatrix_Range_multBsrT_addDiag_value(
const int* bsrRowPtrA, hipTextureObject_t bsrColIdxA, hipTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
const int* bsrRowPtrB, hipTextureObject_t bsrColIdxB, hipTextureObject_t valueB,
int rangeColBeginB, int rangeColEndB,
hipTextureObject_t valueD,
const int* bsrRowPtrC_coo, const int* bsrColIdxC, float* valueC,
int rowsPerBlockA, int colsPerBlockA, int rowsPerBlockB, int nnzC, float alpha, float beta
)
{
int innzC = threadIdx.x + blockIdx.x * blockDim.x;
if (innzC >= nnzC)
return;
const int elePerBlockC = rowsPerBlockA * rowsPerBlockB;
int innzBlockC = innzC / elePerBlockC;
int innzShiftC = innzC - innzBlockC * elePerBlockC;
int rowShiftC = innzShiftC / rowsPerBlockB;
int colShiftC = innzShiftC - rowShiftC * rowsPerBlockB;
int rowBlockC = bsrRowPtrC_coo[innzBlockC];
int colBlockC = bsrColIdxC[innzBlockC];
int blockBeginA = bsrRowPtrA[rowBlockC];
int blockEndA = bsrRowPtrA[rowBlockC + 1];
int blockBeginB = bsrRowPtrB[colBlockC];
int blockEndB = bsrRowPtrB[colBlockC + 1];
// A * B^T (restricted to the given column ranges)
float sum = 0.f;
for (int i0 = blockBeginA, i1 = blockBeginB; i0 < blockEndA && i1 < blockEndB;)
{
int colBlockA = 0, colBlockB = 0;
tex1Dfetch(&colBlockA, bsrColIdxA, i0);
tex1Dfetch(&colBlockB, bsrColIdxB, i1);
if (colBlockA >= rangeColEndA || colBlockB >= rangeColEndB)
break;
colBlockA -= rangeColBeginA;
colBlockB -= rangeColBeginB;
if (colBlockA == colBlockB && colBlockA >= 0)
{
int pos0 = (i0*colsPerBlockA + rowShiftC)*rowsPerBlockA;
int pos1 = (i1*rowsPerBlockB + colShiftC)*colsPerBlockA;
for (int k = 0; k < colsPerBlockA; k++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, pos0 + k);
tex1Dfetch(&v2, valueB, pos1 + k);
sum += v1 * v2;
}
i0++;
i1++;
}
i0 += (colBlockA < colBlockB) || (colBlockA < 0);
i1 += (colBlockA > colBlockB) || (colBlockB < 0);
}// i
// D
float D_val = 0.f;
if (valueD && rowBlockC == colBlockC)
tex1Dfetch(&D_val, valueD, (rowBlockC * rowsPerBlockA + rowShiftC) * rowsPerBlockB + colShiftC);
valueC[innzC] = alpha * sum + beta * D_val;
}
__global__ void CudaBsrMatrix_Range_AAt_blockDiags(
const int* bsrRowPtrA, hipTextureObject_t bsrColIdxA, hipTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
float* diag, int rowsPerBlockA, int colsPerBlockA, int nnzDiag,
bool useLowerInsteadOfFull, float alpha, float beta
)
{
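// one thread per entry of the block-diagonal of alpha * A(range) * A(range)^T + beta * diag;
// when useLowerInsteadOfFull is set, only the lower triangle of each diagonal block is written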
int innzDiag = threadIdx.x + blockIdx.x * blockDim.x;
if (innzDiag >= nnzDiag)
return;
int blockDiagSz = rowsPerBlockA*rowsPerBlockA;
int iBlockDiag = innzDiag / blockDiagSz;
int shift = innzDiag - iBlockDiag*blockDiagSz;
int rowShift = shift / colsPerBlockA;
int colShift = shift - rowShift * colsPerBlockA;
if (useLowerInsteadOfFull && rowShift < colShift)
return;
int row0 = bsrRowPtrA[iBlockDiag];
int row0_begin = (row0*rowsPerBlockA + rowShift) * colsPerBlockA;
const int row_blocks = bsrRowPtrA[iBlockDiag + 1] - row0;
int row1_begin = (row0*rowsPerBlockA + colShift) * colsPerBlockA;
int blockSzA = rowsPerBlockA * colsPerBlockA;
float sum = 0;
int colBlock = 0;
for (int iBlocks = 0; iBlocks < row_blocks && colBlock < rangeColEndA;
iBlocks++, row0_begin += blockSzA, row1_begin += blockSzA)
{
tex1Dfetch(&colBlock, bsrColIdxA, row0 + iBlocks);
if (colBlock < rangeColBeginA)
continue;
for (int i = 0; i < colsPerBlockA; i++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, row0_begin + i);
tex1Dfetch(&v2, valueA, row1_begin + i);
sum += v1 * v2;
}
}
diag[innzDiag] = alpha*sum + beta*diag[innzDiag];
}
__global__ void CudaBsrMatrix_subRows_structure_rptr(const int* bsrRowPtrFull, int* bsrRowPtrSub,
int rowBegin, int num)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i <= num) // <=, because the bsr row pointer array has num+1 entries
{
bsrRowPtrSub[i] = bsrRowPtrFull[i + rowBegin] - bsrRowPtrFull[rowBegin];
}
}
__global__ void CudaBsrMatrix_subRows_structure_cidx(hipTextureObject_t bsrRowPtrFull,
const int* bsrColIdxFull, int* bsrColIdxSub, int rowBegin, int nnzBlocks)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnzBlocks)
{
int colBegin = 0;
tex1Dfetch(&colBegin, bsrRowPtrFull, rowBegin);
bsrColIdxSub[i] = bsrColIdxFull[i + colBegin];
}
}
__global__ void CudaBsrMatrix_subRows_value(hipTextureObject_t bsrRowPtrFull,
const float* valueFull, float* valueSub, int rowBegin, int nnz, int blockSize2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnz)
{
int nnzBegin = 0;
tex1Dfetch(&nnzBegin, bsrRowPtrFull, rowBegin);
nnzBegin *= blockSize2;
valueSub[i] = valueFull[i + nnzBegin];
}
}
__global__ void CudaBsrMatrix_toCsr_structure_rptr(hipTextureObject_t bsrRowPtr,
int* csrRowPtr, int bsrBlockRow, int bsrBlockCol, int nCsrRows)
{
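// elements before CSR row i = all blocks of previous block-rows (bsr * bsrBlockRow * bsrBlockCol)
// plus the first `shift` rows of this block-row ((bsr1 - bsr) * shift * bsrBlockCol)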
int i = threadIdx.x + blockIdx.x * blockDim.x;
int ib = i / bsrBlockRow;
if (i < nCsrRows)
{
int shift = i - ib*bsrBlockRow;
int bsr = 0, bsr1=0;
tex1Dfetch(&bsr, bsrRowPtr, ib);
tex1Dfetch(&bsr1, bsrRowPtr, ib+1);
csrRowPtr[i] = (bsr*bsrBlockRow + (bsr1-bsr)*shift) * bsrBlockCol;
}
if (i == nCsrRows)
{
int bsr = 0;
tex1Dfetch(&bsr, bsrRowPtr, ib);
csrRowPtr[i] = bsr*bsrBlockRow * bsrBlockCol;
}
}
__global__ void CudaBsrMatrix_toCsr_structure_cidx(
hipTextureObject_t bsrRowPtr, hipTextureObject_t bsrColIdx,
const int* csrRowPtr_coo, const int* csrRowPtr, int* csrColIdx,
int bsrBlockRow, int bsrBlockCol, int nCsrNNZ)
{
int innz = threadIdx.x + blockIdx.x * blockDim.x;
if (innz < nCsrNNZ)
{
int iRow = csrRowPtr_coo[innz];
int iBlockRow = iRow / bsrBlockRow;
int colShiftOfRow = innz - csrRowPtr[iRow];
int blockColShiftOfRow = colShiftOfRow / bsrBlockCol;
int iBlock = 0;
tex1Dfetch(&iBlock, bsrRowPtr, iBlockRow);
iBlock += blockColShiftOfRow;
int cshift = colShiftOfRow - blockColShiftOfRow * bsrBlockCol;
int bc = 0;
tex1Dfetch(&bc, bsrColIdx, iBlock);
csrColIdx[innz] = bc * bsrBlockCol + cshift;
}
}
__global__ void CudaBsrMatrix_toCsr_structure_val(
hipTextureObject_t bsrRowPtr, hipTextureObject_t bsrValue,
const int* csrRowPtr_coo, const int* csrRowPtr, float* csrValue,
int bsrBlockRow, int bsrBlockCol, int nCsrNNZ)
{
int innz = threadIdx.x + blockIdx.x * blockDim.x;
if (innz < nCsrNNZ)
{
int iRow = csrRowPtr_coo[innz];
int iBlockRow = iRow / bsrBlockRow;
int colShiftOfRow = innz - csrRowPtr[iRow];
int blockColShiftOfRow = colShiftOfRow / bsrBlockCol;
int iBlock = 0;
tex1Dfetch(&iBlock, bsrRowPtr, iBlockRow);
iBlock += blockColShiftOfRow;
int rshift = iRow - iBlockRow * bsrBlockRow;
int cshift = colShiftOfRow - blockColShiftOfRow * bsrBlockCol;
tex1Dfetch(&csrValue[innz], bsrValue, (iBlock*bsrBlockRow+rshift)*bsrBlockCol + cshift);
}
}
void CudaBsrMatrix::fill_increment_1_n(int* data, int n)
{
if (n == 0)
return;
CudaBsrMatrix_fill_increment_1_n << <divUp(n, CTA_SIZE), CTA_SIZE >> >(
data, n);
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::fill_increment_1_n");
}
void CudaBsrMatrix::transpose_fill_values_by_blockId(const int* blockIds, const CudaBsrMatrix& t)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::transpose_fill_values_by_blockId(): symbolic matrix cannot touch values");
CudaBsrMatrix_transpose_fill_value_by_bid << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
blockIds, t.value(), value(), rowsPerBlock() * colsPerBlock(), rowsPerBlock(),
colsPerBlock(), nnz());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::CudaBsrMatrix_transpose_fill_value_by_bid");
}
CudaBsrMatrix& CudaBsrMatrix::operator = (float constVal)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::operator =: symbolic matrix cannot touch values");
if (nnz() == 0)
return *this;
if (constVal == 0.f)
{
cudaSafeCall(hipMemset(m_values.ptr(), 0, nnz()*m_values.elem_size),
"CudaBsrMatrix::operator = 0");
}
else
{
CudaBsrMatrix_set << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(nnz(), m_values.ptr(), constVal);
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::operator = constVal");
}
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::operator = (const CudaBsrMatrix& rhs)
{
m_cusparseHandle = rhs.m_cusparseHandle;
m_symbolic = rhs.m_symbolic;
resize(rhs.blocksInRow(), rhs.blocksInCol(), rhs.rowsPerBlock(), rhs.colsPerBlock());
resize_nnzBlocks(rhs.nnzBlocks());
cudaSafeCall(hipMemcpy(bsrRowPtr(), rhs.bsrRowPtr(),
(1+rhs.blocksInRow())*sizeof(int), hipMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrRowPtr");
cudaSafeCall(hipMemcpy(bsrRowPtr_coo(), rhs.bsrRowPtr_coo(),
rhs.nnzBlocks()*sizeof(int), hipMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrRowPtr_coo");
cudaSafeCall(hipMemcpy(bsrColIdx(), rhs.bsrColIdx(),
rhs.nnzBlocks()*sizeof(int), hipMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrColIdx");
if (!isSymbolic())
cudaSafeCall(hipMemcpy(value(), rhs.value(),
rhs.nnz()*sizeof(float), hipMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy value");
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::axpy(float alpha, float beta)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::axpy(): symbolic matrix cannot touch values");
if (nnz() == 0)
return *this;
CudaBsrMatrix_scale_add << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
nnz(), value(), alpha, beta);
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::axpy");
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::axpy_diag(float alpha, float beta)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::axpy_diag(): symbolic matrix cannot touch values");
if (rowsPerBlock() != colsPerBlock() || blocksInRow() != blocksInCol())
throw std::exception("CudaBsrMatrix::axpy_diag(): only square matrix supported");
if (nnz() == 0)
return *this;
CudaBsrMatrix_scale_add_diag << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
rows(), value(), bsrRowPtr(), bsrColIdxTexture(), rowsPerBlock(), alpha, beta);
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::axpy_diag");
return *this;
}
void CudaBsrMatrix::setValue(const float* val_d)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::setValue(): symbolic matrix cannot touch values");
cudaSafeCall(hipMemcpy(value(), val_d, nnz()*sizeof(float),
hipMemcpyDeviceToDevice), "CudaBsrMatrix::setValue");
}
void CudaBsrMatrix::Mv(const float* x, float* y, float alpha, float beta)const
{
if (rows() == 0 || cols() == 0)
return;
if (isSymbolic())
throw std::exception("CudaBsrMatrix::Mv(): symbolic matrix cannot touch values");
switch (colsPerBlock())
{
case 0:
break;
case 1:
CudaBsrMatrix_Mv<1> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 2:
CudaBsrMatrix_Mv<2> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 3:
CudaBsrMatrix_Mv<3> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 4:
CudaBsrMatrix_Mv<4> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 5:
CudaBsrMatrix_Mv<5> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 6:
CudaBsrMatrix_Mv<6> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
default:
throw std::exception("non-supported block size!");
}
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::Mv");
}
void CudaBsrMatrix::rightMultDiag_structure(const CudaDiagBlockMatrix& x, CudaBsrMatrix& y)const
{
if (cols() != x.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_structure: block size not matched");
if (x.blockSize() != colsPerBlock() || x.blockSize() != rowsPerBlock())
throw std::exception("CudaBsrMatrix::rightMultDiag_structure: matrix size not matched");
y = *this;
y = 0;
}
void CudaBsrMatrix::rightMultDiag_value(const CudaDiagBlockMatrix& x, CudaBsrMatrix& y,
bool useLowerInsteadOfFull_x, bool trans_x, float alpha, float beta)const
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::rightMultDiag_value(): symbolic matrix cannot touch values");
if (cols() != x.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: block size not matched");
if (x.blockSize() != colsPerBlock() || x.blockSize() != rowsPerBlock())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: matrix size not matched");
if (cols() != y.cols() || rows() != y.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: y not matched, call rightMultDiag_structure()!");
if (nnz() == 0)
return;
if (useLowerInsteadOfFull_x == true && trans_x == true)
CudaBsrMatrix_rightMultDiag<true, true> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == true && trans_x == false)
CudaBsrMatrix_rightMultDiag<true, false> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == false && trans_x == false)
CudaBsrMatrix_rightMultDiag<false, false> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == false && trans_x == true)
CudaBsrMatrix_rightMultDiag<false, true> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::rightMultDiag_value");
}
void CudaBsrMatrix::setRowFromBlockedCsrRowPtr(const int* csrRowPtr)
{
if (blocksInRow() == 0)
return;
beginConstructRowPtr();
CudaBsrMatrix_setRowFromBlockedCsrRowPtr << <divUp(blocksInRow(), CTA_SIZE), CTA_SIZE >> >(
csrRowPtr, bsrRowPtr(), blocksInRow(), rowsPerBlock(), rowsPerBlock()*colsPerBlock());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix::setRowFromBlockedCsrRowPtr");
endConstructRowPtr();
}
void CudaBsrMatrix::Range::multBsr_value(const Range& B, CudaBsrMatrix& C, float alpha,
const Range* D, float beta)const
{
throw std::exception("CudaBsrMatrix::Range::multBsr_value(): not implemented");
}
void CudaBsrMatrix::Range::multBsrT_value(const Range& B, CudaBsrMatrix& C, float alpha,
const Range* D, float beta)const
{
if (A == nullptr || B.A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): null pointer exception");
if (D)
if (D->A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): null pointer exception");
if (A->isSymbolic() || B.A->isSymbolic())
throw std::exception("CudaBsrMatrix::multBsrT_value(): symbolic matrix cannot touch values");
if (D)
{
if (D->A->isSymbolic())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): symbolic matrix cannot touch values");
if (C.blocksInRow() != D->blocksInRow() || C.blocksInCol() != D->blocksInCol()
|| C.rowsPerBlock() != D->rowsPerBlock() || C.colsPerBlock() != D->colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): D size not matched");
}
if (blocksInCol() != B.blocksInCol())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): matrix size not matched");
if (colsPerBlock() != B.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): block size not matched");
if (blocksInRow() != C.blocksInRow() || B.blocksInRow() != C.blocksInCol()
|| rowsPerBlock() != C.rowsPerBlock() || B.rowsPerBlock() != C.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): result size not matched");
if (C.nnzBlocks() == 0)
return;
const int* D_rptr = nullptr;
hipTextureObject_t D_cidx = 0, D_val = 0;
int D_cbegin = 0, D_cend = 0;
if (D)
{
D_rptr = D->A->bsrRowPtr() +D->blockRowBegin;
D_cidx = D->A->bsrColIdxTexture();
D_val = D->A->valueTexture();
D_cbegin = D->blockColBegin;
D_cend = D->blockColEnd;
}
CudaBsrMatrix_Range_multBsrT_value << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr()+blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
B.A->bsrRowPtr()+B.blockRowBegin, B.A->bsrColIdxTexture(), B.A->valueTexture(),
B.blockColBegin, B.blockColEnd,
D_rptr, D_cidx, D_val, D_cbegin, D_cend,
C.bsrRowPtr_coo(), C.bsrColIdx(), C.value(),
rowsPerBlock(), colsPerBlock(), B.rowsPerBlock(), C.nnz(), alpha, beta
);
}
void CudaBsrMatrix::Range::multBsrT_addDiag_value(const Range& B, CudaBsrMatrix& C, float alpha,
const CudaDiagBlockMatrix* D, float beta)const
{
if (A == nullptr || B.A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: null pointer exception");
if (A->isSymbolic() || B.A->isSymbolic())
throw std::exception("CudaBsrMatrix::multBsrT_value()1: symbolic matrix cannot touch values");
if (blocksInCol() != B.blocksInCol())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: matrix size not matched");
if (colsPerBlock() != B.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: block size not matched");
if (blocksInRow() != C.blocksInRow() || B.blocksInRow() != C.blocksInCol()
|| rowsPerBlock() != C.rowsPerBlock() || B.rowsPerBlock() != C.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: result size not matched");
if (D)
{
if (C.blocksInRow() != D->numBlocks() || C.blocksInCol() != D->numBlocks())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: D size not matched");
if (C.rowsPerBlock() != D->blockSize() || C.colsPerBlock() != D->blockSize())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: D block not matched");
}
if (C.nnzBlocks() == 0)
return;
hipTextureObject_t D_val = 0;
if (D)
D_val = D->getTexture();
CudaBsrMatrix_Range_multBsrT_addDiag_value << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr() + blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
B.A->bsrRowPtr() + B.blockRowBegin, B.A->bsrColIdxTexture(), B.A->valueTexture(),
B.blockColBegin, B.blockColEnd,
D_val,
C.bsrRowPtr_coo(), C.bsrColIdx(), C.value(),
rowsPerBlock(), colsPerBlock(), B.rowsPerBlock(), C.nnz(), alpha, beta
);
}
void CudaBsrMatrix::Range::AAt_blockDiags(CudaDiagBlockMatrix& C,
bool lowerInsteadOfFull, float alpha, float beta)const
{
if (A == nullptr)
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): null pointer exception");
if (A->isSymbolic())
throw std::exception("CudaBsrMatrix::AAt_blockDiags(): symbolic matrix cannot touch values");
if (blocksInRow() != C.numBlocks())
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): matrix size not matched");
if (rowsPerBlock() != C.blockSize())
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): block size not matched");
if (A->nnzBlocks() == 0)
return;
CudaBsrMatrix_Range_AAt_blockDiags << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr() + blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
C.value(), rowsPerBlock(), colsPerBlock(), C.nnz(),
lowerInsteadOfFull, alpha, beta
);
}
void CudaBsrMatrix::subRows_structure(CudaBsrMatrix& S, int blockRowBegin, int blockRowEnd)const
{
blockRowBegin = ::max(0, blockRowBegin);
blockRowEnd = ::min(blocksInRow(), blockRowEnd);
if (blockRowBegin >= blockRowEnd)
{
S.resize(0, 0, rowsPerBlock(), colsPerBlock());
return;
}
// rows
S.resize(blockRowEnd - blockRowBegin, blocksInCol(), rowsPerBlock(), colsPerBlock());
S.beginConstructRowPtr();
CudaBsrMatrix_subRows_structure_rptr << <divUp(S.blocksInRow(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), S.bsrRowPtr(), blockRowBegin, S.blocksInRow());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_subRows_structure_rptr");
S.endConstructRowPtr();
// cols
CudaBsrMatrix_subRows_structure_cidx << <divUp(S.nnzBlocks(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdx(), S.bsrColIdx(), blockRowBegin, S.nnzBlocks());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_subRows_structure_cidx");
}
void CudaBsrMatrix::subRows_value(CudaBsrMatrix& S, int blockRowBegin, int blockRowEnd)const
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::AAt_blockDiags(): symbolic matrix cannot touch values");
blockRowBegin = ::max(0, blockRowBegin);
blockRowEnd = ::min(blocksInRow(), blockRowEnd);
if (S.blocksInRow() != blockRowEnd - blockRowBegin ||
S.blocksInCol() != blocksInCol() ||
S.rowsPerBlock() != rowsPerBlock() ||
S.colsPerBlock() != colsPerBlock())
throw std::exception("CudaBsrMatrix::subRows_value: size not matched");
CudaBsrMatrix_subRows_value << <divUp(S.nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), value(), S.value(), blockRowBegin, S.nnz(),
S.rowsPerBlock() * S.colsPerBlock());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_subRows_value");
}
void CudaBsrMatrix::toCsr_structure(CudaBsrMatrix& B)const
{
B.m_symbolic = isSymbolic();
B.resize(rows(), cols(), 1, 1);
B.resize_nnzBlocks(0);
if (rows() == 0 || nnz() == 0)
return;
// 1. rptr
B.beginConstructRowPtr();
CudaBsrMatrix_toCsr_structure_rptr << <divUp(rows()+1, CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), B.bsrRowPtr(), rowsPerBlock(), colsPerBlock(), rows());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_toCsr_structure_rptr");
B.endConstructRowPtr(nnz());
// 2. cidx
CudaBsrMatrix_toCsr_structure_cidx << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(),
B.bsrRowPtr_coo(), B.bsrRowPtr(), B.bsrColIdx(),
rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_toCsr_structure_cidx");
}
void CudaBsrMatrix::toCsr_value(CudaBsrMatrix& B)const
{
if (isSymbolic() || B.isSymbolic())
throw std::exception("CudaBsrMatrix::toCsr_value(): symbolic cannot touch values");
if (B.rows() != rows() || B.cols() != cols() || B.rowsPerBlock() != 1 || B.colsPerBlock() != 1)
throw std::exception("CudaBsrMatrix::toCsr_value(): size of B not matched");
if (rows() == 0 || nnz() == 0)
return;
CudaBsrMatrix_toCsr_structure_val << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), valueTexture(),
B.bsrRowPtr_coo(), B.bsrRowPtr(), B.value(),
rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(hipGetLastError(), "CudaBsrMatrix_toCsr_structure_cidx");
} | 051a034b8396595c9c02fb92bd75c2b14a0617a4.cu | #include "CudaBsrMatrix.h"
#include "CudaDiagBlockMatrix.h"
typedef CudaBsrMatrix::Range Range;
__global__ void CudaBsrMatrix_set(int n, float* ptr, float val)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
ptr[i] = val;
}
__global__ void CudaBsrMatrix_scale_add(int n, float* ptr, float alpha, float beta)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
ptr[i] = alpha * ptr[i] + beta;
}
__global__ void CudaBsrMatrix_scale_add_diag(int nRows, float* ptr,
const int* bsrRowPtr, cudaTextureObject_t bsrColIdx,
int blockSize, float alpha, float beta)
{
int iRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iRow >= nRows)
return;
int iBlockRow = iRow / blockSize;
int rowBlockBegin = bsrRowPtr[iBlockRow];
int rowBlockEnd = bsrRowPtr[iBlockRow + 1];
for (int c = rowBlockBegin; c < rowBlockEnd; c++)
{
int iBlockCol = 0;
tex1Dfetch(&iBlockCol, bsrColIdx, c);
if (iBlockCol == iBlockRow)
{
int rowShift = iRow - iBlockRow * blockSize;
int pos = (c * blockSize + rowShift) * blockSize + rowShift;
ptr[pos] = alpha * ptr[pos] + beta;
}
}
}
__global__ void CudaBsrMatrix_fill_increment_1_n(int* data, int n)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
data[i] = i;
}
__global__ void CudaBsrMatrix_setRowFromBlockedCsrRowPtr(const int* csrRowPtr,
int* bsrRowPtr, int blockInRows, int rowsPerBlock, int elementsPerBlock)
{
int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow <= blockInRows)
bsrRowPtr[iBlockRow] = csrRowPtr[iBlockRow*rowsPerBlock]/elementsPerBlock;
}
__global__ void CudaBsrMatrix_transpose_fill_value_by_bid(const int* blockIds, const float* srcValues,
float* dstValues, int blockSize_RxC, int blockR, int blockC, int nnz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnz)
{
int dBlock = i / blockSize_RxC;
int dShift = i - dBlock * blockSize_RxC;
int dR = dShift / blockC;
int dC = dShift - dR * blockC;
int sBlock = blockIds[dBlock];
int sShift = dC*blockR + dR;
dstValues[i] = srcValues[sBlock * blockSize_RxC + sShift];
}
}
template<int colsPerBlock>
__global__ void CudaBsrMatrix_Mv(cudaTextureObject_t bsrRowPtr,
cudaTextureObject_t bsrColIdx, cudaTextureObject_t bsrValue,
const float* x, float* y, float alpha, float beta, int nRow,
int rowsPerBlock)
{
int iRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iRow < nRow)
{
int iBlockRow = iRow / rowsPerBlock;
int rowShift = iRow - iBlockRow * rowsPerBlock;
int blockColPosBegin = 0;
tex1Dfetch(&blockColPosBegin, bsrRowPtr, iBlockRow);
int blockColPosEnd = 0;
tex1Dfetch(&blockColPosEnd, bsrRowPtr, iBlockRow + 1);
float sum = 0.f;
for (int bIdx = blockColPosBegin; bIdx < blockColPosEnd; ++bIdx)
{
int iBlockCol = 0;
tex1Dfetch(&iBlockCol, bsrColIdx, bIdx);
iBlockCol *= colsPerBlock;
int valIdx = (bIdx * rowsPerBlock + rowShift) * colsPerBlock;
for (int c = 0; c < colsPerBlock; c++)
{
float val = 0.f;
tex1Dfetch(&val, bsrValue, valIdx + c);
sum += x[iBlockCol + c] * val;
}
}
y[iRow] = alpha * sum + beta * y[iRow];
}
}
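// CudaBsrMatrix_rightMultDiag_1 computes one scalar of (A block) x (diagonal block). The four
// specializations below cover full vs. lower-triangular storage of the diagonal block and plain
// vs. transposed access to it; the generic template is a placeholder that just returns 0.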
template<bool LowerInsteadOfFull, bool Trans>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
cudaTextureObject_t leftValue, cudaTextureObject_t rightValue)
{
return 0.f;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<false, false>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
cudaTextureObject_t leftValue, cudaTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * 1;
for (int k = 0; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k*colsPerBlock);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<true, false>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
cudaTextureObject_t leftValue, cudaTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * 1;
for (int k = colShiftResult; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k*colsPerBlock);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<false, true>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
cudaTextureObject_t leftValue, cudaTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * colsPerBlock;
for (int k = 0; k < colsPerBlock; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k);
sum += valLeft * valRight;
}
return sum;
}
template<>
__device__ __forceinline__ float CudaBsrMatrix_rightMultDiag_1<true, true>(int blockBeginLeft, int blockColResult,
int rowPerBlock, int colsPerBlock, int elePerBlock, int rowShiftResult, int colShiftResult,
cudaTextureObject_t leftValue, cudaTextureObject_t rightValue)
{
float sum = 0.f;
blockBeginLeft = blockBeginLeft * elePerBlock + rowShiftResult * colsPerBlock;
blockColResult = blockColResult * elePerBlock + colShiftResult * colsPerBlock;
for (int k = 0; k <= colShiftResult; k++)
{
float valLeft = 0.f, valRight = 0.f;
tex1Dfetch(&valLeft, leftValue, blockBeginLeft + k);
tex1Dfetch(&valRight, rightValue, blockColResult + k);
sum += valLeft * valRight;
}
return sum;
}
template<bool LowerInsteadOfFull, bool Trans>
__global__ void CudaBsrMatrix_rightMultDiag(
const int* bsrRowPtr, const int* bsrRowPtr_coo,
cudaTextureObject_t bsrColIdx, cudaTextureObject_t bsrValue,
cudaTextureObject_t x, float* y, float alpha, float beta,
int rowsPerBlock, int colsPerBlock, int nnz)
{
int posResult = threadIdx.x + blockIdx.x * blockDim.x;
if (posResult >= nnz)
return;
int elePerBlock = rowsPerBlock * colsPerBlock;
int posResultBlock = posResult / elePerBlock;
int shiftResult = posResult - posResultBlock * elePerBlock;
int rowShiftResult = shiftResult / colsPerBlock;
int colShiftResult = shiftResult - rowShiftResult * colsPerBlock;
int blockRowResult = bsrRowPtr_coo[posResultBlock];
int blockColResult = 0;
tex1Dfetch(&blockColResult, bsrColIdx, posResultBlock);
int blockBeginLeft = bsrRowPtr[blockRowResult];
int blockEndLeft = bsrRowPtr[blockRowResult + 1];
// binary search diag blocks: blockColResult
while (blockBeginLeft < blockEndLeft)
{
int imid = ((blockBeginLeft + blockEndLeft) >> 1);
int b = 0;
tex1Dfetch(&b, bsrColIdx, imid);
if (b < blockColResult)
blockBeginLeft = imid + 1;
else
blockEndLeft = imid;
}
int b = 0;
tex1Dfetch(&b, bsrColIdx, blockBeginLeft);
float sum = 0.f;
if (b == blockColResult && blockBeginLeft == blockEndLeft)
{
sum = CudaBsrMatrix_rightMultDiag_1<LowerInsteadOfFull, Trans>(
blockBeginLeft, blockColResult, rowsPerBlock, colsPerBlock, elePerBlock,
rowShiftResult, colShiftResult, bsrValue, x);
}
// write the result
y[posResult] = alpha * sum + beta * y[posResult];
}
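// C = alpha * A * B^T + beta * D, one thread per scalar entry of C. Each thread walks the sorted
// column lists of its A block-row and B block-row (a column of B^T) in merge fashion, accumulating
// the dot product of matching blocks, then adds the matching entry of the optional sparse D.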
__global__ void CudaBsrMatrix_Range_multBsrT_value(
const int* bsrRowPtrA, cudaTextureObject_t bsrColIdxA, cudaTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
const int* bsrRowPtrB, cudaTextureObject_t bsrColIdxB, cudaTextureObject_t valueB,
int rangeColBeginB, int rangeColEndB,
const int* bsrRowPtrD, cudaTextureObject_t bsrColIdxD, cudaTextureObject_t valueD,
int rangeColBeginD, int rangeColEndD,
const int* bsrRowPtrC_coo, const int* bsrColIdxC, float* valueC,
int rowsPerBlockA, int colsPerBlockA, int rowsPerBlockB, int nnzC, float alpha, float beta
)
{
int innzC = threadIdx.x + blockIdx.x * blockDim.x;
if (innzC >= nnzC)
return;
const int elePerBlockC = rowsPerBlockA * rowsPerBlockB;
int innzBlockC = innzC / elePerBlockC;
int innzShiftC = innzC - innzBlockC * elePerBlockC;
int rowShiftC = innzShiftC / rowsPerBlockB;
int colShiftC = innzShiftC - rowShiftC * rowsPerBlockB;
int rowBlockC = bsrRowPtrC_coo[innzBlockC];
int colBlockC = bsrColIdxC[innzBlockC];
int blockBeginA = bsrRowPtrA[rowBlockC];
int blockEndA = bsrRowPtrA[rowBlockC + 1];
int blockBeginB = bsrRowPtrB[colBlockC];
int blockEndB = bsrRowPtrB[colBlockC + 1];
// A*B
float sum = 0.f;
for (int i0 = blockBeginA, i1 = blockBeginB; i0 < blockEndA && i1 < blockEndB;)
{
int colBlockA = 0, colBlockB = 0;
tex1Dfetch(&colBlockA, bsrColIdxA, i0);
tex1Dfetch(&colBlockB, bsrColIdxB, i1);
if (colBlockA >= rangeColEndA || colBlockB >= rangeColEndB)
break;
colBlockA -= rangeColBeginA;
colBlockB -= rangeColBeginB;
if (colBlockA == colBlockB && colBlockA >= 0)
{
int pos0 = (i0*colsPerBlockA + rowShiftC)*rowsPerBlockA;
int pos1 = (i1*rowsPerBlockB + colShiftC)*colsPerBlockA;
for (int k = 0; k < colsPerBlockA; k++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, pos0 + k);
tex1Dfetch(&v2, valueB, pos1 + k);
sum += v1 * v2;
}
i0++;
i1++;
}
i0 += (colBlockA < colBlockB) || (colBlockA < 0);
i1 += (colBlockA > colBlockB) || (colBlockB < 0);
}// i
// D
float D_val = 0.f;
if (bsrRowPtrD)
{
int blockBeginD = bsrRowPtrD[rowBlockC];
int blockEndD = bsrRowPtrD[rowBlockC + 1];
int colBlockD = 0;
for (int c = blockBeginD; c < blockEndD && colBlockD < rangeColEndD; c++)
{
tex1Dfetch(&colBlockD, bsrColIdxD, c);
if (colBlockD - rangeColBeginD == colBlockC)
{
tex1Dfetch(&D_val, valueD, (c * rowsPerBlockA + rowShiftC) * rowsPerBlockB + colShiftC);
break;
}
}
}// end if bsrRowPtrD
valueC[innzC] = alpha * sum + beta * D_val;
}
__global__ void CudaBsrMatrix_Range_multBsrT_addDiag_value(
const int* bsrRowPtrA, cudaTextureObject_t bsrColIdxA, cudaTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
const int* bsrRowPtrB, cudaTextureObject_t bsrColIdxB, cudaTextureObject_t valueB,
int rangeColBeginB, int rangeColEndB,
cudaTextureObject_t valueD,
const int* bsrRowPtrC_coo, const int* bsrColIdxC, float* valueC,
int rowsPerBlockA, int colsPerBlockA, int rowsPerBlockB, int nnzC, float alpha, float beta
)
{
int innzC = threadIdx.x + blockIdx.x * blockDim.x;
if (innzC >= nnzC)
return;
const int elePerBlockC = rowsPerBlockA * rowsPerBlockB;
int innzBlockC = innzC / elePerBlockC;
int innzShiftC = innzC - innzBlockC * elePerBlockC;
int rowShiftC = innzShiftC / rowsPerBlockB;
int colShiftC = innzShiftC - rowShiftC * rowsPerBlockB;
int rowBlockC = bsrRowPtrC_coo[innzBlockC];
int colBlockC = bsrColIdxC[innzBlockC];
int blockBeginA = bsrRowPtrA[rowBlockC];
int blockEndA = bsrRowPtrA[rowBlockC + 1];
int blockBeginB = bsrRowPtrB[colBlockC];
int blockEndB = bsrRowPtrB[colBlockC + 1];
// A*B
float sum = 0.f;
for (int i0 = blockBeginA, i1 = blockBeginB; i0 < blockEndA && i1 < blockEndB;)
{
int colBlockA = 0, colBlockB = 0;
tex1Dfetch(&colBlockA, bsrColIdxA, i0);
tex1Dfetch(&colBlockB, bsrColIdxB, i1);
if (colBlockA >= rangeColEndA || colBlockB >= rangeColEndB)
break;
colBlockA -= rangeColBeginA;
colBlockB -= rangeColBeginB;
if (colBlockA == colBlockB && colBlockA >= 0)
{
int pos0 = (i0*colsPerBlockA + rowShiftC)*rowsPerBlockA;
int pos1 = (i1*rowsPerBlockB + colShiftC)*colsPerBlockA;
for (int k = 0; k < colsPerBlockA; k++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, pos0 + k);
tex1Dfetch(&v2, valueB, pos1 + k);
sum += v1 * v2;
}
i0++;
i1++;
}
i0 += (colBlockA < colBlockB) || (colBlockA < 0);
i1 += (colBlockA > colBlockB) || (colBlockB < 0);
}// i
// D
float D_val = 0.f;
if (valueD && rowBlockC == colBlockC)
tex1Dfetch(&D_val, valueD, (rowBlockC * rowsPerBlockA + rowShiftC) * rowsPerBlockB + colShiftC);
valueC[innzC] = alpha * sum + beta * D_val;
}
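// Block-diagonal of A * A^T, one thread per scalar entry of the diagonal blocks. Each thread dots
// two rows (rowShift and colShift) of the same block-row of A restricted to the column range; with
// useLowerInsteadOfFull only the lower triangle of each diagonal block is written.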
__global__ void CudaBsrMatrix_Range_AAt_blockDiags(
const int* bsrRowPtrA, cudaTextureObject_t bsrColIdxA, cudaTextureObject_t valueA,
int rangeColBeginA, int rangeColEndA,
float* diag, int rowsPerBlockA, int colsPerBlockA, int nnzDiag,
bool useLowerInsteadOfFull, float alpha, float beta
)
{
int innzDiag = threadIdx.x + blockIdx.x * blockDim.x;
if (innzDiag >= nnzDiag)
return;
int blockDiagSz = rowsPerBlockA*rowsPerBlockA;
int iBlockDiag = innzDiag / blockDiagSz;
int shift = innzDiag - iBlockDiag*blockDiagSz;
int rowShift = shift / colsPerBlockA;
int colShift = shift - rowShift * colsPerBlockA;
if (useLowerInsteadOfFull && rowShift < colShift)
return;
int row0 = bsrRowPtrA[iBlockDiag];
int row0_begin = (row0*rowsPerBlockA + rowShift) * colsPerBlockA;
const int row_blocks = bsrRowPtrA[iBlockDiag + 1] - row0;
int row1_begin = (row0*rowsPerBlockA + colShift) * colsPerBlockA;
int blockSzA = rowsPerBlockA * colsPerBlockA;
float sum = 0;
int colBlock = 0;
for (int iBlocks = 0; iBlocks < row_blocks && colBlock < rangeColEndA;
iBlocks++, row0_begin += blockSzA, row1_begin += blockSzA)
{
tex1Dfetch(&colBlock, bsrColIdxA, row0 + iBlocks);
if (colBlock < rangeColBeginA)
continue;
for (int i = 0; i < colsPerBlockA; i++)
{
float v1 = 0.f, v2 = 0.f;
tex1Dfetch(&v1, valueA, row0_begin + i);
tex1Dfetch(&v2, valueA, row1_begin + i);
sum += v1 * v2;
}
}
diag[innzDiag] = alpha*sum + beta*diag[innzDiag];
}
__global__ void CudaBsrMatrix_subRows_structure_rptr(const int* bsrRowPtrFull, int* bsrRowPtrSub,
int rowBegin, int num)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i <= num)// <= because size=num+1 in bsr row
{
bsrRowPtrSub[i] = bsrRowPtrFull[i + rowBegin] - bsrRowPtrFull[rowBegin];
}
}
__global__ void CudaBsrMatrix_subRows_structure_cidx(cudaTextureObject_t bsrRowPtrFull,
const int* bsrColIdxFull, int* bsrColIdxSub, int rowBegin, int nnzBlocks)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnzBlocks)
{
int colBegin = 0;
tex1Dfetch(&colBegin, bsrRowPtrFull, rowBegin);
bsrColIdxSub[i] = bsrColIdxFull[i + colBegin];
}
}
__global__ void CudaBsrMatrix_subRows_value(cudaTextureObject_t bsrRowPtrFull,
const float* valueFull, float* valueSub, int rowBegin, int nnz, int blockSize2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < nnz)
{
int nnzBegin = 0;
tex1Dfetch(&nnzBegin, bsrRowPtrFull, rowBegin);
nnzBegin *= blockSize2;
valueSub[i] = valueFull[i + nnzBegin];
}
}
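// The three kernels below expand the blocked layout into scalar CSR form: _rptr builds the expanded
// row pointer, while _cidx and _val scatter the per-entry column indices and block values, both
// driven by the COO row index of each scalar non-zero.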
__global__ void CudaBsrMatrix_toCsr_structure_rptr(cudaTextureObject_t bsrRowPtr,
int* csrRowPtr, int bsrBlockRow, int bsrBlockCol, int nCsrRows)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int ib = i / bsrBlockRow;
if (i < nCsrRows)
{
int shift = i - ib*bsrBlockRow;
int bsr = 0, bsr1=0;
tex1Dfetch(&bsr, bsrRowPtr, ib);
tex1Dfetch(&bsr1, bsrRowPtr, ib+1);
csrRowPtr[i] = (bsr*bsrBlockRow + (bsr1-bsr)*shift) * bsrBlockCol;
}
if (i == nCsrRows)
{
int bsr = 0;
tex1Dfetch(&bsr, bsrRowPtr, ib);
csrRowPtr[i] = bsr*bsrBlockRow * bsrBlockCol;
}
}
__global__ void CudaBsrMatrix_toCsr_structure_cidx(
cudaTextureObject_t bsrRowPtr, cudaTextureObject_t bsrColIdx,
const int* csrRowPtr_coo, const int* csrRowPtr, int* csrColIdx,
int bsrBlockRow, int bsrBlockCol, int nCsrNNZ)
{
int innz = threadIdx.x + blockIdx.x * blockDim.x;
if (innz < nCsrNNZ)
{
int iRow = csrRowPtr_coo[innz];
int iBlockRow = iRow / bsrBlockRow;
int colShiftOfRow = innz - csrRowPtr[iRow];
int blockColShiftOfRow = colShiftOfRow / bsrBlockCol;
int iBlock = 0;
tex1Dfetch(&iBlock, bsrRowPtr, iBlockRow);
iBlock += blockColShiftOfRow;
int cshift = colShiftOfRow - blockColShiftOfRow * bsrBlockCol;
int bc = 0;
tex1Dfetch(&bc, bsrColIdx, iBlock);
csrColIdx[innz] = bc * bsrBlockCol + cshift;
}
}
__global__ void CudaBsrMatrix_toCsr_structure_val(
cudaTextureObject_t bsrRowPtr, cudaTextureObject_t bsrValue,
const int* csrRowPtr_coo, const int* csrRowPtr, float* csrValue,
int bsrBlockRow, int bsrBlockCol, int nCsrNNZ)
{
int innz = threadIdx.x + blockIdx.x * blockDim.x;
if (innz < nCsrNNZ)
{
int iRow = csrRowPtr_coo[innz];
int iBlockRow = iRow / bsrBlockRow;
int colShiftOfRow = innz - csrRowPtr[iRow];
int blockColShiftOfRow = colShiftOfRow / bsrBlockCol;
int iBlock = 0;
tex1Dfetch(&iBlock, bsrRowPtr, iBlockRow);
iBlock += blockColShiftOfRow;
int rshift = iRow - iBlockRow * bsrBlockRow;
int cshift = colShiftOfRow - blockColShiftOfRow * bsrBlockCol;
tex1Dfetch(&csrValue[innz], bsrValue, (iBlock*bsrBlockRow+rshift)*bsrBlockCol + cshift);
}
}
void CudaBsrMatrix::fill_increment_1_n(int* data, int n)
{
if (n == 0)
return;
CudaBsrMatrix_fill_increment_1_n << <divUp(n, CTA_SIZE), CTA_SIZE >> >(
data, n);
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::fill_increment_1_n");
}
void CudaBsrMatrix::transpose_fill_values_by_blockId(const int* blockIds, const CudaBsrMatrix& t)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::transpose_fill_values_by_blockId(): symbolic matrix cannot touch values");
CudaBsrMatrix_transpose_fill_value_by_bid << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
blockIds, t.value(), value(), rowsPerBlock() * colsPerBlock(), rowsPerBlock(),
colsPerBlock(), nnz());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::CudaBsrMatrix_transpose_fill_value_by_bid");
}
CudaBsrMatrix& CudaBsrMatrix::operator = (float constVal)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::operator =: symbolic matrix cannot touch values");
if (nnz() == 0)
return *this;
if (constVal == 0.f)
{
cudaSafeCall(cudaMemset(m_values.ptr(), 0, nnz()*m_values.elem_size),
"CudaBsrMatrix::operator = 0");
}
else
{
CudaBsrMatrix_set << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(nnz(), m_values.ptr(), constVal);
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::operator = constVal");
}
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::operator = (const CudaBsrMatrix& rhs)
{
m_cusparseHandle = rhs.m_cusparseHandle;
m_symbolic = rhs.m_symbolic;
resize(rhs.blocksInRow(), rhs.blocksInCol(), rhs.rowsPerBlock(), rhs.colsPerBlock());
resize_nnzBlocks(rhs.nnzBlocks());
cudaSafeCall(cudaMemcpy(bsrRowPtr(), rhs.bsrRowPtr(),
(1+rhs.blocksInRow())*sizeof(int), cudaMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrRowPtr");
cudaSafeCall(cudaMemcpy(bsrRowPtr_coo(), rhs.bsrRowPtr_coo(),
rhs.nnzBlocks()*sizeof(int), cudaMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrRowPtr_coo");
cudaSafeCall(cudaMemcpy(bsrColIdx(), rhs.bsrColIdx(),
rhs.nnzBlocks()*sizeof(int), cudaMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy bsrColIdx");
if (!isSymbolic())
cudaSafeCall(cudaMemcpy(value(), rhs.value(),
rhs.nnz()*sizeof(float), cudaMemcpyDeviceToDevice),
"CudaBsrMatrix::operator =, cpy value");
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::axpy(float alpha, float beta)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::axpy(): symbolic matrix cannot touch values");
if (nnz() == 0)
return *this;
CudaBsrMatrix_scale_add << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
nnz(), value(), alpha, beta);
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::axpy");
return *this;
}
CudaBsrMatrix& CudaBsrMatrix::axpy_diag(float alpha, float beta)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::axpy_diag(): symbolic matrix cannot touch values");
if (rowsPerBlock() != colsPerBlock() || blocksInRow() != blocksInCol())
throw std::exception("CudaBsrMatrix::axpy_diag(): only square matrix supported");
if (nnz() == 0)
return *this;
CudaBsrMatrix_scale_add_diag << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
rows(), value(), bsrRowPtr(), bsrColIdxTexture(), rowsPerBlock(), alpha, beta);
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::axpy_diag");
return *this;
}
void CudaBsrMatrix::setValue(const float* val_d)
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::setValue(): symbolic matrix cannot touch values");
cudaSafeCall(cudaMemcpy(value(), val_d, nnz()*sizeof(float),
cudaMemcpyDeviceToDevice), "CudaBsrMatrix::setValue");
}
void CudaBsrMatrix::Mv(const float* x, float* y, float alpha, float beta)const
{
if (rows() == 0 || cols() == 0)
return;
if (isSymbolic())
throw std::exception("CudaBsrMatrix::Mv(): symbolic matrix cannot touch values");
switch (colsPerBlock())
{
case 0:
break;
case 1:
CudaBsrMatrix_Mv<1> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 2:
CudaBsrMatrix_Mv<2> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 3:
CudaBsrMatrix_Mv<3> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 4:
CudaBsrMatrix_Mv<4> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 5:
CudaBsrMatrix_Mv<5> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
case 6:
CudaBsrMatrix_Mv<6> << <divUp(rows(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(), valueTexture(), x, y, alpha, beta,
rows(), rowsPerBlock());
break;
default:
throw std::exception("non-supported block size!");
}
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::Mv");
}
void CudaBsrMatrix::rightMultDiag_structure(const CudaDiagBlockMatrix& x, CudaBsrMatrix& y)const
{
if (cols() != x.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_structure: block size not matched");
if (x.blockSize() != colsPerBlock() || x.blockSize() != rowsPerBlock())
throw std::exception("CudaBsrMatrix::rightMultDiag_structure: matrix size not matched");
y = *this;
y = 0;
}
void CudaBsrMatrix::rightMultDiag_value(const CudaDiagBlockMatrix& x, CudaBsrMatrix& y,
bool useLowerInsteadOfFull_x, bool trans_x, float alpha, float beta)const
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::rightMultDiag_value(): symbolic matrix cannot touch values");
if (cols() != x.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: block size not matched");
if (x.blockSize() != colsPerBlock() || x.blockSize() != rowsPerBlock())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: matrix size not matched");
if (cols() != y.cols() || rows() != y.rows())
throw std::exception("CudaBsrMatrix::rightMultDiag_value: y not matched, call rightMultDiag_structure()!");
if (nnz() == 0)
return;
if (useLowerInsteadOfFull_x == true && trans_x == true)
CudaBsrMatrix_rightMultDiag<true, true> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == true && trans_x == false)
CudaBsrMatrix_rightMultDiag<true, false> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == false && trans_x == false)
CudaBsrMatrix_rightMultDiag<false, false> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
if (useLowerInsteadOfFull_x == false && trans_x == true)
CudaBsrMatrix_rightMultDiag<false, true> << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), bsrRowPtr_coo(), bsrColIdxTexture(), valueTexture(),
x.getTexture(), y.value(), alpha, beta, rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::rightMultDiag_value");
}
void CudaBsrMatrix::setRowFromBlockedCsrRowPtr(const int* csrRowPtr)
{
if (blocksInRow() == 0)
return;
beginConstructRowPtr();
CudaBsrMatrix_setRowFromBlockedCsrRowPtr << <divUp(blocksInRow(), CTA_SIZE), CTA_SIZE >> >(
csrRowPtr, bsrRowPtr(), blocksInRow(), rowsPerBlock(), rowsPerBlock()*colsPerBlock());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix::setRowFromBlockedCsrRowPtr");
endConstructRowPtr();
}
void CudaBsrMatrix::Range::multBsr_value(const Range& B, CudaBsrMatrix& C, float alpha,
const Range* D, float beta)const
{
throw std::exception("CudaBsrMatrix::Range::multBsr_value(): not implemented");
}
void CudaBsrMatrix::Range::multBsrT_value(const Range& B, CudaBsrMatrix& C, float alpha,
const Range* D, float beta)const
{
if (A == nullptr || B.A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): null pointer exception");
if (D)
if (D->A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): null pointer exception");
if (A->isSymbolic() || B.A->isSymbolic())
throw std::exception("CudaBsrMatrix::multBsrT_value(): symbolic matrix cannot touch values");
if (D)
{
if (D->A->isSymbolic())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): symbolic matrix cannot touch values");
if (C.blocksInRow() != D->blocksInRow() || C.blocksInCol() != D->blocksInCol()
|| C.rowsPerBlock() != D->rowsPerBlock() || C.colsPerBlock() != D->colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): D size not matched");
}
if (blocksInCol() != B.blocksInCol())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): matrix size not matched");
if (colsPerBlock() != B.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): block size not matched");
if (blocksInRow() != C.blocksInRow() || B.blocksInRow() != C.blocksInCol()
|| rowsPerBlock() != C.rowsPerBlock() || B.rowsPerBlock() != C.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value(): result size not matched");
if (C.nnzBlocks() == 0)
return;
const int* D_rptr = nullptr;
cudaTextureObject_t D_cidx = 0, D_val = 0;
int D_cbegin = 0, D_cend = 0;
if (D)
{
D_rptr = D->A->bsrRowPtr() +D->blockRowBegin;
D_cidx = D->A->bsrColIdxTexture();
D_val = D->A->valueTexture();
D_cbegin = D->blockColBegin;
D_cend = D->blockColEnd;
}
CudaBsrMatrix_Range_multBsrT_value << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr()+blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
B.A->bsrRowPtr()+B.blockRowBegin, B.A->bsrColIdxTexture(), B.A->valueTexture(),
B.blockColBegin, B.blockColEnd,
D_rptr, D_cidx, D_val, D_cbegin, D_cend,
C.bsrRowPtr_coo(), C.bsrColIdx(), C.value(),
rowsPerBlock(), colsPerBlock(), B.rowsPerBlock(), C.nnz(), alpha, beta
);
}
void CudaBsrMatrix::Range::multBsrT_addDiag_value(const Range& B, CudaBsrMatrix& C, float alpha,
const CudaDiagBlockMatrix* D, float beta)const
{
if (A == nullptr || B.A == nullptr)
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: null pointer exception");
if (A->isSymbolic() || B.A->isSymbolic())
throw std::exception("CudaBsrMatrix::multBsrT_value()1: symbolic matrix cannot touch values");
if (blocksInCol() != B.blocksInCol())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: matrix size not matched");
if (colsPerBlock() != B.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: block size not matched");
if (blocksInRow() != C.blocksInRow() || B.blocksInRow() != C.blocksInCol()
|| rowsPerBlock() != C.rowsPerBlock() || B.rowsPerBlock() != C.colsPerBlock())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: result size not matched");
if (D)
{
if (C.blocksInRow() != D->numBlocks() || C.blocksInCol() != D->numBlocks())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: D size not matched");
if (C.rowsPerBlock() != D->blockSize() || C.colsPerBlock() != D->blockSize())
throw std::exception("CudaBsrMatrix::Range::multBsrT_value()1: D block not matched");
}
if (C.nnzBlocks() == 0)
return;
cudaTextureObject_t D_val = 0;
if (D)
D_val = D->getTexture();
CudaBsrMatrix_Range_multBsrT_addDiag_value << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr() + blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
B.A->bsrRowPtr() + B.blockRowBegin, B.A->bsrColIdxTexture(), B.A->valueTexture(),
B.blockColBegin, B.blockColEnd,
D_val,
C.bsrRowPtr_coo(), C.bsrColIdx(), C.value(),
rowsPerBlock(), colsPerBlock(), B.rowsPerBlock(), C.nnz(), alpha, beta
);
}
void CudaBsrMatrix::Range::AAt_blockDiags(CudaDiagBlockMatrix& C,
bool lowerInsteadOfFull, float alpha, float beta)const
{
if (A == nullptr)
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): null pointer exception");
if (A->isSymbolic())
throw std::exception("CudaBsrMatrix::AAt_blockDiags(): symbolic matrix cannot touch values");
if (blocksInRow() != C.numBlocks())
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): matrix size not matched");
if (rowsPerBlock() != C.blockSize())
throw std::exception("CudaBsrMatrix::Range::AAt_blockDiags(): block size not matched");
if (A->nnzBlocks() == 0)
return;
CudaBsrMatrix_Range_AAt_blockDiags << <divUp(C.nnz(), CTA_SIZE), CTA_SIZE >> >(
A->bsrRowPtr() + blockRowBegin, A->bsrColIdxTexture(), A->valueTexture(),
blockColBegin, blockColEnd,
C.value(), rowsPerBlock(), colsPerBlock(), C.nnz(),
lowerInsteadOfFull, alpha, beta
);
}
void CudaBsrMatrix::subRows_structure(CudaBsrMatrix& S, int blockRowBegin, int blockRowEnd)const
{
blockRowBegin = std::max(0, blockRowBegin);
blockRowEnd = std::min(blocksInRow(), blockRowEnd);
if (blockRowBegin >= blockRowEnd)
{
S.resize(0, 0, rowsPerBlock(), colsPerBlock());
return;
}
// rows
S.resize(blockRowEnd - blockRowBegin, blocksInCol(), rowsPerBlock(), colsPerBlock());
S.beginConstructRowPtr();
CudaBsrMatrix_subRows_structure_rptr << <divUp(S.blocksInRow(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtr(), S.bsrRowPtr(), blockRowBegin, S.blocksInRow());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_subRows_structure_rptr");
S.endConstructRowPtr();
// cols
CudaBsrMatrix_subRows_structure_cidx << <divUp(S.nnzBlocks(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdx(), S.bsrColIdx(), blockRowBegin, S.nnzBlocks());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_subRows_structure_cidx");
}
void CudaBsrMatrix::subRows_value(CudaBsrMatrix& S, int blockRowBegin, int blockRowEnd)const
{
if (isSymbolic())
throw std::exception("CudaBsrMatrix::AAt_blockDiags(): symbolic matrix cannot touch values");
blockRowBegin = std::max(0, blockRowBegin);
blockRowEnd = std::min(blocksInRow(), blockRowEnd);
if (S.blocksInRow() != blockRowEnd - blockRowBegin ||
S.blocksInCol() != blocksInCol() ||
S.rowsPerBlock() != rowsPerBlock() ||
S.colsPerBlock() != colsPerBlock())
throw std::exception("CudaBsrMatrix::subRows_value: size not matched");
CudaBsrMatrix_subRows_value << <divUp(S.nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), value(), S.value(), blockRowBegin, S.nnz(),
S.rowsPerBlock() * S.colsPerBlock());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_subRows_value");
}
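// toCsr_structure()/toCsr_value() expand this blocked matrix into an equivalent scalar CSR
// matrix (1x1 blocks): the structure pass builds the expanded row pointer and column indices,
// and the value pass then scatters the block values into that layout.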
void CudaBsrMatrix::toCsr_structure(CudaBsrMatrix& B)const
{
B.m_symbolic = isSymbolic();
B.resize(rows(), cols(), 1, 1);
B.resize_nnzBlocks(0);
if (rows() == 0 || nnz() == 0)
return;
// 1. rptr
B.beginConstructRowPtr();
CudaBsrMatrix_toCsr_structure_rptr << <divUp(rows()+1, CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), B.bsrRowPtr(), rowsPerBlock(), colsPerBlock(), rows());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_toCsr_structure_rptr");
B.endConstructRowPtr(nnz());
// 2. cidx
CudaBsrMatrix_toCsr_structure_cidx << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), bsrColIdxTexture(),
B.bsrRowPtr_coo(), B.bsrRowPtr(), B.bsrColIdx(),
rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_toCsr_structure_cidx");
}
void CudaBsrMatrix::toCsr_value(CudaBsrMatrix& B)const
{
if (isSymbolic() || B.isSymbolic())
throw std::exception("CudaBsrMatrix::toCsr_value(): symbolic cannot touch values");
if (B.rows() != rows() || B.cols() != cols() || B.rowsPerBlock() != 1 || B.colsPerBlock() != 1)
throw std::exception("CudaBsrMatrix::toCsr_value(): size of B not matched");
if (rows() == 0 || nnz() == 0)
return;
CudaBsrMatrix_toCsr_structure_val << <divUp(nnz(), CTA_SIZE), CTA_SIZE >> >(
bsrRowPtrTexture(), valueTexture(),
B.bsrRowPtr_coo(), B.bsrRowPtr(), B.value(),
rowsPerBlock(), colsPerBlock(), nnz());
cudaSafeCall(cudaGetLastError(), "CudaBsrMatrix_toCsr_structure_cidx");
} |
c6c849ff706bc72905bff35cf61b688f3b1b94c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "CudaRayTracing.cuh"
#include <math_constants.h>
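// Iterative replacement for the recursive ray-color evaluation: the ray is bounced up to aDepth
// times, multiplying the accumulated attenuation by each material's scatter result. A ray that
// escapes returns the attenuated sky gradient; an absorbed ray or an exhausted depth budget
// returns black.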
__device__ cuda3DVector CudaGetRayColor(cudaRay& aRay, cudaHitRecord& aHitRecord, cudaSphere* aSomeSpheres, int aNumSpheres, hiprandState_t* aRandState, int& aDepth)
{
cudaRay& currentRay = aRay;
cuda3DVector currentAttenuation{ 1.0f, 1.0f, 1.0f };
for (int i = 0; i < aDepth; ++i)
{
float closestSoFar = 99999.0f;
bool hitAnything = false;
for (int i = 0; i < aNumSpheres; ++i)
{
if (CudaHitSphere(aSomeSpheres[i], 0.0001f, closestSoFar, currentRay, aRandState, aHitRecord))
{
hitAnything = true;
closestSoFar = aHitRecord.temp;
}
}
if (hitAnything)
{
//Diffuse without material scattering
//cuda3DVector target = hitRecord.point + hitRecord.hitNormal + CudaNormalize(CudaGetRandVectorInUnitSphere(aRandState));
//aRay = cudaRay{ hitRecord.point, target - hitRecord.point };
//cuda3DVector color = CudaGetRayColor(aRay, hitRecord, aSpheres, aNumSpheres, aRandState, aDepth - 1);
//return (color * 0.5f);
//Material Scattering
cudaRay scattered;
cuda3DVector attenuation;
bool hasScattered = false; // default to absorbed if the material type is unrecognized
switch (aHitRecord.material.type)
{
case LAMBERTIAN:
hasScattered = LambertianScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
case METALLIC:
hasScattered = MetalScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
case DIELECTRIC:
hasScattered = DielectricScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
}
if (hasScattered)
{
currentAttenuation = currentAttenuation * attenuation;
currentRay = scattered;
}
else
{
return cuda3DVector{ 0.0f, 0.0f, 0.0f };
}
}
else
{
cuda3DVector normalizedDirection = CudaNormalize(aRay.direction);
float t = 0.5f * (normalizedDirection.y + 1.0f);
//linearly blend between blue and white
cuda3DVector blueOperand{ t * 0.5f, t * 0.7f, t * 1.0f };
cuda3DVector whiteOperand{ (1.0f - t) * 1.0f, (1.0f - t) * 1.0f, (1.0f - t) * 1.0f };
cuda3DVector background = blueOperand + whiteOperand;
return currentAttenuation * background;
}
}
return cuda3DVector{ 0.0f, 0.0f, 0.0f };
}
__global__ void CudaInitRandState(int aWidth, int aHeight, hiprandState_t* aRandStates)
{
const int i = (blockDim.x * blockIdx.x) + threadIdx.x;
const int j = (blockDim.y * blockIdx.y) + threadIdx.y;
if (i >= aWidth || j >= aHeight)
return;
const int index = (aWidth * j) + i;
hiprand_init(1994 + index, 0, 0, &aRandStates[index]);
}
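// Tiled variant: each thread shades an aResScale x aResScale block of output pixels, reusing a
// single RNG state per tile. aSomeInvValues packs precomputed reciprocals and scaled bounds
// (see the commented-out divisions) so the inner loops avoid integer division.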
__global__ void CudaGetColor(cudaCamera* aCamera, cudaSphere* aSomeSpheres, hiprandState_t* aRandState, cuda3DVector* aSomeInvValues, int aNumSpheres, int aWidth, int aHeight, int aResScale, int aNumSamplesPerPixel, cuda3DVector* anOutColor)
{
int i = (blockDim.y * blockIdx.y) + threadIdx.y;
int j = (blockDim.x * blockIdx.x) + threadIdx.x;
if (j > aSomeInvValues[1].y || i > aSomeInvValues[1].x) //if (j > (aHeight / aResScale) || i > (aWidth / aResScale))
return;
int pixelIndex;
hiprandState_t localRandState;
cuda3DVector color;
for (int k = i * aResScale; k < (i * aResScale) + aResScale; ++k)
{
for (int l = j * aResScale; l < (j * aResScale) + aResScale; ++l)
{
if (l > aHeight || k > aWidth)
return;
pixelIndex = (aWidth * l) + k;
localRandState = aRandState[(int)(pixelIndex * aSomeInvValues[0].z)]; /// (aResScale * aResScale)];
color = cuda3DVector{ 0.0f, 0.0f, 0.0f };
int maxDepth;
for (int sample = 0; sample < aNumSamplesPerPixel; ++sample)
{
float u = float(k + hiprand_uniform(&localRandState)) * aSomeInvValues[0].x;// / (aWidth - 1);
float v = float(l + hiprand_uniform(&localRandState)) * aSomeInvValues[0].y;// / (aHeight - 1);
maxDepth = 10;
cuda3DVector randomInDisk = CudaGetRandVectorInUnitDisk(&localRandState) * aCamera->lensRadius;
cuda3DVector offset = aCamera->u * randomInDisk.x + aCamera->v * randomInDisk.y;
cuda3DVector rayDirection = aCamera->lowerLeftCorner + (aCamera->horizontal * u) + (aCamera->vertical * v) - aCamera->origin - offset;
cudaRay ray = { aCamera->origin + offset, rayDirection };
cudaHitRecord sharedRecord;
cuda3DVector sampleColor = CudaGetRayColor(ray, sharedRecord, aSomeSpheres, aNumSpheres, &localRandState, maxDepth);
color = color + sampleColor;
}
anOutColor[pixelIndex] = color;
}
}
}
__global__ void CudaGetColorRecursive(cudaCamera* aCamera, cudaSphere* aSomeSpheres, hiprandState_t* aRandState, int aNumSpheres, int aWidth, int aHeight, int aNumSamplesPerPixel, cuda3DVector* anOutColor)
{
int i = (blockDim.y * blockIdx.y) + threadIdx.y;
int j = (blockDim.x * blockIdx.x) + threadIdx.x;
if (j > aHeight || i > aWidth )
return;
const int index = (aWidth * j) + i;
hiprandState_t localRandState = aRandState[index];
cuda3DVector color{ 0.0f, 0.0f, 0.0f };
for (int sample = 0; sample < aNumSamplesPerPixel; ++sample)
{
float u = float(i + hiprand_uniform(&localRandState)) / (aWidth - 1);
float v = float(j + hiprand_uniform(&localRandState)) / (aHeight - 1);
int maxDepth = 10;
cuda3DVector randomInDisk = CudaGetRandVectorInUnitDisk(&localRandState) * aCamera->lensRadius;
cuda3DVector offset = aCamera->u * randomInDisk.x + aCamera->v * randomInDisk.y;
cuda3DVector rayDirection = aCamera->lowerLeftCorner + (aCamera->horizontal * u) + (aCamera->vertical * v) - aCamera->origin - offset;
cudaRay ray = { aCamera->origin + offset, rayDirection };
cudaHitRecord sharedRecord;
cuda3DVector sampleColor = CudaGetRayColor(ray, sharedRecord, aSomeSpheres, aNumSpheres, &localRandState, maxDepth);
color = color + sampleColor;
}
anOutColor[index] = color;
}
__global__ void CudaGetIdColor(cuda3DVector* anOutColor, int aNumElem)
{
int index = ((blockDim.x * gridDim.x) * (blockDim.x * blockIdx.x + threadIdx.x)) + ((blockDim.y * blockIdx.y) + threadIdx.y);
int indexY = ((blockDim.y * gridDim.y) * (blockDim.y * blockIdx.y + threadIdx.y)) + ((blockDim.x * blockIdx.x) + threadIdx.x);
anOutColor[index].x = (float)index;
anOutColor[index].y = (float)indexY;
anOutColor[index].z = index + indexY / 2;
} | c6c849ff706bc72905bff35cf61b688f3b1b94c9.cu |
#include "device_launch_parameters.h"
#include "CudaRayTracing.cuh"
#include <math_constants.h>
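// Iterative replacement for the recursive ray-color evaluation: the ray is bounced up to aDepth
// times, multiplying the accumulated attenuation by each material's scatter result. A ray that
// escapes returns the attenuated sky gradient; an absorbed ray or an exhausted depth budget
// returns black.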
__device__ cuda3DVector CudaGetRayColor(cudaRay& aRay, cudaHitRecord& aHitRecord, cudaSphere* aSomeSpheres, int aNumSpheres, curandState* aRandState, int& aDepth)
{
cudaRay& currentRay = aRay;
cuda3DVector currentAttenuation{ 1.0f, 1.0f, 1.0f };
for (int i = 0; i < aDepth; ++i)
{
float closestSoFar = 99999.0f;
bool hitAnything = false;
for (int i = 0; i < aNumSpheres; ++i)
{
if (CudaHitSphere(aSomeSpheres[i], 0.0001f, closestSoFar, currentRay, aRandState, aHitRecord))
{
hitAnything = true;
closestSoFar = aHitRecord.temp;
}
}
if (hitAnything)
{
//Diffuse without material scattering
//cuda3DVector target = hitRecord.point + hitRecord.hitNormal + CudaNormalize(CudaGetRandVectorInUnitSphere(aRandState));
//aRay = cudaRay{ hitRecord.point, target - hitRecord.point };
//cuda3DVector color = CudaGetRayColor(aRay, hitRecord, aSpheres, aNumSpheres, aRandState, aDepth - 1);
//return (color * 0.5f);
//Material Scattering
cudaRay scattered;
cuda3DVector attenuation;
bool hasScattered = false; // default to absorbed if the material type is unrecognized
switch (aHitRecord.material.type)
{
case LAMBERTIAN:
hasScattered = LambertianScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
case METALLIC:
hasScattered = MetalScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
case DIELECTRIC:
hasScattered = DielectricScatter(aRay, aHitRecord, aRandState, attenuation, scattered);
break;
}
if (hasScattered)
{
currentAttenuation = currentAttenuation * attenuation;
currentRay = scattered;
}
else
{
return cuda3DVector{ 0.0f, 0.0f, 0.0f };
}
}
else
{
cuda3DVector normalizedDirection = CudaNormalize(aRay.direction);
float t = 0.5f * (normalizedDirection.y + 1.0f);
//linearly blend between blue and white
cuda3DVector blueOperand{ t * 0.5f, t * 0.7f, t * 1.0f };
cuda3DVector whiteOperand{ (1.0f - t) * 1.0f, (1.0f - t) * 1.0f, (1.0f - t) * 1.0f };
cuda3DVector background = blueOperand + whiteOperand;
return currentAttenuation * background;
}
}
return cuda3DVector{ 0.0f, 0.0f, 0.0f };
}
__global__ void CudaInitRandState(int aWidth, int aHeight, curandState* aRandStates)
{
const int i = (blockDim.x * blockIdx.x) + threadIdx.x;
const int j = (blockDim.y * blockIdx.y) + threadIdx.y;
if (i >= aWidth || j >= aHeight)
return;
const int index = (aWidth * j) + i;
curand_init(1994 + index, 0, 0, &aRandStates[index]);
}
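// Tiled variant: each thread shades an aResScale x aResScale block of output pixels, reusing a
// single RNG state per tile. aSomeInvValues packs precomputed reciprocals and scaled bounds
// (see the commented-out divisions) so the inner loops avoid integer division.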
__global__ void CudaGetColor(cudaCamera* aCamera, cudaSphere* aSomeSpheres, curandState* aRandState, cuda3DVector* aSomeInvValues, int aNumSpheres, int aWidth, int aHeight, int aResScale, int aNumSamplesPerPixel, cuda3DVector* anOutColor)
{
int i = (blockDim.y * blockIdx.y) + threadIdx.y;
int j = (blockDim.x * blockIdx.x) + threadIdx.x;
if (j > aSomeInvValues[1].y || i > aSomeInvValues[1].x) //if (j > (aHeight / aResScale) || i > (aWidth / aResScale))
return;
int pixelIndex;
curandState localRandState;
cuda3DVector color;
for (int k = i * aResScale; k < (i * aResScale) + aResScale; ++k)
{
for (int l = j * aResScale; l < (j * aResScale) + aResScale; ++l)
{
if (l > aHeight || k > aWidth)
return;
pixelIndex = (aWidth * l) + k;
localRandState = aRandState[(int)(pixelIndex * aSomeInvValues[0].z)]; /// (aResScale * aResScale)];
color = cuda3DVector{ 0.0f, 0.0f, 0.0f };
int maxDepth;
for (int sample = 0; sample < aNumSamplesPerPixel; ++sample)
{
float u = float(k + curand_uniform(&localRandState)) * aSomeInvValues[0].x;// / (aWidth - 1);
float v = float(l + curand_uniform(&localRandState)) * aSomeInvValues[0].y;// / (aHeight - 1);
maxDepth = 10;
cuda3DVector randomInDisk = CudaGetRandVectorInUnitDisk(&localRandState) * aCamera->lensRadius;
cuda3DVector offset = aCamera->u * randomInDisk.x + aCamera->v * randomInDisk.y;
cuda3DVector rayDirection = aCamera->lowerLeftCorner + (aCamera->horizontal * u) + (aCamera->vertical * v) - aCamera->origin - offset;
cudaRay ray = { aCamera->origin + offset, rayDirection };
cudaHitRecord sharedRecord;
cuda3DVector sampleColor = CudaGetRayColor(ray, sharedRecord, aSomeSpheres, aNumSpheres, &localRandState, maxDepth);
color = color + sampleColor;
}
anOutColor[pixelIndex] = color;
}
}
}
__global__ void CudaGetColorRecursive(cudaCamera* aCamera, cudaSphere* aSomeSpheres, curandState* aRandState, int aNumSpheres, int aWidth, int aHeight, int aNumSamplesPerPixel, cuda3DVector* anOutColor)
{
int i = (blockDim.y * blockIdx.y) + threadIdx.y;
int j = (blockDim.x * blockIdx.x) + threadIdx.x;
if (j > aHeight || i > aWidth )
return;
const int index = (aWidth * j) + i;
curandState localRandState = aRandState[index];
cuda3DVector color{ 0.0f, 0.0f, 0.0f };
for (int sample = 0; sample < aNumSamplesPerPixel; ++sample)
{
float u = float(i + curand_uniform(&localRandState)) / (aWidth - 1);
float v = float(j + curand_uniform(&localRandState)) / (aHeight - 1);
int maxDepth = 10;
cuda3DVector randomInDisk = CudaGetRandVectorInUnitDisk(&localRandState) * aCamera->lensRadius;
cuda3DVector offset = aCamera->u * randomInDisk.x + aCamera->v * randomInDisk.y;
cuda3DVector rayDirection = aCamera->lowerLeftCorner + (aCamera->horizontal * u) + (aCamera->vertical * v) - aCamera->origin - offset;
cudaRay ray = { aCamera->origin + offset, rayDirection };
cudaHitRecord sharedRecord;
cuda3DVector sampleColor = CudaGetRayColor(ray, sharedRecord, aSomeSpheres, aNumSpheres, &localRandState, maxDepth);
color = color + sampleColor;
}
anOutColor[index] = color;
}
__global__ void CudaGetIdColor(cuda3DVector* anOutColor, int aNumElem)
{
int index = ((blockDim.x * gridDim.x) * (blockDim.x * blockIdx.x + threadIdx.x)) + ((blockDim.y * blockIdx.y) + threadIdx.y);
int indexY = ((blockDim.y * gridDim.y) * (blockDim.y * blockIdx.y + threadIdx.y)) + ((blockDim.x * blockIdx.x) + threadIdx.x);
anOutColor[index].x = (float)index;
anOutColor[index].y = (float)indexY;
anOutColor[index].z = index + indexY / 2;
} |
1c09a94e67264e75281858f0700615ef3cb4fddf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <string>
#include "cp_util.cuh"
#include "data_transfer_utils.cuh"
#include "hipcub/hipcub.hpp"
#include "tokenizer_utils_hip.cuh"
#include "tokenizers.cuh"
#define SORT_BIT 22
#define THREADS_PER_BLOCK 64
/*
Returns true if the byte passed in could be a valid head byte for
a utf8 character.
*/
__device__ __forceinline__ bool is_head_byte(unsigned char utf8_byte){
return (utf8_byte >> 6) != 2;
}
/*
If the byte at start_byte_for_thread is a head byte, the unicode code-point encoded by
the utf8 character started at that byte is returned and the head_byte boolean passed in
is set to true.
If the byte at start_byte_for_thread is not a head byte, 0 is returned AND the head_byte
boolean passed in is set to false.
All threads start reading bytes from the pointer denoted by sentences.
Params
--------
sentences: A pointer to the start of the sequence of characters to be tokenized.
*/
__device__ __forceinline__ uint32_t extract_code_points_from_utf8(const unsigned char* sentences,
const uint32_t start_byte_for_thread,
bool& head_byte) {
constexpr uint8_t max_utf8_blocks_for_char = 4;
uint8_t utf8_blocks[max_utf8_blocks_for_char];
#pragma unroll
for(int i = 0; i < max_utf8_blocks_for_char; ++i) {
utf8_blocks[i] = sentences[start_byte_for_thread + i];
}
// We can have at most 5 bits encoding the length. We check those bits to infer the actual length
const uint8_t length_encoding_bits = utf8_blocks[0] >> 3;
head_byte = is_head_byte(utf8_blocks[0]);
// Set the number of characters and the top masks based on the
// length encoding bits.
uint8_t char_encoding_length = 0, top_mask = 0;
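// length_encoding_bits is the top five bits of the lead byte:
// 0xxxx -> 1 byte (ASCII), 110xx -> 2 bytes, 1110x -> 3 bytes, 11110 -> 4 bytes.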
if (length_encoding_bits < 16){
char_encoding_length = 1;
top_mask = 0x7F;
} else if (length_encoding_bits >= 24 && length_encoding_bits <= 27) {
char_encoding_length = 2;
top_mask = 0x1F;
} else if (length_encoding_bits == 28 || length_encoding_bits == 29) {
char_encoding_length = 3;
top_mask = 0x0F;
} else if (length_encoding_bits == 30) {
char_encoding_length = 4;
top_mask = 0x07;
}
// Now pack up the bits into a uint32_t. All threads will process 4 bytes
// to reduce divergence.
uint32_t code_point = (utf8_blocks[0] & top_mask) << 18;
#pragma unroll
for(int i = 1; i < 4; ++i) {
code_point |= ((utf8_blocks[i] & 0x3F) << (18 - 6*i));
}
// Zero out the bottom of code points with extra reads
const uint8_t shift_amt = 24 - 6*char_encoding_length;
code_point >>= shift_amt;
return code_point;
}
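// One thread per input byte: a thread that owns a UTF-8 head byte decodes the code point, applies
// the normalization described by cp_metadata/aux_table (lower-casing, accent stripping, space
// padding) and emits up to MAX_NEW_CHARS replacement code points. Unused slots keep the sentinel
// value (1 << SORT_BIT) so they can later be compacted away with DeviceSelect.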
__global__ void gpuBasicTokenizer(const unsigned char* sentences, uint32_t* device_sentence_offsets,
const size_t total_bytes, uint32_t* cp_metadata, uint64_t* aux_table,
uint32_t* code_points, uint32_t* chars_per_thread, bool do_lower_case,
uint32_t num_sentences) {
constexpr uint32_t init_val = (1 << SORT_BIT);
uint32_t replacement_code_points[MAX_NEW_CHARS] = {init_val, init_val, init_val};
bool head_byte = false;
const uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t num_new_chars = 0;
if(char_for_thread < total_bytes){
const uint32_t code_point = extract_code_points_from_utf8(sentences, char_for_thread, head_byte);
const uint32_t thr_cp_metadata = get_cp_metadata(cp_metadata, code_point);
if(!should_remove_cp(thr_cp_metadata, do_lower_case) && head_byte) {
num_new_chars = 1;
// Apply lower cases and accent stripping if necessary
const bool replacement_needed = do_lower_case || always_replace(thr_cp_metadata);
uint32_t new_cp = replacement_needed? get_first_cp(thr_cp_metadata): code_point;
new_cp = new_cp == 0? code_point: new_cp;
replacement_code_points[0] = new_cp;
if(is_multi_char_transform(thr_cp_metadata) && do_lower_case) {
uint64_t next_cps = get_extra_cps(aux_table, code_point);
replacement_code_points[1] = static_cast<uint32_t>(next_cps >> 32);
const uint32_t potential_next_cp = static_cast<uint32_t>(next_cps);
replacement_code_points[2] = potential_next_cp != 0? potential_next_cp: replacement_code_points[2];
num_new_chars = 2 + (potential_next_cp != 0);
}
if(should_add_spaces(thr_cp_metadata, do_lower_case)){
// Need to shift all existing code-points up one
for(int loc = num_new_chars; loc > 0; --loc) {
replacement_code_points[loc] = replacement_code_points[loc - 1];
}
// Write the surrounding spaces at the front and the back
replacement_code_points[0] = SPACE_CODE_POINT;
replacement_code_points[num_new_chars + 1] = SPACE_CODE_POINT;
num_new_chars += 2;
}
}
}
chars_per_thread[char_for_thread] = num_new_chars;
typedef cub::BlockStore<uint32_t, THREADS_PER_BLOCK, MAX_NEW_CHARS, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ typename BlockStore::TempStorage temp_storage;
// Now we perform coalesced writes back to global memory using cub.
uint32_t* block_base = code_points + blockIdx.x * blockDim.x * MAX_NEW_CHARS;
BlockStore(temp_storage).Store(block_base, replacement_code_points);
}
void flatten_sentences(const std::vector<std::string>& sentences,
char* flattened_sentences,
uint32_t* sentence_offsets) {
uint32_t start_copy = 0;
for(uint32_t i = 0; i < sentences.size(); ++i){
const uint32_t sentence_length = sentences[i].size();
sentences[i].copy(flattened_sentences + start_copy, sentence_length);
sentence_offsets[i] = start_copy;
start_copy += sentence_length;
}
sentence_offsets[sentences.size()] = start_copy;
}
// -------------------------------------- Basic tokenizer definitions ------------------------------------------------------------
// See tokenizers.cuh
GpuBasicTokenizer::GpuBasicTokenizer(uint32_t max_num_sentences, uint32_t max_num_chars, std::vector<uint32_t> const& cp_metadata, std::vector<uint64_t> const& aux_table, bool do_lower_case):
do_lower_case(do_lower_case),
device_sentence_offsets(max_num_sentences + 1),
device_sentences(max_num_chars),
device_cp_metadata{cp_metadata},
device_aux_table{aux_table} {
size_t max_BLOCKS = (max_num_chars + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
size_t max_threads_on_device = max_BLOCKS * THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * max_threads_on_device;
device_code_points.resize(max_new_char_total);
device_chars_per_thread.resize(max_threads_on_device);
// Determine temporary device storage requirements for cub
size_t temp_storage_scan_bytes = 0;
uint32_t* device_chars_per_thread = nullptr;
hipcub::DeviceScan::InclusiveSum(nullptr, temp_storage_scan_bytes, device_chars_per_thread, device_chars_per_thread, max_threads_on_device);
size_t temp_storage_select_bytes = 0;
static NotEqual select_op((1 << SORT_BIT));
hipcub::DeviceSelect::If(nullptr, temp_storage_select_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()),
thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
max_cub_storage_bytes = ::max(temp_storage_scan_bytes, temp_storage_select_bytes);
cub_temp_storage.resize(max_cub_storage_bytes);
device_num_selected.resize(1);
}
std::pair<ptr_length_pair<uint32_t*>, ptr_length_pair<uint32_t*>> GpuBasicTokenizer::tokenize(const std::vector<std::string>& sentences) {
ptr_length_pair<uint32_t*> cp_and_length;
ptr_length_pair<uint32_t*> offset_and_length;
size_t total_sentence_bytes = 0;
for(const auto& sentence: sentences) {
total_sentence_bytes += sentence.length();
}
size_t num_offsets = sentences.size() + 1;
std::vector<uint32_t> sentence_offsets(num_offsets);
std::vector<char> flattened_sentences(total_sentence_bytes);
flatten_sentences(sentences, flattened_sentences.data(), sentence_offsets.data());
device_sentence_offsets = sentence_offsets;
device_sentences = flattened_sentences;
static NotEqual select_op((1 << SORT_BIT));
size_t BLOCKS = (total_sentence_bytes + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * BLOCKS * THREADS_PER_BLOCK;
size_t threads_on_device = BLOCKS * THREADS_PER_BLOCK;
hipLaunchKernelGGL(( gpuBasicTokenizer), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(device_sentences.data()), thrust::raw_pointer_cast(device_sentence_offsets.data()), total_sentence_bytes, thrust::raw_pointer_cast(device_cp_metadata.data()), thrust::raw_pointer_cast(device_aux_table.data()),
thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), do_lower_case, sentences.size());
assertCudaSuccess(hipPeekAtLastError());
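// Compact away the sentinel slots (1 << SORT_BIT) so the surviving code points are densely packed.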
hipcub::DeviceSelect::If(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
assertCudaSuccess(hipPeekAtLastError());
// We also need to prefix sum the number of characters up to and including the current character in order to get the new sentence lengths.
hipcub::DeviceScan::InclusiveSum(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_chars_per_thread.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), threads_on_device);
assertCudaSuccess(hipPeekAtLastError());
constexpr uint16_t SENTENCE_UPDATE_THREADS = 64;
size_t SEN_KERNEL_BLOCKS = (sentences.size() + SENTENCE_UPDATE_THREADS - 1) / SENTENCE_UPDATE_THREADS;
hipLaunchKernelGGL(( update_sentence_lengths), dim3(SEN_KERNEL_BLOCKS), dim3(SENTENCE_UPDATE_THREADS), 0, 0, thrust::raw_pointer_cast(device_sentence_offsets.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), sentences.size());
assertCudaSuccess(hipPeekAtLastError());
offset_and_length.gpu_ptr = thrust::raw_pointer_cast(device_sentence_offsets.data());
offset_and_length.length = sentences.size() + 1;
uint32_t num_chars = 0;
assertCudaSuccess(hipMemcpy(&num_chars, offset_and_length.gpu_ptr + sentences.size(), sizeof(num_chars), hipMemcpyDeviceToHost));
cp_and_length.gpu_ptr = thrust::raw_pointer_cast(device_code_points.data());
cp_and_length.length = num_chars;
return std::make_pair(cp_and_length, offset_and_length);
}
std::pair<ptr_length_pair<uint32_t*>, ptr_length_pair<uint32_t*>> GpuBasicTokenizer::tokenize(const char* device_sentences_, uint32_t* offsets, uint32_t offset_size) {
ptr_length_pair<uint32_t*> cp_and_length;
ptr_length_pair<uint32_t*> offset_and_length;
size_t num_offsets = offset_size + 1;
std::vector<uint32_t> sentence_offsets(num_offsets);
uint32_t start_copy = 0;
for(uint32_t i = 0; i < offset_size; ++i){
sentence_offsets[i] = start_copy;
start_copy += offsets[i];
}
sentence_offsets[offset_size] = start_copy;
device_sentence_offsets = sentence_offsets;
static NotEqual select_op((1 << SORT_BIT));
size_t BLOCKS = (sentence_offsets[offset_size] + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * BLOCKS * THREADS_PER_BLOCK;
size_t threads_on_device = BLOCKS * THREADS_PER_BLOCK;
hipLaunchKernelGGL(( gpuBasicTokenizer), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, (unsigned char*)device_sentences_, thrust::raw_pointer_cast(device_sentence_offsets.data()), sentence_offsets[offset_size], thrust::raw_pointer_cast(device_cp_metadata.data()), thrust::raw_pointer_cast(device_aux_table.data()),
thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), do_lower_case, offset_size);
assertCudaSuccess(hipPeekAtLastError());
hipcub::DeviceSelect::If(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
assertCudaSuccess(hipPeekAtLastError());
// We also need to prefix sum the number of characters up to and including the current character in order to get the new sentence lengths.
hipcub::DeviceScan::InclusiveSum(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_chars_per_thread.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), threads_on_device);
assertCudaSuccess(hipPeekAtLastError());
constexpr uint16_t SENTENCE_UPDATE_THREADS = 64;
size_t SEN_KERNEL_BLOCKS = (offset_size + SENTENCE_UPDATE_THREADS - 1) / SENTENCE_UPDATE_THREADS;
hipLaunchKernelGGL(( update_sentence_lengths), dim3(SEN_KERNEL_BLOCKS), dim3(SENTENCE_UPDATE_THREADS), 0, 0, thrust::raw_pointer_cast(device_sentence_offsets.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), offset_size);
assertCudaSuccess(hipPeekAtLastError());
offset_and_length.gpu_ptr = thrust::raw_pointer_cast(device_sentence_offsets.data());
offset_and_length.length = offset_size + 1;
uint32_t num_chars = 0;
assertCudaSuccess(hipMemcpy(&num_chars, offset_and_length.gpu_ptr + offset_size, sizeof(num_chars), hipMemcpyDeviceToHost));
cp_and_length.gpu_ptr = thrust::raw_pointer_cast(device_code_points.data());
cp_and_length.length = num_chars;
return std::make_pair(cp_and_length, offset_and_length);
}
GpuBasicTokenizer::~GpuBasicTokenizer() {
}
| 1c09a94e67264e75281858f0700615ef3cb4fddf.cu | #include <vector>
#include <string>
#include "cp_util.cuh"
#include "data_transfer_utils.cuh"
#include "cub/cub.cuh"
#include "tokenizer_utils.cuh"
#include "tokenizers.cuh"
#define SORT_BIT 22
#define THREADS_PER_BLOCK 64
/*
Returns true if the byte passed in could be a valid head byte for
a utf8 character.
*/
__device__ __forceinline__ bool is_head_byte(unsigned char utf8_byte){
return (utf8_byte >> 6) != 2;
}
/*
If the byte at start_byte_for_thread is a head byte, the unicode code-point encoded by
the utf8 character started at that byte is returned and the head_byte boolean passed in
is set to true.
If the byte at start_byte_for_thread is not a head byte, 0 is returned AND the head_byte
boolean passed in is set to false.
All threads start reading bytes from the pointer denoted by sentences.
Params
--------
sentences: A pointer to the start of the sequence of characters to be tokenized.
*/
__device__ __forceinline__ uint32_t extract_code_points_from_utf8(const unsigned char* sentences,
const uint32_t start_byte_for_thread,
bool& head_byte) {
constexpr uint8_t max_utf8_blocks_for_char = 4;
uint8_t utf8_blocks[max_utf8_blocks_for_char];
#pragma unroll
for(int i = 0; i < max_utf8_blocks_for_char; ++i) {
utf8_blocks[i] = sentences[start_byte_for_thread + i];
}
// We can have at most 5 bits encoding the length. We check those bits to infer the actual length
const uint8_t length_encoding_bits = utf8_blocks[0] >> 3;
head_byte = is_head_byte(utf8_blocks[0]);
// Set the number of characters and the top masks based on the
// length encoding bits.
uint8_t char_encoding_length = 0, top_mask = 0;
if (length_encoding_bits < 16){
char_encoding_length = 1;
top_mask = 0x7F;
} else if (length_encoding_bits >= 24 && length_encoding_bits <= 27) {
char_encoding_length = 2;
top_mask = 0x1F;
} else if (length_encoding_bits == 28 || length_encoding_bits == 29) {
char_encoding_length = 3;
top_mask = 0x0F;
} else if (length_encoding_bits == 30) {
char_encoding_length = 4;
top_mask = 0x07;
}
// Now pack up the bits into a uint32_t. All threads will process 4 bytes
// to reduce divergence.
uint32_t code_point = (utf8_blocks[0] & top_mask) << 18;
#pragma unroll
for(int i = 1; i < 4; ++i) {
code_point |= ((utf8_blocks[i] & 0x3F) << (18 - 6*i));
}
// Zero out the bottom of code points with extra reads
const uint8_t shift_amt = 24 - 6*char_encoding_length;
code_point >>= shift_amt;
return code_point;
}
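// Worked example (illustrative, not part of the original code): for the euro
// sign U+20AC the stream holds bytes 0xE2 0x82 0xAC followed by some byte X.
// 0xE2 >> 3 == 28, so char_encoding_length = 3 and top_mask = 0x0F. The packed
// value is (0x02 << 18) | (0x02 << 12) | (0x2C << 6) | (X & 0x3F), and the final
// right shift by 24 - 6*3 = 6 bits discards X's contribution, leaving 0x20AC.
// Since 0xE2 >> 6 == 3 != 2, head_byte is set to true for this thread.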
__global__ void gpuBasicTokenizer(const unsigned char* sentences, uint32_t* device_sentence_offsets,
const size_t total_bytes, uint32_t* cp_metadata, uint64_t* aux_table,
uint32_t* code_points, uint32_t* chars_per_thread, bool do_lower_case,
uint32_t num_sentences) {
constexpr uint32_t init_val = (1 << SORT_BIT);
uint32_t replacement_code_points[MAX_NEW_CHARS] = {init_val, init_val, init_val};
bool head_byte = false;
const uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x;
uint32_t num_new_chars = 0;
if(char_for_thread < total_bytes){
const uint32_t code_point = extract_code_points_from_utf8(sentences, char_for_thread, head_byte);
const uint32_t thr_cp_metadata = get_cp_metadata(cp_metadata, code_point);
if(!should_remove_cp(thr_cp_metadata, do_lower_case) && head_byte) {
num_new_chars = 1;
// Apply lower cases and accent stripping if necessary
const bool replacement_needed = do_lower_case || always_replace(thr_cp_metadata);
uint32_t new_cp = replacement_needed? get_first_cp(thr_cp_metadata): code_point;
new_cp = new_cp == 0? code_point: new_cp;
replacement_code_points[0] = new_cp;
if(is_multi_char_transform(thr_cp_metadata) && do_lower_case) {
uint64_t next_cps = get_extra_cps(aux_table, code_point);
replacement_code_points[1] = static_cast<uint32_t>(next_cps >> 32);
const uint32_t potential_next_cp = static_cast<uint32_t>(next_cps);
replacement_code_points[2] = potential_next_cp != 0? potential_next_cp: replacement_code_points[2];
num_new_chars = 2 + (potential_next_cp != 0);
}
if(should_add_spaces(thr_cp_metadata, do_lower_case)){
// Need to shift all existing code-points up one
for(int loc = num_new_chars; loc > 0; --loc) {
replacement_code_points[loc] = replacement_code_points[loc - 1];
}
// Write the required spaces at the end
replacement_code_points[0] = SPACE_CODE_POINT;
replacement_code_points[num_new_chars + 1] = SPACE_CODE_POINT;
num_new_chars += 2;
}
}
}
chars_per_thread[char_for_thread] = num_new_chars;
typedef cub::BlockStore<uint32_t, THREADS_PER_BLOCK, MAX_NEW_CHARS, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStore;
__shared__ typename BlockStore::TempStorage temp_storage;
// Now we perform coalesced writes back to global memory using cub.
uint32_t* block_base = code_points + blockIdx.x * blockDim.x * MAX_NEW_CHARS;
BlockStore(temp_storage).Store(block_base, replacement_code_points);
}
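// Note on the output layout (added commentary): every thread owns MAX_NEW_CHARS
// consecutive slots in code_points; slots it does not fill keep the sentinel
// value (1 << SORT_BIT). The tokenize() methods below compact these with
// cub::DeviceSelect::If using the NotEqual(1 << SORT_BIT) predicate, and the
// inclusive scan over chars_per_thread lets update_sentence_lengths rebuild
// the sentence offsets.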
void flatten_sentences(const std::vector<std::string>& sentences,
char* flattened_sentences,
uint32_t* sentence_offsets) {
uint32_t start_copy = 0;
for(uint32_t i = 0; i < sentences.size(); ++i){
const uint32_t sentence_length = sentences[i].size();
sentences[i].copy(flattened_sentences + start_copy, sentence_length);
sentence_offsets[i] = start_copy;
start_copy += sentence_length;
}
sentence_offsets[sentences.size()] = start_copy;
}
// -------------------------------------- Basic tokenizer definitions ------------------------------------------------------------
// See tokenizers.cuh
GpuBasicTokenizer::GpuBasicTokenizer(uint32_t max_num_sentences, uint32_t max_num_chars, std::vector<uint32_t> const& cp_metadata, std::vector<uint64_t> const& aux_table, bool do_lower_case):
do_lower_case(do_lower_case),
device_sentence_offsets(max_num_sentences + 1),
device_sentences(max_num_chars),
device_cp_metadata{cp_metadata},
device_aux_table{aux_table} {
size_t max_BLOCKS = (max_num_chars + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
size_t max_threads_on_device = max_BLOCKS * THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * max_threads_on_device;
device_code_points.resize(max_new_char_total);
device_chars_per_thread.resize(max_threads_on_device);
// Determine temporary device storage requirements for cub
size_t temp_storage_scan_bytes = 0;
uint32_t* device_chars_per_thread = nullptr;
cub::DeviceScan::InclusiveSum(nullptr, temp_storage_scan_bytes, device_chars_per_thread, device_chars_per_thread, max_threads_on_device);
size_t temp_storage_select_bytes = 0;
static NotEqual select_op((1 << SORT_BIT));
cub::DeviceSelect::If(nullptr, temp_storage_select_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()),
thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
max_cub_storage_bytes = std::max(temp_storage_scan_bytes, temp_storage_select_bytes);
cub_temp_storage.resize(max_cub_storage_bytes);
device_num_selected.resize(1);
}
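// Added note: the two cub calls above with a null temp-storage pointer are the
// standard CUB size query -- they only report how many temporary bytes each
// algorithm needs. The constructor keeps the larger of the two sizes so one
// cub_temp_storage buffer can be reused by both calls inside tokenize().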
std::pair<ptr_length_pair<uint32_t*>, ptr_length_pair<uint32_t*>> GpuBasicTokenizer::tokenize(const std::vector<std::string>& sentences) {
ptr_length_pair<uint32_t*> cp_and_length;
ptr_length_pair<uint32_t*> offset_and_length;
size_t total_sentence_bytes = 0;
for(const auto& sentence: sentences) {
total_sentence_bytes += sentence.length();
}
size_t num_offsets = sentences.size() + 1;
std::vector<uint32_t> sentence_offsets(num_offsets);
std::vector<char> flattened_sentences(total_sentence_bytes);
flatten_sentences(sentences, flattened_sentences.data(), sentence_offsets.data());
device_sentence_offsets = sentence_offsets;
device_sentences = flattened_sentences;
static NotEqual select_op((1 << SORT_BIT));
size_t BLOCKS = (total_sentence_bytes + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * BLOCKS * THREADS_PER_BLOCK;
size_t threads_on_device = BLOCKS * THREADS_PER_BLOCK;
gpuBasicTokenizer<<<BLOCKS, THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(device_sentences.data()), thrust::raw_pointer_cast(device_sentence_offsets.data()), total_sentence_bytes, thrust::raw_pointer_cast(device_cp_metadata.data()), thrust::raw_pointer_cast(device_aux_table.data()),
thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), do_lower_case, sentences.size());
assertCudaSuccess(cudaPeekAtLastError());
cub::DeviceSelect::If(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
assertCudaSuccess(cudaPeekAtLastError());
    // We also need to prefix sum the number of characters up to and including the current character in order to get the new sentence lengths.
cub::DeviceScan::InclusiveSum(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_chars_per_thread.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), threads_on_device);
assertCudaSuccess(cudaPeekAtLastError());
constexpr uint16_t SENTENCE_UPDATE_THREADS = 64;
size_t SEN_KERNEL_BLOCKS = (sentences.size() + SENTENCE_UPDATE_THREADS - 1) / SENTENCE_UPDATE_THREADS;
update_sentence_lengths<<<SEN_KERNEL_BLOCKS, SENTENCE_UPDATE_THREADS>>>(thrust::raw_pointer_cast(device_sentence_offsets.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), sentences.size());
assertCudaSuccess(cudaPeekAtLastError());
offset_and_length.gpu_ptr = thrust::raw_pointer_cast(device_sentence_offsets.data());
offset_and_length.length = sentences.size() + 1;
uint32_t num_chars = 0;
assertCudaSuccess(cudaMemcpy(&num_chars, offset_and_length.gpu_ptr + sentences.size(), sizeof(num_chars), cudaMemcpyDeviceToHost));
cp_and_length.gpu_ptr = thrust::raw_pointer_cast(device_code_points.data());
cp_and_length.length = num_chars;
return std::make_pair(cp_and_length, offset_and_length);
}
std::pair<ptr_length_pair<uint32_t*>, ptr_length_pair<uint32_t*>> GpuBasicTokenizer::tokenize(const char* device_sentences_, uint32_t* offsets, uint32_t offset_size) {
ptr_length_pair<uint32_t*> cp_and_length;
ptr_length_pair<uint32_t*> offset_and_length;
size_t num_offsets = offset_size + 1;
std::vector<uint32_t> sentence_offsets(num_offsets);
uint32_t start_copy = 0;
for(uint32_t i = 0; i < offset_size; ++i){
sentence_offsets[i] = start_copy;
start_copy += offsets[i];
}
sentence_offsets[offset_size] = start_copy;
device_sentence_offsets = sentence_offsets;
static NotEqual select_op((1 << SORT_BIT));
size_t BLOCKS = (sentence_offsets[offset_size] + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const size_t max_new_char_total = MAX_NEW_CHARS * BLOCKS * THREADS_PER_BLOCK;
size_t threads_on_device = BLOCKS * THREADS_PER_BLOCK;
gpuBasicTokenizer<<<BLOCKS, THREADS_PER_BLOCK>>>((unsigned char*)device_sentences_, thrust::raw_pointer_cast(device_sentence_offsets.data()), sentence_offsets[offset_size], thrust::raw_pointer_cast(device_cp_metadata.data()), thrust::raw_pointer_cast(device_aux_table.data()),
thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), do_lower_case, offset_size);
assertCudaSuccess(cudaPeekAtLastError());
cub::DeviceSelect::If(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_code_points.data()), thrust::raw_pointer_cast(device_num_selected.data()), max_new_char_total, select_op);
assertCudaSuccess(cudaPeekAtLastError());
    // We also need to prefix sum the number of characters up to and including the current character in order to get the new sentence lengths.
cub::DeviceScan::InclusiveSum(thrust::raw_pointer_cast(cub_temp_storage.data()), max_cub_storage_bytes, thrust::raw_pointer_cast(device_chars_per_thread.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), threads_on_device);
assertCudaSuccess(cudaPeekAtLastError());
constexpr uint16_t SENTENCE_UPDATE_THREADS = 64;
size_t SEN_KERNEL_BLOCKS = (offset_size + SENTENCE_UPDATE_THREADS - 1) / SENTENCE_UPDATE_THREADS;
update_sentence_lengths<<<SEN_KERNEL_BLOCKS, SENTENCE_UPDATE_THREADS>>>(thrust::raw_pointer_cast(device_sentence_offsets.data()), thrust::raw_pointer_cast(device_chars_per_thread.data()), offset_size);
assertCudaSuccess(cudaPeekAtLastError());
offset_and_length.gpu_ptr = thrust::raw_pointer_cast(device_sentence_offsets.data());
offset_and_length.length = offset_size + 1;
uint32_t num_chars = 0;
assertCudaSuccess(cudaMemcpy(&num_chars, offset_and_length.gpu_ptr + offset_size, sizeof(num_chars), cudaMemcpyDeviceToHost));
cp_and_length.gpu_ptr = thrust::raw_pointer_cast(device_code_points.data());
cp_and_length.length = num_chars;
return std::make_pair(cp_and_length, offset_and_length);
}
GpuBasicTokenizer::~GpuBasicTokenizer() {
}
|
34a13a86a17c73d5dc9db7d5faec948ac1435101.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
/* Simple Cuda Program: 2D block version
* - map 1D thread block to 2D data
* - use 2D thread block
* - effect of non-optimal block size
*/
// (*3*) set dataX to 17
#define dataX 16
#define nThreadsX (dataX*dataX)
#define BLOCK_DATA(i,j) block_data[(i)*dataX+(j)]
__global__ void addOne(double *data) {
int b = blockIdx.x;
// pointer to block data
double *block_data = data + b*nThreadsX;
// (*1*) Interchange the definitions of tx and ty
// (*2*) use threadIdx.x and threadIdx.y (for original coalesced access version)
int tx = threadIdx.x;
int ty = threadIdx.y;
// access data as 2D
for (int i=0;i<100000; i++)
BLOCK_DATA(ty,tx)++;
}
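// Added note: with tx = threadIdx.x and ty = threadIdx.y, BLOCK_DATA(ty,tx)
// reads block_data[ty*dataX + tx], so threads with consecutive threadIdx.x
// touch consecutive doubles and each warp issues coalesced loads/stores.
// Interchanging tx and ty (exercise *1*) makes consecutive threads stride by
// dataX doubles, which breaks coalescing and is what the timing run compares.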
int main() {
// time variables
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
// (*3*) set data size to 4624 (17*17 * 16)
int n = 4096;
double *data = (double*) malloc(n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = (double)i;
}
double *data_dev;
hipMalloc((void**) &data_dev, n * sizeof(double));
hipMemcpy(data_dev, data, n * sizeof(double) , hipMemcpyHostToDevice);
dim3 nBlocks(n/(nThreadsX),1);
// (*2*) modify here to make a 2D block (dataX x dataX)
dim3 nThreads(dataX,dataX,1);
gettimeofday( &tt1, NULL );
hipLaunchKernelGGL(( addOne) , dim3(nBlocks), dim3(nThreads) , 0, 0, data_dev);
hipDeviceSynchronize();
gettimeofday( &tt2, NULL );
hipMemcpy(data, data_dev, n * sizeof(double) , hipMemcpyDeviceToHost);
hipFree(data_dev);
// time calculation
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "kernel run time = " << fms << endl;
cout << "data[n-1] = " << data[n-1] << endl;
free(data);
}
| 34a13a86a17c73d5dc9db7d5faec948ac1435101.cu | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
/* Simple Cuda Program: 2D block version
* - map 1D thread block to 2D data
* - use 2D thread block
* - effect of non-optimal block size
*/
// (*3*) set dataX to 17
#define dataX 16
#define nThreadsX (dataX*dataX)
#define BLOCK_DATA(i,j) block_data[(i)*dataX+(j)]
__global__ void addOne(double *data) {
int b = blockIdx.x;
// pointer to block data
double *block_data = data + b*nThreadsX;
// (*1*) Interchange the definitions of tx and ty
// (*2*) use threadIdx.x and threadIdx.y (for original coalesced access version)
int tx = threadIdx.x;
int ty = threadIdx.y;
// access data as 2D
for (int i=0;i<100000; i++)
BLOCK_DATA(ty,tx)++;
}
int main() {
// time variables
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
// (*3*) set data size to 4624 (17*17 * 16)
int n = 4096;
double *data = (double*) malloc(n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = (double)i;
}
double *data_dev;
cudaMalloc((void**) &data_dev, n * sizeof(double));
cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
dim3 nBlocks(n/(nThreadsX),1);
// (*2*) modify here to make a 2D block (dataX x dataX)
dim3 nThreads(dataX,dataX,1);
gettimeofday( &tt1, NULL );
addOne <<< nBlocks, nThreads >>> (data_dev);
cudaThreadSynchronize();
gettimeofday( &tt2, NULL );
cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
cudaFree(data_dev);
// time calculation
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "kernel run time = " << fms << endl;
cout << "data[n-1] = " << data[n-1] << endl;
free(data);
}
|
13d731f40e753d064a63b5f8f9e2cf5d38bf2977.hip | // !!! This is a file automatically generated by hipify!!!
/* -------------------------------------------------------
UNTILED CODE GENERATED BY FORMA COMPILER
---------------------------------------------------------*/
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
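/* Added summary: the generated kernel below performs one smoother step per
   interior point,
       out = Ac + c1*(Ac - Ap) + c2*Dinv*(RHS - A(Ac)),
   where A(Ac) = Ac - h2inv*(0.03*sum(8 corners) + 0.1*sum(12 edges)
                             + 0.46*sum(6 faces) - 4.26*center)
   is a 27-point stencil; host_code() then chains four such steps per
   iteration, rotating the Ap/Ac/__var_* buffers. */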
__global__ void __kernel___forma_kernel__0__(float h2inv, float c1, float c2, float * __restrict__ RHS, float * __restrict__ Ap, float * __restrict__ Dinv, float * __restrict__ Ac, int L, int M, int N, float * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_2__ <= (L-2)){
float __temp_0__;
__temp_0__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - Ap[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_1__;
__temp_1__ = (c1 * __temp_0__);
float __temp_2__;
__temp_2__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] + __temp_1__);
float __temp_3__;
__temp_3__ = (c2 * Dinv[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_4__;
__temp_4__ = (Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))] + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))]);
float __temp_5__;
__temp_5__ = (__temp_4__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_6__;
__temp_6__ = (__temp_5__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_7__;
__temp_7__ = (__temp_6__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_8__;
__temp_8__ = (__temp_7__ + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_9__;
__temp_9__ = (__temp_8__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_10__;
__temp_10__ = (__temp_9__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_11__;
__temp_11__ = (0.030000f * __temp_10__);
float __temp_12__;
__temp_12__ = (Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))] + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__+(-1)))]);
float __temp_13__;
__temp_13__ = (__temp_12__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__+(-1)))]);
float __temp_14__;
__temp_14__ = (__temp_13__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_15__;
__temp_15__ = (__temp_14__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_16__;
__temp_16__ = (__temp_15__ + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_17__;
__temp_17__ = (__temp_16__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_18__;
__temp_18__ = (__temp_17__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_19__;
__temp_19__ = (__temp_18__ + Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_20__;
__temp_20__ = (__temp_19__ + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_21__;
__temp_21__ = (__temp_20__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_22__;
__temp_22__ = (__temp_21__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_23__;
__temp_23__ = (0.100000f * __temp_22__);
float __temp_24__;
__temp_24__ = (__temp_11__ + __temp_23__);
float __temp_25__;
__temp_25__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__+(-1)))] + Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_26__;
__temp_26__ = (__temp_25__ + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__))]);
float __temp_27__;
__temp_27__ = (__temp_26__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__))]);
float __temp_28__;
__temp_28__ = (__temp_27__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_29__;
__temp_29__ = (__temp_28__ + Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_30__;
__temp_30__ = (0.460000f * __temp_29__);
float __temp_31__;
__temp_31__ = (__temp_24__ + __temp_30__);
float __temp_32__;
__temp_32__ = (4.260000f * Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_33__;
__temp_33__ = (__temp_31__ - __temp_32__);
float __temp_34__;
__temp_34__ = (h2inv * __temp_33__);
float __temp_35__;
__temp_35__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - __temp_34__);
float __temp_36__;
__temp_36__ = (RHS[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - __temp_35__);
float __temp_37__;
__temp_37__ = (__temp_3__ * __temp_36__);
float __temp_38__;
__temp_38__ = (__temp_2__ + __temp_37__);
__var_2__[__iter_0__+N*(__iter_1__+M*(__iter_2__))] = __temp_38__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void host_code (float * h_Ac, float * h_Ap, float * h_Dinv, float * h_RHS, float * __var_0__, float * c1, float * c2, float h2inv, int L, int M, int N) {
/* Host allocation Begin */
float * Ac;
hipMalloc(&Ac,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Ac\n");
hipMemcpy(Ac,h_Ac,sizeof(float)*(L*M*N), hipMemcpyHostToDevice);
float * Ap;
hipMalloc(&Ap,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Ap\n");
hipMemcpy(Ap,h_Ap,sizeof(float)*(L*M*N), hipMemcpyHostToDevice);
float * Dinv;
hipMalloc(&Dinv,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Dinv\n");
hipMemcpy(Dinv,h_Dinv,sizeof(float)*(L*M*N), hipMemcpyHostToDevice);
float * RHS;
hipMalloc(&RHS,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : RHS\n");
hipMemcpy(RHS,h_RHS,sizeof(float)*(L*M*N), hipMemcpyHostToDevice);
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
float * __var_3__;
hipMalloc(&__var_3__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
float * __var_4__;
hipMalloc(&__var_4__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1;
dim3 blockConfig (16,4,4);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,4);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,4);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
unsigned int power1, power2;
  nvmlReturn_t result;
  nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
  assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
  assert(NVML_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(blockConfig), 0, 0, h2inv, c1[0], c2[0], RHS, Ap, Dinv, Ac, L, M, N, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(blockConfig), 0, 0, h2inv, c1[1], c2[1], RHS, Ac, Dinv, __var_2__, L, M, N, __var_3__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(blockConfig), 0, 0, h2inv, c1[2], c2[2], RHS, __var_2__, Dinv, __var_3__, L, M, N, __var_4__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(blockConfig), 0, 0, h2inv, c1[3], c2[3], RHS, __var_3__, Dinv, __var_4__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
  assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(Ac);
hipFree(Ap);
hipFree(Dinv);
hipFree(RHS);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
| 13d731f40e753d064a63b5f8f9e2cf5d38bf2977.cu | /* -------------------------------------------------------
UNTILED CODE GENERATED BY FORMA COMPILER
---------------------------------------------------------*/
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float h2inv, float c1, float c2, float * __restrict__ RHS, float * __restrict__ Ap, float * __restrict__ Dinv, float * __restrict__ Ac, int L, int M, int N, float * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_2__ <= (L-2)){
float __temp_0__;
__temp_0__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - Ap[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_1__;
__temp_1__ = (c1 * __temp_0__);
float __temp_2__;
__temp_2__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] + __temp_1__);
float __temp_3__;
__temp_3__ = (c2 * Dinv[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_4__;
__temp_4__ = (Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))] + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))]);
float __temp_5__;
__temp_5__ = (__temp_4__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_6__;
__temp_6__ = (__temp_5__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_7__;
__temp_7__ = (__temp_6__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_8__;
__temp_8__ = (__temp_7__ + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_9__;
__temp_9__ = (__temp_8__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_10__;
__temp_10__ = (__temp_9__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_11__;
__temp_11__ = (0.030000f * __temp_10__);
float __temp_12__;
__temp_12__ = (Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__+(-1)))] + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__+(-1)))]);
float __temp_13__;
__temp_13__ = (__temp_12__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__+(-1)))]);
float __temp_14__;
__temp_14__ = (__temp_13__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__+(-1)))]);
float __temp_15__;
__temp_15__ = (__temp_14__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_16__;
__temp_16__ = (__temp_15__ + Ac[__iter_0__+(1)+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_17__;
__temp_17__ = (__temp_16__ + Ac[__iter_0__+(-1)+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_18__;
__temp_18__ = (__temp_17__ + Ac[__iter_0__+(1)+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_19__;
__temp_19__ = (__temp_18__ + Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__+(1)))]);
float __temp_20__;
__temp_20__ = (__temp_19__ + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_21__;
__temp_21__ = (__temp_20__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_22__;
__temp_22__ = (__temp_21__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__+(1)))]);
float __temp_23__;
__temp_23__ = (0.100000f * __temp_22__);
float __temp_24__;
__temp_24__ = (__temp_11__ + __temp_23__);
float __temp_25__;
__temp_25__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__+(-1)))] + Ac[__iter_0__+N*(__iter_1__+(-1)+M*(__iter_2__))]);
float __temp_26__;
__temp_26__ = (__temp_25__ + Ac[__iter_0__+(-1)+N*(__iter_1__+M*(__iter_2__))]);
float __temp_27__;
__temp_27__ = (__temp_26__ + Ac[__iter_0__+(1)+N*(__iter_1__+M*(__iter_2__))]);
float __temp_28__;
__temp_28__ = (__temp_27__ + Ac[__iter_0__+N*(__iter_1__+(1)+M*(__iter_2__))]);
float __temp_29__;
__temp_29__ = (__temp_28__ + Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__+(1)))]);
float __temp_30__;
__temp_30__ = (0.460000f * __temp_29__);
float __temp_31__;
__temp_31__ = (__temp_24__ + __temp_30__);
float __temp_32__;
__temp_32__ = (4.260000f * Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))]);
float __temp_33__;
__temp_33__ = (__temp_31__ - __temp_32__);
float __temp_34__;
__temp_34__ = (h2inv * __temp_33__);
float __temp_35__;
__temp_35__ = (Ac[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - __temp_34__);
float __temp_36__;
__temp_36__ = (RHS[__iter_0__+N*(__iter_1__+M*(__iter_2__))] - __temp_35__);
float __temp_37__;
__temp_37__ = (__temp_3__ * __temp_36__);
float __temp_38__;
__temp_38__ = (__temp_2__ + __temp_37__);
__var_2__[__iter_0__+N*(__iter_1__+M*(__iter_2__))] = __temp_38__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void host_code (float * h_Ac, float * h_Ap, float * h_Dinv, float * h_RHS, float * __var_0__, float * c1, float * c2, float h2inv, int L, int M, int N) {
/* Host allocation Begin */
float * Ac;
cudaMalloc(&Ac,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Ac\n");
cudaMemcpy(Ac,h_Ac,sizeof(float)*(L*M*N), cudaMemcpyHostToDevice);
float * Ap;
cudaMalloc(&Ap,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Ap\n");
cudaMemcpy(Ap,h_Ap,sizeof(float)*(L*M*N), cudaMemcpyHostToDevice);
float * Dinv;
cudaMalloc(&Dinv,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : Dinv\n");
cudaMemcpy(Dinv,h_Dinv,sizeof(float)*(L*M*N), cudaMemcpyHostToDevice);
float * RHS;
cudaMalloc(&RHS,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : RHS\n");
cudaMemcpy(RHS,h_RHS,sizeof(float)*(L*M*N), cudaMemcpyHostToDevice);
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
float * __var_3__;
cudaMalloc(&__var_3__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
float * __var_4__;
cudaMalloc(&__var_4__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1;
dim3 blockConfig (16,4,4);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,4);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,4);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<500; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, blockConfig>>> (h2inv, c1[0], c2[0], RHS, Ap, Dinv, Ac, L, M, N, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, blockConfig>>> (h2inv, c1[1], c2[1], RHS, Ac, Dinv, __var_2__, L, M, N, __var_3__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, blockConfig>>> (h2inv, c1[2], c2[2], RHS, __var_2__, Dinv, __var_3__, L, M, N, __var_4__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, blockConfig>>> (h2inv, c1[3], c2[3], RHS, __var_3__, Dinv, __var_4__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(Ac);
cudaFree(Ap);
cudaFree(Dinv);
cudaFree(RHS);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
4a00ee731062a672f8b2202704e8ad9e61b29c7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "MersenneTwister.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
//Load twister configurations
void loadMTGPU(const char *fname){
FILE *fd = fopen(fname, "rb");
if(!fd){
printf("initMTGPU(): failed to open %s\n", fname);
printf("TEST FAILED\n");
exit(0);
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) ){
printf("initMTGPU(): failed to load %s\n", fname);
printf("TEST FAILED\n");
exit(0);
}
fclose(fd);
}
//Initialize/seed twister for current GPU context
void seedMTGPU(unsigned int seed){
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
CUDA_SAFE_CALL( hipMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)) );
free(MT);
}
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
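// Worked example (added): for u1 = expf(-2.0f) ~= 0.1353 and u2 = 0.25 the
// transform gives r = sqrtf(-2*logf(u1)) = 2 and phi = pi/2, so u1 becomes
// 2*cos(pi/2) ~= 0 and u2 becomes 2*sin(pi/2) = 2 -- two independent draws
// from a standard normal distribution (up to __cosf/__sinf precision).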
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
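// Illustrative host-side call sequence (a sketch, not part of the original
// sample; the data file name, seed and 32x128 launch shape are assumptions,
// and d_Rand must hold MT_RNG_COUNT * nPerRng floats, nPerRng even):
//   loadMTGPU("MersenneTwister.dat");
//   seedMTGPU(777u);
//   hipLaunchKernelGGL(RandomGPU, dim3(32), dim3(128), 0, 0, d_Rand, nPerRng);
//   hipLaunchKernelGGL(BoxMullerGPU, dim3(32), dim3(128), 0, 0, d_Rand, nPerRng);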
| 4a00ee731062a672f8b2202704e8ad9e61b29c7a.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "MersenneTwister.h"
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
//Load twister configurations
void loadMTGPU(const char *fname){
FILE *fd = fopen(fname, "rb");
if(!fd){
printf("initMTGPU(): failed to open %s\n", fname);
printf("TEST FAILED\n");
exit(0);
}
if( !fread(h_MT, sizeof(h_MT), 1, fd) ){
printf("initMTGPU(): failed to load %s\n", fname);
printf("TEST FAILED\n");
exit(0);
}
fclose(fd);
}
//Initialize/seed twister for current GPU context
void seedMTGPU(unsigned int seed){
int i;
//Need to be thread-safe
mt_struct_stripped *MT = (mt_struct_stripped *)malloc(MT_RNG_COUNT * sizeof(mt_struct_stripped));
for(i = 0; i < MT_RNG_COUNT; i++){
MT[i] = h_MT[i];
MT[i].seed = seed;
}
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ds_MT, MT, sizeof(h_MT)) );
free(MT);
}
////////////////////////////////////////////////////////////////////////////////
// Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random.
// For coalesced global writes MT_RNG_COUNT should be a multiple of warp size.
// Initial states for each generator are the same, since the states are
// initialized from the global seed. In order to improve distribution properties
// on small NPerRng supply dedicated (local) seed to each twister.
// The local seeds, in their turn, can be extracted from global seed
// by means of any simple random number generator, like LCG.
////////////////////////////////////////////////////////////////////////////////
__global__ void RandomGPU(
float *d_Random,
int NPerRng
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of NPerRng uniformly distributed
// random samples, produced by RandomGPU(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// NPerRng must be even.
////////////////////////////////////////////////////////////////////////////////
#define PI 3.14159265358979f
__device__ void BoxMuller(float& u1, float& u2){
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__ void BoxMullerGPU(float *d_Random, int NPerRng){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N)
for(int iOut = 0; iOut < NPerRng; iOut += 2)
BoxMuller(
d_Random[iRng + (iOut + 0) * MT_RNG_COUNT],
d_Random[iRng + (iOut + 1) * MT_RNG_COUNT]
);
}
|
70562c99d97bec3dd3d73560af7505a813fca7fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include <hipcub/hipcub.hpp>
using namespace hipcub;
template <int THREADS_PER_BLOCK, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM>
__global__ void compute_cub_block_segmented_reduction(half *d_in, half *d_out) {
// Specialize BlockReduce type for our thread block
typedef BlockReduce<half, THREADS_PER_BLOCK, ALGORITHM> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
// Per-thread tile data
half data[ITEMS_PER_THREAD];
LoadDirectStriped<THREADS_PER_BLOCK, half, ITEMS_PER_THREAD>(
threadIdx.x, d_in + blockIdx.x * THREADS_PER_BLOCK * ITEMS_PER_THREAD, data);
// Compute sum
half aggregate = BlockReduceT(temp_storage).Sum(data);
// Store aggregate
if (threadIdx.x == 0) {
d_out[blockIdx.x] = aggregate;
}
}
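// Added note: one thread block reduces one segment of
// THREADS_PER_BLOCK * ITEMS_PER_THREAD half values. LoadDirectStriped gives
// thread t the elements t, t + THREADS_PER_BLOCK, ... of its segment,
// BlockReduce sums across the block, and only thread 0 writes the per-segment
// result; the benchmarks below pair e.g. 128 threads x 2 items for a
// segment_size of 256.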
template <int THREADS_PER_BLOCK, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM>
static void ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION(benchmark::State &state) {
const size_t num_segments = state.range(0);
const size_t segment_size = state.range(1);
if (segment_size != THREADS_PER_BLOCK * ITEMS_PER_THREAD) {
state.SkipWithError("segment size must be THREADS_PER_BLOCK x ITEMS_PER_THREAD");
return;
}
if (num_segments >= CUDA_MAX_GRID_SIZE) {
state.SkipWithError(
fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", num_segments)
.c_str());
return;
}
const size_t num_elements = num_segments * segment_size;
half *d_in_fp16 = nullptr;
half *d_out = nullptr;
try {
PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
PRINT_IF_ERROR(hipMalloc(&d_out, num_segments * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
PRINT_IF_ERROR(hipDeviceSynchronize());
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
defer(hipEventDestroy(start));
defer(hipEventDestroy(stop));
for (auto _ : state) {
PRINT_IF_ERROR(hipEventRecord(start));
hipLaunchKernelGGL(( compute_cub_block_segmented_reduction<THREADS_PER_BLOCK,
ITEMS_PER_THREAD,
ALGORITHM>)
, dim3(num_segments), dim3(THREADS_PER_BLOCK), 0, 0, d_in_fp16, d_out);
PRINT_IF_ERROR(hipEventRecord(stop));
PRINT_IF_ERROR(hipEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"num_segments", num_segments},
{"segment_size", segment_size},
{"num_elements", num_segments * segment_size},
{"threads_per_block", THREADS_PER_BLOCK},
{"items_per_thread", ITEMS_PER_THREAD},
{"block_reduce_algorithm", (int) ALGORITHM},
{"flops",
{state.iterations() * 1.0 * num_segments * segment_size,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
half *h_out = new half[num_segments];
PRINT_IF_ERROR(hipMemcpy(h_out, d_out, num_segments * sizeof(half),
hipMemcpyDeviceToHost));
float correct_segment_sum = 0;
for (int i = 0; i < segment_size; i++) {
correct_segment_sum += h_in[i];
}
int errors = 0;
for (int i = 0; i < num_segments; i++) {
if (fabs(half_to_float(h_out[i]) - correct_segment_sum) > 0.001) {
errors++;
if (errors < 10) {
printf("segment %d has sum %f (expected %f)\n", i,
half_to_float(h_out[i]), correct_segment_sum);
}
}
}
if (errors > 0) {
printf("ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION does not agree with SEQUENTIAL! %d "
"errors!\n",
errors);
} else {
printf("Results verified: they agree.\n\n");
}
delete h_out;
#endif
hipFree(d_in_fp16);
hipFree(d_out);
} catch (...) {
hipFree(d_in_fp16);
hipFree(d_out);
hipDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
// BlockReduceAlgorithm are BLOCK_REDUCE_RAKING,
// BLOCK_REDUCE_WARP_REDUCTIONS and
// BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY
// BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY
#if 1
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 1, BLOCK_REDUCE_RAKING)
->SEG_32_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 2, BLOCK_REDUCE_RAKING)
->SEG_64_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 1, BLOCK_REDUCE_RAKING)
->SEG_64_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 4, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 2, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 1, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 8, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 4, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 2, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 1, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 16, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 8, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 4, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 2, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 1, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 32, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 16, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 8, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 4, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 2, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 1, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 64, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 32, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 16, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 8, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 4, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 2, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 128, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 64, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 32, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 16, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 8, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 4, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 256, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 128, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 64, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 32, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 16, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 8, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 512, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 256, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 128, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 64, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 32, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 16, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
#endif
| 70562c99d97bec3dd3d73560af7505a813fca7fb.cu |
#include <benchmark/benchmark.h>
#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"
#include <cub/cub.cuh>
using namespace cub;
template <int THREADS_PER_BLOCK, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM>
__global__ void compute_cub_block_segmented_reduction(half *d_in, half *d_out) {
// Specialize BlockReduce type for our thread block
typedef BlockReduce<half, THREADS_PER_BLOCK, ALGORITHM> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
// Per-thread tile data
half data[ITEMS_PER_THREAD];
LoadDirectStriped<THREADS_PER_BLOCK, half, ITEMS_PER_THREAD>(
threadIdx.x, d_in + blockIdx.x * THREADS_PER_BLOCK * ITEMS_PER_THREAD, data);
// Compute sum
half aggregate = BlockReduceT(temp_storage).Sum(data);
// Store aggregate
if (threadIdx.x == 0) {
d_out[blockIdx.x] = aggregate;
}
}
template <int THREADS_PER_BLOCK, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM>
static void ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION(benchmark::State &state) {
const size_t num_segments = state.range(0);
const size_t segment_size = state.range(1);
if (segment_size != THREADS_PER_BLOCK * ITEMS_PER_THREAD) {
state.SkipWithError("segment size must be THREADS_PER_BLOCK x ITEMS_PER_THREAD");
return;
}
if (num_segments >= CUDA_MAX_GRID_SIZE) {
state.SkipWithError(
fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", num_segments)
.c_str());
return;
}
const size_t num_elements = num_segments * segment_size;
half *d_in_fp16 = nullptr;
half *d_out = nullptr;
try {
PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
PRINT_IF_ERROR(cudaMalloc(&d_out, num_segments * sizeof(half)));
cuda_memory_set(d_in_fp16, 0.001f, num_elements);
PRINT_IF_ERROR(cudaDeviceSynchronize());
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
defer(cudaEventDestroy(start));
defer(cudaEventDestroy(stop));
for (auto _ : state) {
PRINT_IF_ERROR(cudaEventRecord(start));
compute_cub_block_segmented_reduction<THREADS_PER_BLOCK,
ITEMS_PER_THREAD,
ALGORITHM>
<<<num_segments, THREADS_PER_BLOCK>>>(d_in_fp16, d_out);
PRINT_IF_ERROR(cudaEventRecord(stop));
PRINT_IF_ERROR(cudaEventSynchronize(stop));
state.PauseTiming();
float msecTotal = 0.0f;
PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"num_segments", num_segments},
{"segment_size", segment_size},
{"num_elements", num_segments * segment_size},
{"threads_per_block", THREADS_PER_BLOCK},
{"items_per_thread", ITEMS_PER_THREAD},
{"block_reduce_algorithm", (int) ALGORITHM},
{"flops",
{state.iterations() * 1.0 * num_segments * segment_size,
benchmark::Counter::kAvgThreadsRate}}});
#if 0
half *h_out = new half[num_segments];
PRINT_IF_ERROR(cudaMemcpy(h_out, d_out, num_segments * sizeof(half),
cudaMemcpyDeviceToHost));
  // The device input is filled with the constant 0.001f via cuda_memory_set, so
  // the expected per-segment sum is segment_size * 0.001f (this disabled check
  // has no host-side h_in buffer to read from).
  float correct_segment_sum = segment_size * 0.001f;
int errors = 0;
for (int i = 0; i < num_segments; i++) {
if (fabs(half_to_float(h_out[i]) - correct_segment_sum) > 0.001) {
errors++;
if (errors < 10) {
printf("segment %d has sum %f (expected %f)\n", i,
half_to_float(h_out[i]), correct_segment_sum);
}
}
}
if (errors > 0) {
printf("ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION does not agree with SEQUENTIAL! %d "
"errors!\n",
errors);
} else {
printf("Results verified: they agree.\n\n");
}
  delete[] h_out;  // h_out was allocated with new[]
#endif
cudaFree(d_in_fp16);
cudaFree(d_out);
} catch (...) {
cudaFree(d_in_fp16);
cudaFree(d_out);
cudaDeviceReset();
const auto p = std::current_exception();
std::rethrow_exception(p);
}
}
// BlockReduceAlgorithm options are BLOCK_REDUCE_RAKING,
// BLOCK_REDUCE_WARP_REDUCTIONS and
// BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY
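// Example (illustrative only) of registering the warp-reductions variant with
// the same macro pattern used below:
//   BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 1, BLOCK_REDUCE_WARP_REDUCTIONS)
//       ->SEG_256_ARGS()
//       ->UseManualTime();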
#if 1
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 1, BLOCK_REDUCE_RAKING)
->SEG_32_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 2, BLOCK_REDUCE_RAKING)
->SEG_64_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 1, BLOCK_REDUCE_RAKING)
->SEG_64_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 4, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 2, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 1, BLOCK_REDUCE_RAKING)
->SEG_128_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 8, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 4, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 2, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 1, BLOCK_REDUCE_RAKING)
->SEG_256_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 16, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 8, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 4, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 2, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 1, BLOCK_REDUCE_RAKING)
->SEG_512_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 32, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 16, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 8, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 4, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 2, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 1, BLOCK_REDUCE_RAKING)
->SEG_1024_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 64, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 32, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 16, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 8, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 4, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 2, BLOCK_REDUCE_RAKING)
->SEG_2048_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 128, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 64, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 32, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 16, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 8, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 4, BLOCK_REDUCE_RAKING)
->SEG_4096_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 256, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 128, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 64, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 32, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 16, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 8, BLOCK_REDUCE_RAKING)
->SEG_8192_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 32, 512, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 64, 256, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 128, 128, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 256, 64, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 512, 32, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
BENCHMARK_TEMPLATE(ORIGINAL_CUB_BLOCK_SEGMENTED_REDUCTION, 1024, 16, BLOCK_REDUCE_RAKING)
->SEG_16384_ARGS()
->UseManualTime();
#endif
|
cc6e11a390d3d2f99c3d97ee3b58b0e9025daeb1.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| cc6e11a390d3d2f99c3d97ee3b58b0e9025daeb1.cu | // generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
0e64e9b5d60e80f36019b568564b43343854987a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/nfa_kernels.h"
__constant__ int c_transition_offset[SYMBOL_COUNT + 1];
__constant__ int c_optimal_k_per_symbol[SYMBOL_COUNT + 1];
// iNFAnt traversal algorithm to process multiple strings on a NFA
// input : total input string
// input_offset : offset of each input string
// transition_list : list of (source, destination) tuples
// transition_offset : index of the first transition triggered by each symbol
// init_states_vector : vector of initial states
// persis_states_vector : vector of persistent states
// final_states_vector : vector of final states
// vector_len : length of state vector (# of ST_BLOCKs)
__global__ void TKO_kernel(unsigned char *input, int *input_offset,
Transition *transition_list,
// int *transition_offset,
ST_BLOCK *init_states_vector,
ST_BLOCK *final_states_vector,
int *top_k_offset_per_symbol,
ST_BLOCK *lim_vector,
int vector_len) {
// Skip to the right input string
input += input_offset[block_ID];
// Get the size of current input string
int input_bytes = input_offset[block_ID + 1] - input_offset[block_ID];
extern __shared__ ST_BLOCK s_data[]; // shared memory
ST_BLOCK *current_st_vec =
s_data; // current active states in shared memory
ST_BLOCK *future_st_vec =
s_data + vector_len; // future active states in shared memory
ST_BLOCK *workspace_vec =
s_data + 2 * vector_len; // workspace states in shared memory, helper
// int *s_transition_offset = (int*)(s_data + 3 * vector_len); //
// transition offset in shared memory
Transition tuple = transition_list[0];
ST_T src_state, dst_state;
ST_BLOCK src_bit, dst_bit;
unsigned int src_block, dst_block;
int c, transition_start, transition_count, wb_transition_start,
wb_transition_count;
// Copy initial and persistent states from global memory into shared memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
current_st_vec[i] = init_states_vector[i];
workspace_vec[i] = 0;
}
__syncthreads();
// First transition and # of transitions triggered by word boundary
wb_transition_start = c_transition_offset[WORD_BOUNDARY];
wb_transition_count =
c_transition_offset[WORD_BOUNDARY + 1] - wb_transition_start;
if (wb_transition_count == 0) goto BYPASS_HEAD;
// If the first character is a word character, there is a word boundary
// before the first character
if (!is_word_char(input[0])) goto BYPASS_HEAD;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i + wb_transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block = src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
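        // State bitmap layout: state s is bit (s % bit_sizeof(ST_BLOCK)) of
        // block (s / bit_sizeof(ST_BLOCK)); e.g. with 32-bit blocks, state 70
        // is bit 6 of block 2.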
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
atomicOr(¤t_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
BYPASS_HEAD:
// For each byte in the input string
for (int byt = 0; byt < input_bytes; byt++) {
// clean future state vector
for (int i = thread_ID; i < vector_len; i += thread_count) {
future_st_vec[i] = 0;
}
__syncthreads();
c = (int)(input[byt]);
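        // For each offset selected for symbol c (top_k_offset_per_symbol), the
        // loop below applies those transitions as a bulk shift: mask the current
        // state vector with the matching lim_vector entry, shift the masked copy
        // by `offset` bits (carrying across ST_BLOCK boundaries), and OR it into
        // the future state vector. Transitions not covered by an offset are
        // handled one by one through the transition list further down.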
for (int i = c_optimal_k_per_symbol[c]; i < c_optimal_k_per_symbol[c+1]; i++) {
int offset = top_k_offset_per_symbol[i];
for (int j = thread_ID; j < vector_len; j += thread_count) {
workspace_vec[j] =
lim_vector[i * vector_len + j] & current_st_vec[j];
}
__syncthreads();
            int sign = 1 - 2 * (offset < 0);  // -1 if offset is negative, +1 otherwise
int left_1 = max(int(0), int(offset / bit_sizeof(ST_BLOCK)));
int right_1 =
min(int(vector_len - 1),
int(vector_len - 1 + (offset / bit_sizeof(ST_BLOCK))));
int left_2 = max(int(0), int(offset / bit_sizeof(ST_BLOCK) + sign));
int right_2 = min(
int(vector_len - 1),
int(vector_len - 1 + sign + (offset / bit_sizeof(ST_BLOCK))));
if (offset >= 0) {
for (int j = left_1 + thread_ID; j <= right_1;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - offset / bit_sizeof(ST_BLOCK)]
<< (offset % bit_sizeof(ST_BLOCK)));
}
__syncthreads();
for (int j = left_2 + thread_ID; j <= right_2;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - offset / bit_sizeof(ST_BLOCK) - 1] >>
(bit_sizeof(ST_BLOCK) -
(offset % bit_sizeof(ST_BLOCK))));
}
__syncthreads();
} else {
for (int j = left_1 + thread_ID; j <= right_1;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - (offset / bit_sizeof(ST_BLOCK))] >>
(((-offset) % bit_sizeof(ST_BLOCK))));
}
__syncthreads();
for (int j = left_2 + thread_ID; j <= right_2;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - (offset / bit_sizeof(ST_BLOCK)) + 1]
<< ((bit_sizeof(ST_BLOCK) -
((-offset) % bit_sizeof(ST_BLOCK)))));
}
__syncthreads();
}
}
transition_start = c_transition_offset[c];
transition_count = c_transition_offset[c + 1] - transition_start;
// For each transition triggered by the character
for (int i = thread_ID; i < transition_count; i += thread_count) {
tuple = transition_list[i + transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in future active state vector
atomicOr(&future_st_vec[dst_block], dst_bit);
}
}
// Swap current and future active state vector
if (current_st_vec == s_data) {
current_st_vec = s_data + vector_len;
future_st_vec = s_data;
} else {
current_st_vec = s_data;
future_st_vec = s_data + vector_len;
}
__syncthreads();
// No transition triggered by word boundary
if (wb_transition_count == 0) continue;
// If there is NOT a word boundary between input[byt] and input[byt + 1]
// or after the last character
if ((byt < input_bytes - 1 &&
(is_word_char(input[byt]) ^ is_word_char(input[byt + 1])) == 0) ||
(byt == input_bytes - 1 && !is_word_char(input[input_bytes - 1])))
continue;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i + wb_transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
atomicOr(¤t_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
}
// Copy final active states from shared memory into global memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
final_states_vector[block_ID * vector_len + i] = current_st_vec[i];
}
}
// Host function to run iNFAnt algorithm on GPU
// This function can process multiple strings on a NFA simultaneously
// scratch : ita_scratch holding the NFA transition graph and device buffers
// h_input_array : array of input strings in host memory
// input_bytes_array : array of string lengths
// array_size : array size (# of strings to match)
// threads_per_block : # of threads per block for kernel function
// show_match_result : print regex matching result if this variable is true
void run_TKO(struct ita_scratch &scratch, unsigned char **h_input_array,
int *input_bytes_array, int array_size, int threads_per_block,
bool show_match_result, bool profiler_mode,
vector<int> *accept_rules) {
struct timeval start_time, end_time;
hipEvent_t memalloc_start,
memalloc_end; // start and end events of device memory allocation
hipEvent_t memcpy_h2d_start,
memcpy_h2d_end; // start and end events of memory copy from host to
// device
hipEvent_t kernel_start,
kernel_end; // start and end events of kernel execution
hipEvent_t memcpy_d2h_start,
memcpy_d2h_end; // start and end events of memory copy from device to
// host
hipEvent_t memfree_start,
memfree_end; // start and end events of device memory free
int vec_len = scratch.tg->init_states_vector
.block_count; // length (# of blocks) of state vector
int total_input_bytes = 0; // sum of string length
// Variables in host memory
unsigned char *h_input; // total input string
int h_input_offset[array_size + 1]; // offsets of all input strings
ST_BLOCK *h_final_st_vec; // final active states of all strings
// Variables in device memory
unsigned char *d_input; // total input string
int *d_input_offset; // offset of each input string
ST_BLOCK *d_final_st_vec;
// Create events
if (profiler_mode) {
hipEventCreate(&memalloc_start);
hipEventCreate(&memalloc_end);
hipEventCreate(&memcpy_h2d_start);
hipEventCreate(&memcpy_h2d_end);
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_end);
hipEventCreate(&memcpy_d2h_start);
hipEventCreate(&memcpy_d2h_end);
hipEventCreate(&memfree_start);
hipEventCreate(&memfree_end);
gettimeofday(&start_time, NULL);
}
for (int i = 0; i < array_size; i++) {
h_input_offset[i] = total_input_bytes;
total_input_bytes += input_bytes_array[i];
}
h_input_offset[array_size] = total_input_bytes;
h_input = (unsigned char *)malloc(total_input_bytes);
if (!h_input) {
cerr << "Error: allocate host memory to store total input string"
<< endl;
exit(-1);
}
// Copy each string into h_input to construct a big string
for (int i = 0; i < array_size; i++) {
memcpy(h_input + h_input_offset[i], h_input_array[i],
input_bytes_array[i]);
}
// Allocate host memory
h_final_st_vec =
(ST_BLOCK *)malloc(sizeof(ST_BLOCK) * vec_len * array_size);
if (!h_final_st_vec) {
cerr << "Error: allocate host memory to store final state vectors"
<< endl;
exit(-1);
}
// Allocate device memory
if (profiler_mode) hipEventRecord(memalloc_start, 0);
hipMalloc((void **)&d_input, total_input_bytes);
hipMalloc((void **)&d_input_offset, sizeof(int) * (array_size + 1));
hipMalloc((void **)&d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size);
if (profiler_mode) hipEventRecord(memalloc_end, 0);
// Copy input from host memory into device memory
if (profiler_mode) hipEventRecord(memcpy_h2d_start, 0);
hipMemcpy(d_input, h_input, total_input_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_input_offset, h_input_offset, sizeof(int) * (array_size + 1),
hipMemcpyHostToDevice);
if (hipSuccess != hipMemcpyToSymbol(c_transition_offset,
scratch.tg->offset_per_symbol,
sizeof(int) * (SYMBOL_COUNT + 1))) {
cout << "Error!\n";
exit(-1);
}
if (hipSuccess != hipMemcpyToSymbol(c_optimal_k_per_symbol,
scratch.tg->optimal_k_per_symbol,
sizeof(int) * (SYMBOL_COUNT + 1))) {
cout << "Error!\n";
exit(-1);
}
if (profiler_mode) hipEventRecord(memcpy_h2d_end, 0);
// Calculate the size of shared memory (for 3 state vectors and transition
// offset)
int shem =
3 * vec_len * sizeof(ST_BLOCK); // + sizeof(int) * (SYMBOL_COUNT + 1);
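    // Shared memory holds three state vectors of vec_len blocks each: the
    // current set, the future set, and the workspace vector used by TKO_kernel.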
// Launch kernel
if (profiler_mode) hipEventRecord(kernel_start, 0);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte);
hipLaunchKernelGGL(( TKO_kernel), dim3(array_size), dim3(threads_per_block), shem, 0,
d_input, d_input_offset, scratch.d_transition_list,
scratch.d_init_st_vec, d_final_st_vec,
scratch.d_top_k_offset_per_symbol, scratch.d_lim_vec, vec_len);
if (profiler_mode) hipEventRecord(kernel_end, 0);
if (profiler_mode) hipEventSynchronize(kernel_end);
// Copy result from device memory into host memory
if (profiler_mode) hipEventRecord(memcpy_d2h_start, 0);
hipMemcpy(h_final_st_vec, d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size, hipMemcpyDeviceToHost);
if (profiler_mode) hipEventRecord(memcpy_d2h_end, 0);
// Get final active states and accept rules for each string
vector<ST_T> final_states[array_size];
// vector<int> accept_rules[array_size];
unordered_map<ST_T, vector<int>>::iterator itr;
for (int i = 0; i < array_size; i++) {
get_active_states(h_final_st_vec + i * vec_len, vec_len,
final_states[i]);
// Get all accept rules for string i
for (int j = 0; j < final_states[i].size(); j++) {
// Get accept rules triggered by this state
itr = scratch.tg->accept_states_rules.find(final_states[i][j]);
if (itr != scratch.tg->accept_states_rules.end()) {
accept_rules[i].insert(accept_rules[i].end(),
itr->second.begin(), itr->second.end());
}
}
// Remove repeated accept rules for string i
sort(accept_rules[i].begin(), accept_rules[i].end());
accept_rules[i].erase(
unique(accept_rules[i].begin(), accept_rules[i].end()),
accept_rules[i].end());
}
// Free device memory
if (profiler_mode) hipEventRecord(memfree_start, 0);
hipFree(d_input);
hipFree(d_input_offset);
hipFree(d_final_st_vec);
if (profiler_mode) hipEventRecord(memfree_end, 0);
// Free host memory
free(h_final_st_vec);
free(h_input);
if (profiler_mode) gettimeofday(&end_time, NULL);
if (show_match_result) show_results(array_size, final_states, accept_rules);
if (profiler_mode) {
Profiler(start_time, end_time, array_size, memalloc_start, memalloc_end,
memcpy_h2d_start, memcpy_h2d_end, kernel_start, kernel_end,
memcpy_d2h_start, memcpy_d2h_end, memfree_start, memfree_end);
}
// Destroy events
if (profiler_mode) {
hipEventDestroy(memalloc_start);
hipEventDestroy(memalloc_end);
hipEventDestroy(memcpy_h2d_start);
hipEventDestroy(memcpy_h2d_end);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_end);
hipEventDestroy(memcpy_d2h_start);
hipEventDestroy(memcpy_d2h_end);
hipEventDestroy(memfree_start);
hipEventDestroy(memfree_end);
}
}
| 0e64e9b5d60e80f36019b568564b43343854987a.cu | #include "src/nfa_kernels.h"
__constant__ int c_transition_offset[SYMBOL_COUNT + 1];
__constant__ int c_optimal_k_per_symbol[SYMBOL_COUNT + 1];
// iNFAnt traversal algorithm to process multiple strings on a NFA
// input : total input string
// input_offset : offset of each input string
// transition_list : list of (source, destination) tuples
// transition_offset : index of the first transition triggered by each symbol
// init_states_vector : vector of initial states
// persis_states_vector : vector of persistent states
// final_states_vector : vector of final states
// vector_len : length of state vector (# of ST_BLOCKs)
__global__ void TKO_kernel(unsigned char *input, int *input_offset,
Transition *transition_list,
// int *transition_offset,
ST_BLOCK *init_states_vector,
ST_BLOCK *final_states_vector,
int *top_k_offset_per_symbol,
ST_BLOCK *lim_vector,
int vector_len) {
// Skip to the right input string
input += input_offset[block_ID];
// Get the size of current input string
int input_bytes = input_offset[block_ID + 1] - input_offset[block_ID];
extern __shared__ ST_BLOCK s_data[]; // shared memory
ST_BLOCK *current_st_vec =
s_data; // current active states in shared memory
ST_BLOCK *future_st_vec =
s_data + vector_len; // future active states in shared memory
ST_BLOCK *workspace_vec =
s_data + 2 * vector_len; // workspace states in shared memory, helper
// int *s_transition_offset = (int*)(s_data + 3 * vector_len); //
// transition offset in shared memory
Transition tuple = transition_list[0];
ST_T src_state, dst_state;
ST_BLOCK src_bit, dst_bit;
unsigned int src_block, dst_block;
int c, transition_start, transition_count, wb_transition_start,
wb_transition_count;
// Copy initial and persistent states from global memory into shared memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
current_st_vec[i] = init_states_vector[i];
workspace_vec[i] = 0;
}
__syncthreads();
// First transition and # of transitions triggered by word boundary
wb_transition_start = c_transition_offset[WORD_BOUNDARY];
wb_transition_count =
c_transition_offset[WORD_BOUNDARY + 1] - wb_transition_start;
if (wb_transition_count == 0) goto BYPASS_HEAD;
// If the first character is a word character, there is a word boundary
// before the first character
if (!is_word_char(input[0])) goto BYPASS_HEAD;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i + wb_transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block = src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
atomicOr(¤t_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
BYPASS_HEAD:
// For each byte in the input string
for (int byt = 0; byt < input_bytes; byt++) {
// clean future state vector
for (int i = thread_ID; i < vector_len; i += thread_count) {
future_st_vec[i] = 0;
}
__syncthreads();
c = (int)(input[byt]);
for (int i = c_optimal_k_per_symbol[c]; i < c_optimal_k_per_symbol[c+1]; i++) {
int offset = top_k_offset_per_symbol[i];
for (int j = thread_ID; j < vector_len; j += thread_count) {
workspace_vec[j] =
lim_vector[i * vector_len + j] & current_st_vec[j];
}
__syncthreads();
            int sign = 1 - 2 * (offset < 0);  // -1 if offset is negative, +1 otherwise
int left_1 = max(int(0), int(offset / bit_sizeof(ST_BLOCK)));
int right_1 =
min(int(vector_len - 1),
int(vector_len - 1 + (offset / bit_sizeof(ST_BLOCK))));
int left_2 = max(int(0), int(offset / bit_sizeof(ST_BLOCK) + sign));
int right_2 = min(
int(vector_len - 1),
int(vector_len - 1 + sign + (offset / bit_sizeof(ST_BLOCK))));
if (offset >= 0) {
for (int j = left_1 + thread_ID; j <= right_1;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - offset / bit_sizeof(ST_BLOCK)]
<< (offset % bit_sizeof(ST_BLOCK)));
}
__syncthreads();
for (int j = left_2 + thread_ID; j <= right_2;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - offset / bit_sizeof(ST_BLOCK) - 1] >>
(bit_sizeof(ST_BLOCK) -
(offset % bit_sizeof(ST_BLOCK))));
}
__syncthreads();
} else {
for (int j = left_1 + thread_ID; j <= right_1;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - (offset / bit_sizeof(ST_BLOCK))] >>
(((-offset) % bit_sizeof(ST_BLOCK))));
}
__syncthreads();
for (int j = left_2 + thread_ID; j <= right_2;
j += thread_count) {
future_st_vec[j] |=
(workspace_vec[j - (offset / bit_sizeof(ST_BLOCK)) + 1]
<< ((bit_sizeof(ST_BLOCK) -
((-offset) % bit_sizeof(ST_BLOCK)))));
}
__syncthreads();
}
}
transition_start = c_transition_offset[c];
transition_count = c_transition_offset[c + 1] - transition_start;
// For each transition triggered by the character
for (int i = thread_ID; i < transition_count; i += thread_count) {
tuple = transition_list[i + transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in future active state vector
atomicOr(&future_st_vec[dst_block], dst_bit);
}
}
// Swap current and future active state vector
if (current_st_vec == s_data) {
current_st_vec = s_data + vector_len;
future_st_vec = s_data;
} else {
current_st_vec = s_data;
future_st_vec = s_data + vector_len;
}
__syncthreads();
// No transition triggered by word boundary
if (wb_transition_count == 0) continue;
// If there is NOT a word boundary between input[byt] and input[byt + 1]
// or after the last character
if ((byt < input_bytes - 1 &&
(is_word_char(input[byt]) ^ is_word_char(input[byt + 1])) == 0) ||
(byt == input_bytes - 1 && !is_word_char(input[input_bytes - 1])))
continue;
// For each transition triggered by word boundary
for (int i = thread_ID; i < wb_transition_count; i += thread_count) {
tuple = transition_list[i + wb_transition_start];
src_state = tuple.src;
dst_state = tuple.dst;
src_bit =
1 << (src_state %
bit_sizeof(
ST_BLOCK)); // index of state bit inside the block
dst_bit = 1 << (dst_state % bit_sizeof(ST_BLOCK));
src_block =
src_state / bit_sizeof(ST_BLOCK); // index of state block
dst_block = dst_state / bit_sizeof(ST_BLOCK);
// If transition source is set in current active state vector
// (divergence happens here)
if (src_bit & current_st_vec[src_block]) {
// Set transition destination in CURRENT active state vector
atomicOr(¤t_st_vec[dst_block], dst_bit);
}
}
__syncthreads();
}
// Copy final active states from shared memory into global memory
for (int i = thread_ID; i < vector_len; i += thread_count) {
final_states_vector[block_ID * vector_len + i] = current_st_vec[i];
}
}
// Host function to run iNFAnt algorithm on GPU
// This function can process multiple strings on a NFA simultaneously
// scratch : ita_scratch holding the NFA transition graph and device buffers
// h_input_array : array of input strings in host memory
// input_bytes_array : array of string lengths
// array_size : array size (# of strings to match)
// threads_per_block : # of threads per block for kernel function
// show_match_result : print regex matching result if this variable is true
void run_TKO(struct ita_scratch &scratch, unsigned char **h_input_array,
int *input_bytes_array, int array_size, int threads_per_block,
bool show_match_result, bool profiler_mode,
vector<int> *accept_rules) {
struct timeval start_time, end_time;
cudaEvent_t memalloc_start,
memalloc_end; // start and end events of device memory allocation
cudaEvent_t memcpy_h2d_start,
memcpy_h2d_end; // start and end events of memory copy from host to
// device
cudaEvent_t kernel_start,
kernel_end; // start and end events of kernel execution
cudaEvent_t memcpy_d2h_start,
memcpy_d2h_end; // start and end events of memory copy from device to
// host
cudaEvent_t memfree_start,
memfree_end; // start and end events of device memory free
int vec_len = scratch.tg->init_states_vector
.block_count; // length (# of blocks) of state vector
int total_input_bytes = 0; // sum of string length
// Variables in host memory
unsigned char *h_input; // total input string
int h_input_offset[array_size + 1]; // offsets of all input strings
ST_BLOCK *h_final_st_vec; // final active states of all strings
// Variables in device memory
unsigned char *d_input; // total input string
int *d_input_offset; // offset of each input string
ST_BLOCK *d_final_st_vec;
// Create events
if (profiler_mode) {
cudaEventCreate(&memalloc_start);
cudaEventCreate(&memalloc_end);
cudaEventCreate(&memcpy_h2d_start);
cudaEventCreate(&memcpy_h2d_end);
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_end);
cudaEventCreate(&memcpy_d2h_start);
cudaEventCreate(&memcpy_d2h_end);
cudaEventCreate(&memfree_start);
cudaEventCreate(&memfree_end);
gettimeofday(&start_time, NULL);
}
for (int i = 0; i < array_size; i++) {
h_input_offset[i] = total_input_bytes;
total_input_bytes += input_bytes_array[i];
}
h_input_offset[array_size] = total_input_bytes;
h_input = (unsigned char *)malloc(total_input_bytes);
if (!h_input) {
cerr << "Error: allocate host memory to store total input string"
<< endl;
exit(-1);
}
// Copy each string into h_input to construct a big string
for (int i = 0; i < array_size; i++) {
memcpy(h_input + h_input_offset[i], h_input_array[i],
input_bytes_array[i]);
}
// Allocate host memory
h_final_st_vec =
(ST_BLOCK *)malloc(sizeof(ST_BLOCK) * vec_len * array_size);
if (!h_final_st_vec) {
cerr << "Error: allocate host memory to store final state vectors"
<< endl;
exit(-1);
}
// Allocate device memory
if (profiler_mode) cudaEventRecord(memalloc_start, 0);
cudaMalloc((void **)&d_input, total_input_bytes);
cudaMalloc((void **)&d_input_offset, sizeof(int) * (array_size + 1));
cudaMalloc((void **)&d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size);
if (profiler_mode) cudaEventRecord(memalloc_end, 0);
// Copy input from host memory into device memory
if (profiler_mode) cudaEventRecord(memcpy_h2d_start, 0);
cudaMemcpy(d_input, h_input, total_input_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_input_offset, h_input_offset, sizeof(int) * (array_size + 1),
cudaMemcpyHostToDevice);
if (cudaSuccess != cudaMemcpyToSymbol(c_transition_offset,
scratch.tg->offset_per_symbol,
sizeof(int) * (SYMBOL_COUNT + 1))) {
cout << "Error!\n";
exit(-1);
}
if (cudaSuccess != cudaMemcpyToSymbol(c_optimal_k_per_symbol,
scratch.tg->optimal_k_per_symbol,
sizeof(int) * (SYMBOL_COUNT + 1))) {
cout << "Error!\n";
exit(-1);
}
if (profiler_mode) cudaEventRecord(memcpy_h2d_end, 0);
// Calculate the size of shared memory (for 3 state vectors and transition
// offset)
int shem =
3 * vec_len * sizeof(ST_BLOCK); // + sizeof(int) * (SYMBOL_COUNT + 1);
// Launch kernel
if (profiler_mode) cudaEventRecord(kernel_start, 0);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
TKO_kernel<<<array_size, threads_per_block, shem>>>(
d_input, d_input_offset, scratch.d_transition_list,
scratch.d_init_st_vec, d_final_st_vec,
scratch.d_top_k_offset_per_symbol, scratch.d_lim_vec, vec_len);
if (profiler_mode) cudaEventRecord(kernel_end, 0);
if (profiler_mode) cudaEventSynchronize(kernel_end);
// Copy result from device memory into host memory
if (profiler_mode) cudaEventRecord(memcpy_d2h_start, 0);
cudaMemcpy(h_final_st_vec, d_final_st_vec,
sizeof(ST_BLOCK) * vec_len * array_size, cudaMemcpyDeviceToHost);
if (profiler_mode) cudaEventRecord(memcpy_d2h_end, 0);
// Get final active states and accept rules for each string
vector<ST_T> final_states[array_size];
// vector<int> accept_rules[array_size];
unordered_map<ST_T, vector<int>>::iterator itr;
for (int i = 0; i < array_size; i++) {
get_active_states(h_final_st_vec + i * vec_len, vec_len,
final_states[i]);
// Get all accept rules for string i
for (int j = 0; j < final_states[i].size(); j++) {
// Get accept rules triggered by this state
itr = scratch.tg->accept_states_rules.find(final_states[i][j]);
if (itr != scratch.tg->accept_states_rules.end()) {
accept_rules[i].insert(accept_rules[i].end(),
itr->second.begin(), itr->second.end());
}
}
// Remove repeated accept rules for string i
sort(accept_rules[i].begin(), accept_rules[i].end());
accept_rules[i].erase(
unique(accept_rules[i].begin(), accept_rules[i].end()),
accept_rules[i].end());
}
// Free device memory
if (profiler_mode) cudaEventRecord(memfree_start, 0);
cudaFree(d_input);
cudaFree(d_input_offset);
cudaFree(d_final_st_vec);
if (profiler_mode) cudaEventRecord(memfree_end, 0);
// Free host memory
free(h_final_st_vec);
free(h_input);
if (profiler_mode) gettimeofday(&end_time, NULL);
if (show_match_result) show_results(array_size, final_states, accept_rules);
if (profiler_mode) {
Profiler(start_time, end_time, array_size, memalloc_start, memalloc_end,
memcpy_h2d_start, memcpy_h2d_end, kernel_start, kernel_end,
memcpy_d2h_start, memcpy_d2h_end, memfree_start, memfree_end);
}
// Destroy events
if (profiler_mode) {
cudaEventDestroy(memalloc_start);
cudaEventDestroy(memalloc_end);
cudaEventDestroy(memcpy_h2d_start);
cudaEventDestroy(memcpy_h2d_end);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_end);
cudaEventDestroy(memcpy_d2h_start);
cudaEventDestroy(memcpy_d2h_end);
cudaEventDestroy(memfree_start);
cudaEventDestroy(memfree_end);
}
}
|
8b5f7c03aa27b3817891db16e9ff163b10dca682.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_plus_2_back [3][2];
static int dims_update_halo_kernel3_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel3_plus_2_back_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,0,2);
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,0,2);
}
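// The plus_2_back variant fills each halo point on the back face from the point
// two cells away along z (the (0,0,2) accessor offset), and only for whichever
// of vol_flux_x / mass_flux_x are enabled in `fields`.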
__global__ void ops_update_halo_kernel3_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_2_back[0][0] * dims_update_halo_kernel3_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_2_back[1][0] * dims_update_halo_kernel3_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_plus_2_back[0][0], dims_update_halo_kernel3_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_plus_2_back[1][0], dims_update_halo_kernel3_plus_2_back[1][1], arg1);
update_halo_kernel3_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,69)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_2_back");
OPS_kernels[69].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel3_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_2_back_h[1][1]) {
dims_update_halo_kernel3_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel3_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel3_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel3_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel3_plus_2_back, dims_update_halo_kernel3_plus_2_back_h, sizeof(dims_update_halo_kernel3_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[69].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 69;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 69;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| 8b5f7c03aa27b3817891db16e9ff163b10dca682.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_plus_2_back [3][2];
static int dims_update_halo_kernel3_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel3_plus_2_back_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,0,2);
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,0,2);
}
__global__ void ops_update_halo_kernel3_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_2_back[0][0] * dims_update_halo_kernel3_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_2_back[1][0] * dims_update_halo_kernel3_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_plus_2_back[0][0], dims_update_halo_kernel3_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_plus_2_back[1][0], dims_update_halo_kernel3_plus_2_back[1][1], arg1);
update_halo_kernel3_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,69)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_2_back");
OPS_kernels[69].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel3_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_2_back_h[1][1]) {
dims_update_halo_kernel3_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel3_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel3_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel3_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel3_plus_2_back, dims_update_halo_kernel3_plus_2_back_h, sizeof(dims_update_halo_kernel3_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel3_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[69].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 69;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 69;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
16793b39efd5f2dc3f2f6c7efb54fb51fcda8d9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2022 The Microsoft DeepSpeed Team
*/
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
#define MAX_CAP 4
#define MAX_SEQ 2048
inline __device__ float gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
}
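/*
gelu() above implements the tanh approximation of GELU:
    gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
where sqrt_param = sqrt(2/pi) ~= 0.79788456.
*/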
/*
In-place gelu(biasAdd(x)) for channels last
*/
template <typename T>
__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(gelu(data_f + bias_f));
}
mem_access::store_global<granularity>(input + offset, data);
}
}
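// With a 16-byte access granularity each thread handles 4 floats or 8 halves
// per load/store, which is why intermediate_size must be a multiple of
// granularity / sizeof(T), as noted in the kernel above.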
template <typename T>
void launch_bias_gelu(T* input,
const T* bias,
int intermediate_size,
int batch_size,
hipStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream,
input, bias, total_count, intermediate_size);
}
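// Example call (illustrative only; the buffer and dimension names below are
// placeholders, not defined in this file):
//   launch_bias_gelu<__half>(d_ffn_output, d_ffn_bias,
//                            /*intermediate_size=*/4096,
//                            /*batch_size=*/batch * seq_len, stream);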
template void launch_bias_gelu<float>(float*, const float*, int, int, hipStream_t);
template void launch_bias_gelu<__half>(__half*, const __half*, int, int, hipStream_t);
/*
In-place channels-last bias add
*/
template <typename T>
__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(data_f + bias_f);
}
mem_access::store_global<granularity>(input + offset, data);
}
}
template <typename T>
void launch_bias_add(T* input,
const T* bias,
int intermediate_size,
int batch_size,
hipStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream,
input, bias, total_count, intermediate_size);
}
template void launch_bias_add<float>(float*, const float*, int, int, hipStream_t);
template void launch_bias_add<__half>(__half*, const __half*, int, int, hipStream_t);
__global__ void fused_bias_residual(float* residual,
const float* hidden_state,
const float* attn,
const float* bias,
const float* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale,
const bool preln)
{
float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float4 res_fl4 = res_fl4_ptr[offset];
const float4 hs_fl4 = hs_fl4_ptr[offset];
const float4 attn_fl4 = attn_fl4_ptr[offset];
const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
if (preln) {
// residual = (residual + attention + bias + attention_bias) *
// mp_scale + hidden_state
res_fl4.x =
(res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x);
res_fl4.y =
(res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y);
res_fl4.z =
(res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z);
res_fl4.w =
(res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w);
} else {
// residual += hidden_state + bias
res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x;
res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y;
res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z;
res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w;
}
res_fl4_ptr[offset] = res_fl4;
}
}
__global__ void fused_bias_residual(__half* residual,
const __half* hidden_state,
const __half* attn,
const __half* bias,
const __half* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale,
const bool preln)
{
float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float2 res_fl2 = res_fl2_ptr[offset];
const float2 hs_fl2 = hs_fl2_ptr[offset];
const float2 attn_fl2 = attn_fl2_ptr[offset];
const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
__half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2);
const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2);
const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2);
const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2);
const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2);
float2 res_low = __half22float2(res_half2[0]);
float2 res_high = __half22float2(res_half2[1]);
const float2 hs_low = __half22float2(hs_half2[0]);
const float2 hs_high = __half22float2(hs_half2[1]);
const float2 attn_low = __half22float2(attn_half2[0]);
const float2 attn_high = __half22float2(attn_half2[1]);
const float2 bias_low = __half22float2(bias_half2[0]);
const float2 bias_high = __half22float2(bias_half2[1]);
const float2 attn_bias_low = __half22float2(attn_bias_half2[0]);
const float2 attn_bias_high = __half22float2(attn_bias_half2[1]);
if (preln) {
// residual = (residual + attention + bias + attention_bias) *
// mp_scale + hidden_state
res_low.x =
(res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x;
res_low.y =
(res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y;
res_high.x =
(res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x;
res_high.y =
(res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y;
} else {
// residual += hidden_state + bias
res_low.x = (res_low.x + hs_low.x + bias_low.x);
res_low.y = (res_low.y + hs_low.y + bias_low.y);
res_high.x = (res_high.x + hs_high.x + bias_high.x);
res_high.y = (res_high.y + hs_high.y + bias_high.y);
}
res_half2[0] = __float22half2_rn(res_low);
res_half2[1] = __float22half2_rn(res_high);
res_fl2_ptr[offset] = res_fl2;
}
}
template <typename T>
void launch_bias_residual(T* residual,
T* hidden_state,
T* attn,
T* bias,
T* attn_bias,
int batch,
int hidden_dim,
int mp_size,
bool preln,
hipStream_t stream)
{
int total_count = batch * hidden_dim / 4;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, residual,
hidden_state,
attn,
bias,
attn_bias,
total_count,
hidden_dim / 4,
1.0 / mp_size,
preln);
}
template void launch_bias_residual<
float>(float*, float*, float*, float*, float*, int, int, int, bool, hipStream_t);
template void launch_bias_residual<
__half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, hipStream_t);
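/*
GPT-J style residual update:
  residual = hidden_state + attn + (residual + bias) * mp_scale
with attn_bias, when non-null, added to the residual beforehand. Vectorized as one float4
(float path) / four halves (half path) per thread; mp_scale = 1 / mp_size from the launcher.
*/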
__global__ void gptj_residual_add(float* residual,
const float* hidden_state,
const float* attn,
const float* bias,
const float* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale)
{
float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float4 res_fl4 = res_fl4_ptr[offset];
const float4 hs_fl4 = hs_fl4_ptr[offset];
const float4 attn_fl4 = attn_fl4_ptr[offset];
const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
if (attn_bias) {
float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
// residual += attention_bias
res_fl4.x += attn_bias_fl4.x;
res_fl4.y += attn_bias_fl4.y;
res_fl4.z += attn_bias_fl4.z;
res_fl4.w += attn_bias_fl4.w;
}
// residual = hidden_state + attention + (residual + bias) * mp_scale
res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale;
res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale;
res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale;
res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale;
res_fl4_ptr[offset] = res_fl4;
}
}
__global__ void gptj_residual_add(__half* residual,
const __half* hidden_state,
const __half* attn,
const __half* bias,
const __half* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale)
{
float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float2 res_fl2 = res_fl2_ptr[offset];
const float2 hs_fl2 = hs_fl2_ptr[offset];
const float2 attn_fl2 = attn_fl2_ptr[offset];
const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
__half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2);
const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2);
const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2);
const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2);
float2 res_low = __half22float2(res_half2[0]);
float2 res_high = __half22float2(res_half2[1]);
const float2 hs_low = __half22float2(hs_half2[0]);
const float2 hs_high = __half22float2(hs_half2[1]);
const float2 attn_low = __half22float2(attn_half2[0]);
const float2 attn_high = __half22float2(attn_half2[1]);
const float2 bias_low = __half22float2(bias_half2[0]);
const float2 bias_high = __half22float2(bias_half2[1]);
if (attn_bias) {
const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2);
const float2 attn_bias_low = __half22float2(attn_bias_half2[0]);
const float2 attn_bias_high = __half22float2(attn_bias_half2[1]);
// residual += attention_bias
res_low.x += attn_bias_low.x;
res_low.y += attn_bias_low.y;
res_high.x += attn_bias_high.x;
res_high.y += attn_bias_high.y;
}
// residual = hidden_state + attention + (residual + bias) * mp_scale
res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale;
res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale;
res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale;
res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale;
res_half2[0] = __float22half2_rn(res_low);
res_half2[1] = __float22half2_rn(res_high);
res_fl2_ptr[offset] = res_fl2;
}
}
template <typename T>
void launch_gptj_residual_add(T* residual,
T* hidden_state,
T* attn,
T* bias,
T* attn_bias,
int hidden_dim,
int batch,
int mp_size,
hipStream_t stream)
{
int total_count = batch * hidden_dim / 4;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
hipLaunchKernelGGL(( gptj_residual_add), dim3(grid_dims), dim3(block_dims), 0, stream,
residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size);
}
template void launch_gptj_residual_add<float>(float*,
float*,
float*,
float*,
float*,
int,
int,
int,
hipStream_t);
template void launch_gptj_residual_add<__half>(__half*,
__half*,
__half*,
__half*,
__half*,
int,
int,
int,
hipStream_t);
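/*
MoE residual blend: mlp_out = mlp_out * coef2 + residual * coef1, where `coef` packs the two
per-channel coefficient vectors back-to-back (coef1 in [0, hidden_dim), coef2 in
[hidden_dim, 2 * hidden_dim)). One block per sequence position, 16-byte vectorized accesses.
*/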
template <typename T>
__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim)
{
constexpr int granularity = 16;
constexpr int vals_per_access = granularity / sizeof(T);
T* residual_seq = residual + blockIdx.x * hidden_dim;
T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim;
for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim;
tid += blockDim.x * vals_per_access) {
T mlp[vals_per_access];
T res[vals_per_access];
T coef1[vals_per_access];
T coef2[vals_per_access];
mem_access::load_global<granularity>(mlp, mlp_out_seq + tid);
mem_access::load_global<granularity>(res, residual_seq + tid);
mem_access::load_global<granularity>(coef1, coef + tid);
mem_access::load_global<granularity>(coef2, coef + tid + hidden_dim);
#pragma unroll
for (int idx = 0; idx < vals_per_access; idx++) {
mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx];
}
mem_access::store_global<granularity>(mlp_out_seq + tid, mlp);
}
}
template <typename T>
void launch_moe_res_matmul(T* residual,
T* coef,
T* mlp_out,
int seq_len,
int hidden_dim,
hipStream_t stream)
{
dim3 grid_dim(seq_len);
dim3 block_dim(1024);
hipLaunchKernelGGL(( moe_res_matmul), dim3(grid_dim), dim3(block_dim), 0, stream,
residual, coef, mlp_out, seq_len, hidden_dim);
}
template void launch_moe_res_matmul(float* residual,
float* coef,
float* mlp_out,
int seq_len,
int hidden_dim,
hipStream_t stream);
template void launch_moe_res_matmul(__half* residual,
__half* coef,
__half* mlp_out,
int seq_len,
int hidden_dim,
hipStream_t stream);
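/*
Copies each head vector into a larger, zero-padded head dimension. Data is moved in 16-byte
chunks, so the launcher passes head_size and padded_head_size divided by 8 (halves per float4);
threadIdx.x walks the padded head while (blockIdx.x, threadIdx.y) select the row.
Note: the float overload below is an empty stub, only the __half path pads.
*/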
__global__ void pad_data_kernel(__half* padded_output,
__half* output,
int head_size,
int padded_head_size)
{
float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
float4* output_cast = reinterpret_cast<float4*>(output);
int bid = blockIdx.x * (blockDim.y) + threadIdx.y;
int idx = threadIdx.x;
padded_output_cast += (bid * padded_head_size);
output_cast += (bid * head_size);
float4 ZERO;
const __half2 zero_h = __float2half2_rn(0.f);
__half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO);
#pragma unroll
for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
if (idx < head_size)
padded_output_cast[idx] = output_cast[idx];
else
padded_output_cast[idx] = ZERO;
}
__global__ void pad_data_kernel(float* padded_output,
float* output,
int head_size,
int padded_head_size)
{
}
template <typename T>
void pad_data(T* padded_output,
T* output,
int bsz,
int head_size,
int padded_head_size,
hipStream_t stream)
{
dim3 grid_dim((bsz - 1) / 16 + 1);
dim3 block_dim(padded_head_size / 8, 16);
hipLaunchKernelGGL(( pad_data_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
padded_output, output, head_size / 8, padded_head_size / 8);
}
template void pad_data(__half* padded_output,
__half* output,
int bsz,
int head_size,
int padded_head_size,
hipStream_t stream);
template void pad_data(float* padded_output,
float* output,
int bsz,
int head_size,
int padded_head_size,
hipStream_t stream);
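// Same idea as pad_data_kernel, but also zero-pads the sequence dimension: rows beyond seq_len
// are filled with zeros in the padded output.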
__global__ void pad_head_seq_kernel(__half* padded_output,
__half* output,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size)
{
float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
float4* output_cast = reinterpret_cast<float4*>(output);
int bsz = blockIdx.x;
int bid = blockIdx.y * (blockDim.y) + threadIdx.y;
int idx = threadIdx.x;
padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size;
output_cast += (bsz * seq_len + bid) * head_size;
float4 ZERO;
const __half2 zero_h = __float2half2_rn(0.f);
__half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO);
#pragma unroll
for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
if (idx < head_size && bid < seq_len)
padded_output_cast[idx] = output_cast[idx];
else
padded_output_cast[idx] = ZERO;
}
__global__ void pad_head_seq_kernel(float* padded_output,
float* output,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size)
{
}
template <typename T>
void pad_head_seq(T* padded_output,
T* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
hipStream_t stream)
{
dim3 grid_dim(bsz, padded_seq_len / 16);
dim3 block_dim(padded_head_size / 8, 16);
hipLaunchKernelGGL(( pad_head_seq_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8);
}
template void pad_head_seq(__half* padded_output,
__half* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
hipStream_t stream);
template void pad_head_seq(float* padded_output,
float* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
hipStream_t stream);
// TODO(cmikeh2): evaluate different GeLU performance
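// erf-based GeLU, distinct from the tanh-approximation gelu() used by the fused bias-GeLU kernel.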
__device__ __forceinline__ float old_gelu(float val)
{
// 1 / sqrt(2)
constexpr float rsqrt_2 = 0.707106769084930419922;
return val * 0.5f * (1.0f + erff(val * rsqrt_2));
}
namespace fused_geglu {
constexpr int threads = 256;
constexpr int steps = 2;
constexpr int granularity = 16;
} // namespace fused_geglu
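/*
Gated GeLU with fused bias. For each output channel c in [0, base_channels):
  output[c] = (activation[c] + bias[c]) * old_gelu(activation[c + base_channels] + bias[c + base_channels])
where every row of `activation` holds 2 * base_channels values. Each thread processes
fused_geglu::steps chunks of 16 bytes, evaluating the GeLU in fp32.
*/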
template <typename T>
__global__ void fused_bias_geglu(T* output,
const T* activation,
const T* bias,
int base_channels,
int total_elems)
{
constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
constexpr int T_per_step = T_per_access * fused_geglu::threads;
constexpr int T_per_block = T_per_step * fused_geglu::steps;
const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access;
#pragma unroll
for (int i = 0; i < fused_geglu::steps; i++) {
T activation_buffer_1[T_per_access];
T activation_buffer_2[T_per_access];
T bias_buffer_1[T_per_access];
T bias_buffer_2[T_per_access];
const int iter_id = id + T_per_step * i;
if (iter_id < total_elems) {
const int channel_id = iter_id % base_channels;
const int seq_id = iter_id / base_channels;
const int seq_offset = seq_id * base_channels * 2;
mem_access::load_global<fused_geglu::granularity>(activation_buffer_1,
activation + seq_offset + channel_id);
mem_access::load_global<fused_geglu::granularity>(
activation_buffer_2, activation + seq_offset + channel_id + base_channels);
mem_access::load_global<fused_geglu::granularity>(bias_buffer_1, bias + channel_id);
mem_access::load_global<fused_geglu::granularity>(bias_buffer_2,
bias + channel_id + base_channels);
// Since the GeLU is going to happen at float, might as well
// convert
#pragma unroll
for (int v = 0; v < T_per_access; v++) {
T hidden_state = activation_buffer_1[v] + bias_buffer_1[v];
T pre_gate = activation_buffer_2[v] + bias_buffer_2[v];
float gate_f = old_gelu(conversion::to<float>(pre_gate));
T gate = conversion::to<T>(gate_f);
activation_buffer_1[v] = hidden_state * gate;
}
mem_access::store_global<fused_geglu::granularity>(output + iter_id,
activation_buffer_1);
}
}
}
template <typename T>
void launch_fused_bias_geglu(T* output,
const T* activation,
const T* bias,
int rows,
int elems_per_row,
hipStream_t stream)
{
/*
Fused bias GEGLU is a variant of the gated activation functions.
The input here is a matrix of [batch, seq_len, 2 * intermediate_dim]
where the second half of the channels act as GeLU gates for the first
half.
*/
    // Re-derive the vectorization constants defined in the fused_geglu namespace
constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
constexpr int T_per_step = T_per_access * fused_geglu::threads;
constexpr int T_per_block = T_per_step * fused_geglu::steps;
const int base_channels = elems_per_row / 2;
const int total_elems = base_channels * rows;
dim3 block(fused_geglu::threads);
dim3 grid((total_elems + T_per_block - 1) / T_per_block);
hipLaunchKernelGGL(( fused_bias_geglu), dim3(grid), dim3(block), 0, stream,
output, activation, bias, base_channels, total_elems);
}
template void launch_fused_bias_geglu(__half*,
const __half*,
const __half*,
int,
int,
hipStream_t);
template void launch_fused_bias_geglu(float*, const float*, const float*, int, int, hipStream_t);
| 16793b39efd5f2dc3f2f6c7efb54fb51fcda8d9f.cu | /*
Copyright 2022 The Microsoft DeepSpeed Team
*/
#include "conversion_utils.h"
#include "inference_cuda_layers.h"
#include "memory_access_utils.h"
namespace cg = cooperative_groups;
#define MAX_CAP 4
#define MAX_SEQ 2048
inline __device__ float gelu(const float x)
{
const float sqrt_param = 0.79788456080286535587989211986876f;
const float mul_param = 0.044715;
return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
}
/*
In-place gelu(biasAdd(x)) for channels last
*/
template <typename T>
__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(gelu(data_f + bias_f));
}
mem_access::store_global<granularity>(input + offset, data);
}
}
template <typename T>
void launch_bias_gelu(T* input,
const T* bias,
int intermediate_size,
int batch_size,
cudaStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>(
input, bias, total_count, intermediate_size);
}
template void launch_bias_gelu<float>(float*, const float*, int, int, cudaStream_t);
template void launch_bias_gelu<__half>(__half*, const __half*, int, int, cudaStream_t);
/*
In-place channels-last bias add
*/
template <typename T>
__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size)
{
// Input restriction: intermediate_size % vals_per_access == 0
constexpr int granularity = 16;
constexpr int values_per_access = granularity / sizeof(T);
const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
if (offset < total_count) {
T data[values_per_access];
T data_bias[values_per_access];
mem_access::load_global<granularity>(data, input + offset);
mem_access::load_global<granularity>(data_bias, bias + (offset % intermediate_size));
#pragma unroll
for (int i = 0; i < values_per_access; i++) {
float data_f = conversion::to<float>(data[i]);
float bias_f = conversion::to<float>(data_bias[i]);
data[i] = conversion::to<T>(data_f + bias_f);
}
mem_access::store_global<granularity>(input + offset, data);
}
}
template <typename T>
void launch_bias_add(T* input,
const T* bias,
int intermediate_size,
int batch_size,
cudaStream_t stream)
{
constexpr int threads = 1024;
constexpr int granularity = 16;
const int total_count = batch_size * intermediate_size;
const int elems_per_block = threads * (granularity / sizeof(T));
dim3 block_dims(threads);
dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
fused_bias_add<<<grid_dims, block_dims, 0, stream>>>(
input, bias, total_count, intermediate_size);
}
template void launch_bias_add<float>(float*, const float*, int, int, cudaStream_t);
template void launch_bias_add<__half>(__half*, const __half*, int, int, cudaStream_t);
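/*
Fused residual update applied after the attention block.
  preln == true:  residual = (residual + attn + bias + attn_bias) * mp_scale + hidden_state
  preln == false: residual += hidden_state + bias
Each thread handles one float4 (float path) / four halves (half path); the launcher passes
mp_scale = 1 / mp_size.
*/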
__global__ void fused_bias_residual(float* residual,
const float* hidden_state,
const float* attn,
const float* bias,
const float* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale,
const bool preln)
{
float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float4 res_fl4 = res_fl4_ptr[offset];
const float4 hs_fl4 = hs_fl4_ptr[offset];
const float4 attn_fl4 = attn_fl4_ptr[offset];
const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
if (preln) {
// residual = (residual + attention + bias + attention_bias) *
// mp_scale + hidden_state
res_fl4.x =
(res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x);
res_fl4.y =
(res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y);
res_fl4.z =
(res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z);
res_fl4.w =
(res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w);
} else {
// residual += hidden_state + bias
res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x;
res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y;
res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z;
res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w;
}
res_fl4_ptr[offset] = res_fl4;
}
}
__global__ void fused_bias_residual(__half* residual,
const __half* hidden_state,
const __half* attn,
const __half* bias,
const __half* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale,
const bool preln)
{
float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float2 res_fl2 = res_fl2_ptr[offset];
const float2 hs_fl2 = hs_fl2_ptr[offset];
const float2 attn_fl2 = attn_fl2_ptr[offset];
const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
__half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2);
const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2);
const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2);
const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2);
const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2);
float2 res_low = __half22float2(res_half2[0]);
float2 res_high = __half22float2(res_half2[1]);
const float2 hs_low = __half22float2(hs_half2[0]);
const float2 hs_high = __half22float2(hs_half2[1]);
const float2 attn_low = __half22float2(attn_half2[0]);
const float2 attn_high = __half22float2(attn_half2[1]);
const float2 bias_low = __half22float2(bias_half2[0]);
const float2 bias_high = __half22float2(bias_half2[1]);
const float2 attn_bias_low = __half22float2(attn_bias_half2[0]);
const float2 attn_bias_high = __half22float2(attn_bias_half2[1]);
if (preln) {
// residual = (residual + attention + bias + attention_bias) *
// mp_scale + hidden_state
res_low.x =
(res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x;
res_low.y =
(res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y;
res_high.x =
(res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x;
res_high.y =
(res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y;
} else {
// residual += hidden_state + bias
res_low.x = (res_low.x + hs_low.x + bias_low.x);
res_low.y = (res_low.y + hs_low.y + bias_low.y);
res_high.x = (res_high.x + hs_high.x + bias_high.x);
res_high.y = (res_high.y + hs_high.y + bias_high.y);
}
res_half2[0] = __float22half2_rn(res_low);
res_half2[1] = __float22half2_rn(res_high);
res_fl2_ptr[offset] = res_fl2;
}
}
template <typename T>
void launch_bias_residual(T* residual,
T* hidden_state,
T* attn,
T* bias,
T* attn_bias,
int batch,
int hidden_dim,
int mp_size,
bool preln,
cudaStream_t stream)
{
int total_count = batch * hidden_dim / 4;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
fused_bias_residual<<<grid_dims, block_dims, 0, stream>>>(residual,
hidden_state,
attn,
bias,
attn_bias,
total_count,
hidden_dim / 4,
1.0 / mp_size,
preln);
}
template void launch_bias_residual<
float>(float*, float*, float*, float*, float*, int, int, int, bool, cudaStream_t);
template void launch_bias_residual<
__half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, cudaStream_t);
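/*
GPT-J style residual update:
  residual = hidden_state + attn + (residual + bias) * mp_scale
with attn_bias, when non-null, added to the residual beforehand. Vectorized as one float4
(float path) / four halves (half path) per thread; mp_scale = 1 / mp_size from the launcher.
*/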
__global__ void gptj_residual_add(float* residual,
const float* hidden_state,
const float* attn,
const float* bias,
const float* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale)
{
float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float4 res_fl4 = res_fl4_ptr[offset];
const float4 hs_fl4 = hs_fl4_ptr[offset];
const float4 attn_fl4 = attn_fl4_ptr[offset];
const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
if (attn_bias) {
float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
// residual += attention_bias
res_fl4.x += attn_bias_fl4.x;
res_fl4.y += attn_bias_fl4.y;
res_fl4.z += attn_bias_fl4.z;
res_fl4.w += attn_bias_fl4.w;
}
// residual = hidden_state + attention + (residual + bias) * mp_scale
res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale;
res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale;
res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale;
res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale;
res_fl4_ptr[offset] = res_fl4;
}
}
__global__ void gptj_residual_add(__half* residual,
const __half* hidden_state,
const __half* attn,
const __half* bias,
const __half* attn_bias,
const int total_count,
const int intermediate_size,
const float mp_scale)
{
float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
const int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < total_count) {
float2 res_fl2 = res_fl2_ptr[offset];
const float2 hs_fl2 = hs_fl2_ptr[offset];
const float2 attn_fl2 = attn_fl2_ptr[offset];
const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
__half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2);
const __half2* hs_half2 = reinterpret_cast<const __half2*>(&hs_fl2);
const __half2* attn_half2 = reinterpret_cast<const __half2*>(&attn_fl2);
const __half2* bias_half2 = reinterpret_cast<const __half2*>(&bias_fl2);
float2 res_low = __half22float2(res_half2[0]);
float2 res_high = __half22float2(res_half2[1]);
const float2 hs_low = __half22float2(hs_half2[0]);
const float2 hs_high = __half22float2(hs_half2[1]);
const float2 attn_low = __half22float2(attn_half2[0]);
const float2 attn_high = __half22float2(attn_half2[1]);
const float2 bias_low = __half22float2(bias_half2[0]);
const float2 bias_high = __half22float2(bias_half2[1]);
if (attn_bias) {
const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
const __half2* attn_bias_half2 = reinterpret_cast<const __half2*>(&attn_bias_fl2);
const float2 attn_bias_low = __half22float2(attn_bias_half2[0]);
const float2 attn_bias_high = __half22float2(attn_bias_half2[1]);
// residual += attention_bias
res_low.x += attn_bias_low.x;
res_low.y += attn_bias_low.y;
res_high.x += attn_bias_high.x;
res_high.y += attn_bias_high.y;
}
// residual = hidden_state + attention + (residual + bias) * mp_scale
res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale;
res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale;
res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale;
res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale;
res_half2[0] = __float22half2_rn(res_low);
res_half2[1] = __float22half2_rn(res_high);
res_fl2_ptr[offset] = res_fl2;
}
}
template <typename T>
void launch_gptj_residual_add(T* residual,
T* hidden_state,
T* attn,
T* bias,
T* attn_bias,
int hidden_dim,
int batch,
int mp_size,
cudaStream_t stream)
{
int total_count = batch * hidden_dim / 4;
dim3 block_dims(1024);
dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
gptj_residual_add<<<grid_dims, block_dims, 0, stream>>>(
residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size);
}
template void launch_gptj_residual_add<float>(float*,
float*,
float*,
float*,
float*,
int,
int,
int,
cudaStream_t);
template void launch_gptj_residual_add<__half>(__half*,
__half*,
__half*,
__half*,
__half*,
int,
int,
int,
cudaStream_t);
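/*
MoE residual blend: mlp_out = mlp_out * coef2 + residual * coef1, where `coef` packs the two
per-channel coefficient vectors back-to-back (coef1 in [0, hidden_dim), coef2 in
[hidden_dim, 2 * hidden_dim)). One block per sequence position, 16-byte vectorized accesses.
*/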
template <typename T>
__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim)
{
constexpr int granularity = 16;
constexpr int vals_per_access = granularity / sizeof(T);
T* residual_seq = residual + blockIdx.x * hidden_dim;
T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim;
for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim;
tid += blockDim.x * vals_per_access) {
T mlp[vals_per_access];
T res[vals_per_access];
T coef1[vals_per_access];
T coef2[vals_per_access];
mem_access::load_global<granularity>(mlp, mlp_out_seq + tid);
mem_access::load_global<granularity>(res, residual_seq + tid);
mem_access::load_global<granularity>(coef1, coef + tid);
mem_access::load_global<granularity>(coef2, coef + tid + hidden_dim);
#pragma unroll
for (int idx = 0; idx < vals_per_access; idx++) {
mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx];
}
mem_access::store_global<granularity>(mlp_out_seq + tid, mlp);
}
}
template <typename T>
void launch_moe_res_matmul(T* residual,
T* coef,
T* mlp_out,
int seq_len,
int hidden_dim,
cudaStream_t stream)
{
dim3 grid_dim(seq_len);
dim3 block_dim(1024);
moe_res_matmul<<<grid_dim, block_dim, 0, stream>>>(
residual, coef, mlp_out, seq_len, hidden_dim);
}
template void launch_moe_res_matmul(float* residual,
float* coef,
float* mlp_out,
int seq_len,
int hidden_dim,
cudaStream_t stream);
template void launch_moe_res_matmul(__half* residual,
__half* coef,
__half* mlp_out,
int seq_len,
int hidden_dim,
cudaStream_t stream);
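/*
Copies each head vector into a larger, zero-padded head dimension. Data is moved in 16-byte
chunks, so the launcher passes head_size and padded_head_size divided by 8 (halves per float4);
threadIdx.x walks the padded head while (blockIdx.x, threadIdx.y) select the row.
Note: the float overload below is an empty stub, only the __half path pads.
*/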
__global__ void pad_data_kernel(__half* padded_output,
__half* output,
int head_size,
int padded_head_size)
{
float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
float4* output_cast = reinterpret_cast<float4*>(output);
int bid = blockIdx.x * (blockDim.y) + threadIdx.y;
int idx = threadIdx.x;
padded_output_cast += (bid * padded_head_size);
output_cast += (bid * head_size);
float4 ZERO;
const __half2 zero_h = __float2half2_rn(0.f);
__half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO);
#pragma unroll
for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
if (idx < head_size)
padded_output_cast[idx] = output_cast[idx];
else
padded_output_cast[idx] = ZERO;
}
__global__ void pad_data_kernel(float* padded_output,
float* output,
int head_size,
int padded_head_size)
{
}
template <typename T>
void pad_data(T* padded_output,
T* output,
int bsz,
int head_size,
int padded_head_size,
cudaStream_t stream)
{
dim3 grid_dim((bsz - 1) / 16 + 1);
dim3 block_dim(padded_head_size / 8, 16);
pad_data_kernel<<<grid_dim, block_dim, 0, stream>>>(
padded_output, output, head_size / 8, padded_head_size / 8);
}
template void pad_data(__half* padded_output,
__half* output,
int bsz,
int head_size,
int padded_head_size,
cudaStream_t stream);
template void pad_data(float* padded_output,
float* output,
int bsz,
int head_size,
int padded_head_size,
cudaStream_t stream);
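// Same idea as pad_data_kernel, but also zero-pads the sequence dimension: rows beyond seq_len
// are filled with zeros in the padded output.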
__global__ void pad_head_seq_kernel(__half* padded_output,
__half* output,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size)
{
float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
float4* output_cast = reinterpret_cast<float4*>(output);
int bsz = blockIdx.x;
int bid = blockIdx.y * (blockDim.y) + threadIdx.y;
int idx = threadIdx.x;
padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size;
output_cast += (bsz * seq_len + bid) * head_size;
float4 ZERO;
const __half2 zero_h = __float2half2_rn(0.f);
__half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO);
#pragma unroll
for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
if (idx < head_size && bid < seq_len)
padded_output_cast[idx] = output_cast[idx];
else
padded_output_cast[idx] = ZERO;
}
__global__ void pad_head_seq_kernel(float* padded_output,
float* output,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size)
{
}
template <typename T>
void pad_head_seq(T* padded_output,
T* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
cudaStream_t stream)
{
dim3 grid_dim(bsz, padded_seq_len / 16);
dim3 block_dim(padded_head_size / 8, 16);
pad_head_seq_kernel<<<grid_dim, block_dim, 0, stream>>>(
padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8);
}
template void pad_head_seq(__half* padded_output,
__half* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
cudaStream_t stream);
template void pad_head_seq(float* padded_output,
float* output,
int bsz,
int seq_len,
int padded_seq_len,
int head_size,
int padded_head_size,
cudaStream_t stream);
// TODO(cmikeh2): evaluate different GeLU performance
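// erf-based GeLU, distinct from the tanh-approximation gelu() used by the fused bias-GeLU kernel.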
__device__ __forceinline__ float old_gelu(float val)
{
// 1 / sqrt(2)
constexpr float rsqrt_2 = 0.707106769084930419922;
return val * 0.5f * (1.0f + erff(val * rsqrt_2));
}
namespace fused_geglu {
constexpr int threads = 256;
constexpr int steps = 2;
constexpr int granularity = 16;
} // namespace fused_geglu
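/*
Gated GeLU with fused bias. For each output channel c in [0, base_channels):
  output[c] = (activation[c] + bias[c]) * old_gelu(activation[c + base_channels] + bias[c + base_channels])
where every row of `activation` holds 2 * base_channels values. Each thread processes
fused_geglu::steps chunks of 16 bytes, evaluating the GeLU in fp32.
*/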
template <typename T>
__global__ void fused_bias_geglu(T* output,
const T* activation,
const T* bias,
int base_channels,
int total_elems)
{
constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
constexpr int T_per_step = T_per_access * fused_geglu::threads;
constexpr int T_per_block = T_per_step * fused_geglu::steps;
const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access;
#pragma unroll
for (int i = 0; i < fused_geglu::steps; i++) {
T activation_buffer_1[T_per_access];
T activation_buffer_2[T_per_access];
T bias_buffer_1[T_per_access];
T bias_buffer_2[T_per_access];
const int iter_id = id + T_per_step * i;
if (iter_id < total_elems) {
const int channel_id = iter_id % base_channels;
const int seq_id = iter_id / base_channels;
const int seq_offset = seq_id * base_channels * 2;
mem_access::load_global<fused_geglu::granularity>(activation_buffer_1,
activation + seq_offset + channel_id);
mem_access::load_global<fused_geglu::granularity>(
activation_buffer_2, activation + seq_offset + channel_id + base_channels);
mem_access::load_global<fused_geglu::granularity>(bias_buffer_1, bias + channel_id);
mem_access::load_global<fused_geglu::granularity>(bias_buffer_2,
bias + channel_id + base_channels);
// Since the GeLU is going to happen at float, might as well
// convert
#pragma unroll
for (int v = 0; v < T_per_access; v++) {
T hidden_state = activation_buffer_1[v] + bias_buffer_1[v];
T pre_gate = activation_buffer_2[v] + bias_buffer_2[v];
float gate_f = old_gelu(conversion::to<float>(pre_gate));
T gate = conversion::to<T>(gate_f);
activation_buffer_1[v] = hidden_state * gate;
}
mem_access::store_global<fused_geglu::granularity>(output + iter_id,
activation_buffer_1);
}
}
}
template <typename T>
void launch_fused_bias_geglu(T* output,
const T* activation,
const T* bias,
int rows,
int elems_per_row,
cudaStream_t stream)
{
/*
Fused bias GEGLU is a variant of the gated activation functions.
The input here is a matrix of [batch, seq_len, 2 * intermediate_dim]
where the second half of the channels act as GeLU gates for the first
half.
*/
    // Re-derive the vectorization constants defined in the fused_geglu namespace
constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
constexpr int T_per_step = T_per_access * fused_geglu::threads;
constexpr int T_per_block = T_per_step * fused_geglu::steps;
const int base_channels = elems_per_row / 2;
const int total_elems = base_channels * rows;
dim3 block(fused_geglu::threads);
dim3 grid((total_elems + T_per_block - 1) / T_per_block);
fused_bias_geglu<<<grid, block, 0, stream>>>(
output, activation, bias, base_channels, total_elems);
}
template void launch_fused_bias_geglu(__half*,
const __half*,
const __half*,
int,
int,
cudaStream_t);
template void launch_fused_bias_geglu(float*, const float*, const float*, int, int, cudaStream_t);
|
10b75098496c27051d75bfcf0527987b3efeb2a7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <limits>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cg = cooperative_groups;
#include "PseudoJet.h"
#include "cluster.h"
#include "cudaCheck.h"
#include "launch.h"
#pragma region consts
const double MaxRap = 1e5;
#pragma endregion
#pragma region struct
template <typename T>
__host__ __device__ inline void swap(T &a, T &b) {
auto t = std::move(a);
a = std::move(b);
b = std::move(t);
}
using GridIndexType = int;
using ParticleIndexType = int;
struct PseudoJetExt {
int index;
bool isJet;
double px;
double py;
double pz;
double E;
double rap;
double phi;
double diB;
GridIndexType u;
GridIndexType v;
};
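// A candidate pairing: the clustering distance between pseudojets i and j, or the beam distance
// diB when i == j.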
struct Dist {
double distance;
ParticleIndexType i;
ParticleIndexType j;
};
struct Cell {
GridIndexType u;
GridIndexType v;
};
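// Uniform (rapidity, phi) grid used to restrict the pairwise distance search to neighbouring
// cells. Each cell stores up to n jet indices (terminated by -1), plus, per cell, the 9 cached
// minima towards itself and its neighbours (`neighbours`) and their overall minimum (`minimum`).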
struct Grid {
const double r;
const double min_rap;
const double max_rap;
const double min_phi;
const double max_phi;
const GridIndexType max_u;
const GridIndexType max_v;
const ParticleIndexType n;
ParticleIndexType *jets;
Dist *minimum;
Dist *neighbours;
  // TODO use a smaller grid size (estimate from distributions in data/mc)
  // TODO use a SoA
__host__ Grid(double min_rap_, double max_rap_, double min_phi_, double max_phi_, double r_, ParticleIndexType n_)
: r((2 * M_PI) / (int)((2 * M_PI) / r_)), // round up the grid size to have an integer number of cells in phi
min_rap(min_rap_),
max_rap(max_rap_),
min_phi(min_phi_),
max_phi(max_phi_),
max_u((GridIndexType)(((max_rap - min_rap) / r))),
max_v((GridIndexType)(((max_phi - min_phi) / r))),
n(n_),
jets(nullptr),
minimum(nullptr),
neighbours(nullptr)
{}
__host__ __device__ constexpr inline GridIndexType u(double rap) const {
return (GridIndexType)((rap - min_rap) / r);
}
__host__ __device__ constexpr inline GridIndexType v(double phi) const {
return (GridIndexType)((phi - min_phi) / r);
}
__host__ __device__ constexpr inline double rap_min(GridIndexType u) const { return min_rap + r * u; }
__host__ __device__ constexpr inline double rap_max(GridIndexType u) const { return min_rap + r * (u + 1); }
__host__ __device__ constexpr inline double phi_min(GridIndexType v) const { return min_phi + r * v; }
__host__ __device__ constexpr inline double phi_max(GridIndexType v) const { return min_phi + r * (v + 1); }
__host__ __device__ constexpr inline int size() const { return (int) max_u * max_v; }
__host__ __device__ constexpr inline int index(GridIndexType u, GridIndexType v) const { return (int)max_v * u + v; }
__host__ __device__ constexpr inline int offset(GridIndexType u, GridIndexType v) const { return index(u, v) * n; }
};
#pragma endregion
#pragma region device_functions
__host__ __device__ constexpr inline double safe_inverse(double x) { return (x > 1e-300) ? (1.0 / x) : 1e300; }
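// Fills in the derived quantities of a pseudojet: phi in [0, 2*pi), rapidity (with safeguards for
// zero-pt momenta), the "beam" distance diB for the chosen algorithm (pt^2 for kt, 1 for
// Cambridge/Aachen, 1/pt^2 for anti-kt), and its (u, v) grid cell.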
__host__ __device__ void _set_jet(Grid const &grid, PseudoJetExt &jet, Algorithm algo) {
auto pt2 = jet.px * jet.px + jet.py * jet.py;
jet.isJet = false;
if (pt2 == 0.0) {
jet.phi = 0.0;
} else {
jet.phi = std::atan2(jet.py, jet.px);
if (jet.phi < 0.0) {
jet.phi += (2 * M_PI);
}
// this should never happen !
// can happen if phi=-|eps<1e-15| ?
if (jet.phi >= (2 * M_PI)) {
jet.phi -= (2 * M_PI);
}
}
if (jet.E == std::abs(jet.pz) and pt2 == 0) {
// Point has infinite rapidity -- convert that into a very large
// number, but in such a way that different 0-pt momenta will have
// different rapidities (so as to lift the degeneracy between
// them) [this can be relevant at parton-level]
double MaxRapHere = MaxRap + std::abs(jet.pz);
if (jet.pz >= 0.0) {
jet.rap = MaxRapHere;
} else {
jet.rap = -MaxRapHere;
}
} else {
// get the rapidity in a way that's modestly insensitive to roundoff
// error when things pz,E are large (actually the best we can do without
// explicit knowledge of mass)
double effective_m2 = ::max(0.0, (jet.E + jet.pz) * (jet.E - jet.pz) - pt2); // force non tachyonic mass
double E_plus_pz = jet.E + std::abs(jet.pz); // the safer of p+, p-
// p+/p- = (p+ p-) / (p-)^2 = (kt^2+m^2)/(p-)^2
jet.rap = 0.5 * ::log((pt2 + effective_m2) / (E_plus_pz * E_plus_pz));
if (jet.pz > 0) {
jet.rap = -jet.rap;
}
}
// set the "weight" used depending on the jet algorithm
switch (algo) {
case Algorithm::Kt:
jet.diB = pt2;
break;
case Algorithm::CambridgeAachen:
jet.diB = 1.;
break;
case Algorithm::AntiKt:
jet.diB = safe_inverse(pt2);
break;
}
// set the grid coordinates
jet.u = grid.u(jet.rap);
jet.v = grid.v(jet.phi);
}
__device__ double plain_distance(const PseudoJetExt &p1, const PseudoJetExt &p2) {
double dphi = std::abs(p1.phi - p2.phi);
if (dphi > M_PI) {
dphi = (2 * M_PI) - dphi;
}
double drap = p1.rap - p2.rap;
return (dphi * dphi + drap * drap);
}
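// Pairwise distance dij = min(diB_i, diB_j) * DeltaR_ij^2 / R^2; for i == j this reduces to the
// beam distance diB_i. Indices are returned with i <= j.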
__device__ Dist yij_distance(const PseudoJetExt *pseudojets,
ParticleIndexType i,
ParticleIndexType j,
double one_over_r2) {
if (i > j) {
::swap(i, j);
}
Dist d;
d.i = i;
d.j = j;
if (i == j) {
d.distance = pseudojets[i].diB;
} else {
d.distance = min(pseudojets[i].diB, pseudojets[j].diB) * plain_distance(pseudojets[i], pseudojets[j]) * one_over_r2;
}
return d;
}
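// Smallest distance among all pairs (including the i == i beam distances) of the jets stored in
// cell (u, v).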
__device__ Dist minimum_pair_in_cell(Grid const &grid,
const PseudoJetExt *pseudojets,
const GridIndexType u, // cell coordinates
const GridIndexType v,
double one_over_r2) {
int index = grid.index(u, v);
Dist min{ std::numeric_limits<double>::infinity(), -1, -1 };
int k = 0;
GridIndexType first = grid.jets[index * grid.n + k];
while (first >= 0) {
for (int l = 0; l <= k; ++l) {
GridIndexType second = grid.jets[index * grid.n + l];
auto temp = yij_distance(pseudojets, first, second, one_over_r2);
if (temp.distance < min.distance)
min = temp;
}
++k;
first = grid.jets[index * grid.n + k];
}
return min;
}
__device__ Dist minimum_pair_in_cells(Grid const &grid,
const PseudoJetExt *pseudojets,
const GridIndexType first_u,
const GridIndexType first_v,
const GridIndexType second_u,
const GridIndexType second_v,
double one_over_r2) {
int first_index = grid.index(first_u, first_v);
int second_index = grid.index(second_u, second_v);
Dist min{ std::numeric_limits<double>::infinity(), -1, -1 };
int k = 0;
GridIndexType first = grid.jets[first_index * grid.n + k];
while (first >= 0) {
int l = 0;
GridIndexType second = grid.jets[second_index * grid.n + l];
while (second >= 0) {
auto temp = yij_distance(pseudojets, first, second, one_over_r2);
if (temp.distance < min.distance)
min = temp;
++l;
second = grid.jets[second_index * grid.n + l];
}
++k;
first = grid.jets[first_index * grid.n + k];
}
return min;
}
__device__ Dist minimum_in_cell(Grid const &grid,
const PseudoJetExt *pseudojets,
Dist min,
const ParticleIndexType tid, // jet index
const GridIndexType i, // cell coordinates
const GridIndexType j,
double one_over_r2) {
int k = 0;
int index = grid.index(i, j);
ParticleIndexType num = grid.jets[index * grid.n + k];
Dist temp;
while (num >= 0) {
temp = yij_distance(pseudojets, tid, num, one_over_r2);
if (temp.distance < min.distance)
min = temp;
k++;
num = grid.jets[index * grid.n + k];
}
return min;
}
__device__ void remove_from_grid(Grid const &grid, ParticleIndexType jet, const PseudoJetExt &p) {
// Remove an element from a grid cell, and shift all following elements to fill the gap
int index = grid.index(p.u, p.v);
int first, last;
for (int k = 0; k < grid.n; ++k) {
ParticleIndexType num = grid.jets[index * grid.n + k];
if (num == jet) {
first = k;
} else if (num == -1) {
last = k;
break;
}
// FIXME handle the case where the jet is not found
// FIXME handle the case where the cell is full
}
if (first != last - 1) {
grid.jets[index * grid.n + first] = grid.jets[index * grid.n + last - 1];
}
// set the last entry to invalid
grid.jets[index * grid.n + last - 1] = -1;
}
__device__ void add_to_grid(Grid const &grid, ParticleIndexType jet, const PseudoJetExt &p) {
// Add a jet as the last element of a grid cell
int index = grid.index(p.u, p.v);
for (int k = 0; k < grid.n; ++k) {
// if the k-th element is -1, replace it with the jet id
if (atomicCAS(&grid.jets[index * grid.n + k], -1, jet) == -1) {
break;
}
// FIXME handle the case where the cell is full
}
}
#pragma endregion
#pragma region kernels
__global__ void set_jets_coordiinates(Grid grid, PseudoJetExt *particles, const ParticleIndexType n, Algorithm algo) {
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int tid = start; tid < n; tid += stride) {
_set_jet(grid, particles[tid], algo);
//printf("particle %3d has (rap,phi,pT) = (%f,%f,%f) and cell (i,j) = (%d,%d)\n", tid, p.rap, p.phi, sqrt(p.diB), p.u, p.j);
}
}
__global__ void set_jets_to_grid(Grid grid, PseudoJetExt *particles, const ParticleIndexType n, Algorithm algo) {
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int tid = start; tid < n; tid += stride) {
add_to_grid(grid, tid, particles[tid]);
}
}
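// One thread per grid cell: compute the minimum distance within the cell and towards each of its
// (up to) 8 neighbours, caching the 9 partial minima in grid.neighbours and their overall minimum
// in grid.minimum. Empty cells get infinite-distance placeholders.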
__global__ void compute_initial_distances(Grid grid, PseudoJetExt *pseudojets, const ParticleIndexType n, double r) {
const double one_over_r2 = 1. / (r * r);
const Dist none { std::numeric_limits<double>::infinity(), -1, -1 };
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int index = start; index < grid.max_u * grid.max_v; index += stride) {
GridIndexType i = index / grid.max_v;
GridIndexType j = index % grid.max_v;
auto jet = grid.jets[index * grid.n];
// check if the cell is empty
if (jet == -1) {
for (int k = 0; k < 9; ++k)
grid.neighbours[index * 9 + k] = none;
grid.minimum[index] = none;
} else {
// FIXME use 9 threads ?
GridIndexType j_plus = (j + 1 < grid.max_v) ? j + 1 : 0;
GridIndexType j_minus = (j - 1 >= 0) ? j - 1 : grid.max_v - 1;
auto min = none;
auto tmp = none;
min = minimum_pair_in_cell(grid, pseudojets, i, j, one_over_r2);
grid.neighbours[index * 9 + 4] = min;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i, j_minus, one_over_r2);
grid.neighbours[index * 9 + 3] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i, j_plus, one_over_r2);
grid.neighbours[index * 9 + 5] = tmp;
if (tmp.distance < min.distance) min = tmp;
if (i - 1 >= 0) {
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j_minus, one_over_r2);
grid.neighbours[index * 9 + 0] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j, one_over_r2);
grid.neighbours[index * 9 + 1] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j_plus, one_over_r2);
grid.neighbours[index * 9 + 2] = tmp;
if (tmp.distance < min.distance) min = tmp;
} else {
grid.neighbours[index * 9 + 0] = none;
grid.neighbours[index * 9 + 1] = none;
grid.neighbours[index * 9 + 2] = none;
}
if (i + 1 < grid.max_u) {
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j_minus, one_over_r2);
grid.neighbours[index * 9 + 6] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j, one_over_r2);
grid.neighbours[index * 9 + 7] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j_plus, one_over_r2);
grid.neighbours[index * 9 + 8] = tmp;
if (tmp.distance < min.distance) min = tmp;
} else {
grid.neighbours[index * 9 + 6] = none;
grid.neighbours[index * 9 + 7] = none;
grid.neighbours[index * 9 + 8] = none;
}
grid.minimum[index] = min;
}
}
}
constexpr const int n_neighbours = 9; // self, plus 8 neighbours
constexpr const int n_affected = 3; // 3 possibly affected cells
constexpr const int active_threads = n_neighbours * n_affected; // 1 cell + 8 neighbours, times 3 possibly affected cells
// reduce_recombine(...) must be called with at least active_threads (27) threads
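// Single-block kernel iterating once per input particle: a shared-memory reduction finds the
// globally closest pair, which is either promoted to a final jet (i == j) or recombined into a new
// pseudojet; afterwards only the (at most 3) affected cells and their neighbourhoods have their
// cached minima recomputed.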
__global__ void reduce_recombine(
Grid grid, PseudoJetExt *pseudojets, ParticleIndexType n, Algorithm algo, const double r) {
extern __shared__ Dist minima[];
int start = threadIdx.x;
int stride = blockDim.x;
const double one_over_r2 = 1. / (r * r);
const Dist none { std::numeric_limits<double>::infinity(), -1, -1 };
__shared__ Cell affected[n_affected];
while (true) {
// copy the minimum distances into shared memory
for (int index = start; index < grid.max_u * grid.max_v; index += stride) {
minima[index] = grid.minimum[index];
}
__syncthreads();
// find the largest power of 2 smaller than the grid size
unsigned int width = (1u << 31) >> __clz(grid.max_u * grid.max_v - 1);
// find the global minimum
Dist min = none;
for (unsigned int s = width; s >= 16; s >>= 1) {
for (int tid = threadIdx.x; tid < s and tid + s < grid.max_u * grid.max_v; tid += blockDim.x) {
if (minima[tid + s].distance < minima[tid].distance) {
minima[tid] = minima[tid + s];
}
}
__syncthreads();
}
// use a single thread for the last iterations, to avoid bank conflicts and synchronisations
if (threadIdx.x == 0) {
for (int tid = 0; tid < 16; ++tid) {
if (minima[tid].distance < min.distance) {
min = minima[tid];
}
}
}
__syncthreads();
// promote or recombine the minimum pseudojet(s)
if (threadIdx.x == 0) {
if (min.i == min.j) {
// remove the pseudojet at position min.j from the grid and promote it to jet status
//printf("will promote pseudojet %d (%d,%d) with distance %f\n", min.j, pseudojets[min.j].u, pseudojets[min.j].v, min.distance);
pseudojets[min.j].isJet = true;
auto jet = pseudojets[min.j];
remove_from_grid(grid, min.j, jet);
affected[0] = { jet.u, jet.v };
affected[1] = { -1, -1 };
affected[2] = { -1, -1 };
} else {
//printf("will recombine pseudojets %d (%d,%d) and %d (%d,%d) with distance %f\n", min.i, pseudojets[min.i].u, pseudojets[min.i].v, min.j, pseudojets[min.j].u, pseudojets[min.j].v, min.distance);
auto ith = pseudojets[min.i];
auto jth = pseudojets[min.j];
remove_from_grid(grid, min.i, ith);
remove_from_grid(grid, min.j, jth);
affected[0] = { ith.u, ith.v };
if (jth.u != ith.u or jth.v != ith.v) {
affected[1] = { jth.u, jth.v };
} else {
affected[1] = { -1, -1 };
}
// recombine the two pseudojets
PseudoJetExt pseudojet;
pseudojet.px = ith.px + jth.px;
pseudojet.py = ith.py + jth.py;
pseudojet.pz = ith.pz + jth.pz;
pseudojet.E = ith.E + jth.E;
_set_jet(grid, pseudojet, algo);
add_to_grid(grid, min.i, pseudojet);
pseudojets[min.i] = pseudojet;
if ((pseudojet.u != ith.u or pseudojet.v != ith.v) and
(pseudojet.u != jth.u or pseudojet.v != jth.v)) {
affected[2] = { pseudojet.u, pseudojet.v };
} else {
affected[2] = { -1, -1 };
}
}
}
__syncthreads();
if (--n == 0)
break;
int tid = start;
if (tid < active_threads) {
int self = tid / n_neighbours; // potentially affected cell (0..2)
int cell = tid % n_neighbours; // neighbour id (0..8)
GridIndexType u = affected[self].u;
GridIndexType v = affected[self].v;
// consider only the affected cells
if (u >= 0 and v >= 0) {
auto g = cg::coalesced_threads();
const int index = grid.index(u, v);
// check if the cell is empty
bool empty = (grid.jets[index * grid.n] == -1);
// evaluate the neighbouring cells
const int delta_u = cell / 3 - 1;
const int delta_v = cell % 3 - 1;
const GridIndexType other_u = u + delta_u;
const GridIndexType other_v = (v + delta_v + grid.max_v) % grid.max_v;
const bool central = (cell == 4);
const bool outside = other_u < 0 or other_u >= grid.max_u;
// update the local minima
if (central) {
grid.neighbours[index * n_neighbours + cell] = empty ? none : minimum_pair_in_cell(grid, pseudojets, u, v, one_over_r2);
} else if (outside) {
grid.neighbours[index * n_neighbours + cell] = none;
} else {
auto tmp = empty ? none : minimum_pair_in_cells(grid, pseudojets, u, v, other_u, other_v, one_over_r2);
grid.neighbours[index * n_neighbours + cell] = tmp;
grid.neighbours[grid.index(other_u, other_v) * n_neighbours + (n_neighbours - 1) - cell] = tmp;
}
// synchronise the active threads
g.sync();
// update the minimum in neighbouring cells
if (not outside) {
const int other = grid.index(other_u, other_v);
auto min = none;
for (int k = 0; k < n_neighbours; ++k) {
auto tmp = grid.neighbours[other * n_neighbours + k];
if (tmp.distance < min.distance) min = tmp;
}
grid.minimum[other] = min;
}
}
}
__syncthreads();
}
}
#pragma endregion
__global__ void init(const PseudoJet *particles, PseudoJetExt *jets, int size) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = first; i < size; i += stride) {
jets[i].px = particles[i].px;
jets[i].py = particles[i].py;
jets[i].pz = particles[i].pz;
jets[i].E = particles[i].E;
jets[i].index = particles[i].index;
jets[i].isJet = particles[i].isJet;
}
}
__global__ void output(const PseudoJetExt *jets, PseudoJet *particles, int size) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = first; i < size; i += stride) {
particles[i].px = jets[i].px;
particles[i].py = jets[i].py;
particles[i].pz = jets[i].pz;
particles[i].E = jets[i].E;
particles[i].index = jets[i].index;
particles[i].isJet = jets[i].isJet;
}
}
void cluster(PseudoJet *particles, int size, Algorithm algo, double r) {
#pragma region vectors
// examples from FastJet span |rap| < 10
// TODO: make the rap range dynamic, based on the data themselves
// TODO: make the cell size dynamic, based on the data themselves
// TODO: try to use __constant__ memory for config
Grid grid(-10., +10., 0, 2 * M_PI, r, size);
cudaCheck(hipMalloc(&grid.jets, sizeof(ParticleIndexType) * grid.max_u * grid.max_v * grid.n));
cudaCheck(hipMemset(grid.jets, 0xff, sizeof(ParticleIndexType) * grid.max_u * grid.max_v * grid.n));
cudaCheck(hipMalloc(&grid.minimum, sizeof(Dist) * grid.max_u * grid.max_v));
cudaCheck(hipMalloc(&grid.neighbours, sizeof(Dist) * grid.max_u * grid.max_v * 9));
PseudoJetExt *pseudojets;
cudaCheck(hipMalloc(&pseudojets, size * sizeof(PseudoJetExt)));
#pragma endregion
#pragma region kernel_launches
LaunchParameters l;
// copy the particles from the input buffer to the pseudojet structures
l = estimateMinimalGrid(init, size);
hipLaunchKernelGGL(( init), dim3(l.gridSize), dim3(l.blockSize), 0, 0, particles, pseudojets, size);
cudaCheck(hipGetLastError());
  // compute the jets' cylindrical coordinates and grid indices
l = estimateMinimalGrid(set_jets_coordiinates, size);
hipLaunchKernelGGL(( set_jets_coordiinates), dim3(l.gridSize), dim3(l.blockSize), 0, 0, grid, pseudojets, size, algo);
cudaCheck(hipGetLastError());
// sort the inputs according to their grid coordinates and "beam" clustering distance
thrust::sort(thrust::device, pseudojets, pseudojets + size, [] __device__(auto const &a, auto const &b) {
return (a.u < b.u) or (a.u == b.u and a.v < b.v) or (a.u == b.u and a.v == b.v and a.diB < b.diB);
});
// organise the jets in the grid
l = estimateMinimalGrid(set_jets_to_grid, size);
hipLaunchKernelGGL(( set_jets_to_grid), dim3(l.gridSize), dim3(l.blockSize), 0, 0, grid, pseudojets, size, algo);
cudaCheck(hipGetLastError());
// compute the minimum distances in all grid cells
l = estimateMinimalGrid(compute_initial_distances, grid.size());
hipLaunchKernelGGL(( compute_initial_distances), dim3(l.gridSize), dim3(l.blockSize), 0, 0, grid, pseudojets, size, r);
cudaCheck(hipGetLastError());
// recombine the particles into jets
l = estimateSingleBlock(reduce_recombine, grid.size());
int sharedMemory = sizeof(Dist) * grid.size();
hipLaunchKernelGGL(( reduce_recombine), dim3(l.gridSize), dim3(l.blockSize), sharedMemory, 0, grid, pseudojets, size, algo, r);
cudaCheck(hipGetLastError());
// copy the clustered jets back to the input buffer
l = estimateMinimalGrid(output, size);
hipLaunchKernelGGL(( output), dim3(l.gridSize), dim3(l.blockSize), 0, 0, pseudojets, particles, size);
cudaCheck(hipGetLastError());
#pragma endregion
cudaCheck(hipFree(pseudojets));
cudaCheck(hipFree(grid.jets));
cudaCheck(hipFree(grid.minimum));
cudaCheck(hipFree(grid.neighbours));
}
| 10b75098496c27051d75bfcf0527987b3efeb2a7.cu | #include <cmath>
#include <limits>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cg = cooperative_groups;
#include "PseudoJet.h"
#include "cluster.h"
#include "cudaCheck.h"
#include "launch.h"
#pragma region consts
const double MaxRap = 1e5;
#pragma endregion
#pragma region struct
template <typename T>
__host__ __device__ inline void swap(T &a, T &b) {
auto t = std::move(a);
a = std::move(b);
b = std::move(t);
}
using GridIndexType = int;
using ParticleIndexType = int;
struct PseudoJetExt {
int index;
bool isJet;
double px;
double py;
double pz;
double E;
double rap;
double phi;
double diB;
GridIndexType u;
GridIndexType v;
};
struct Dist {
double distance;
ParticleIndexType i;
ParticleIndexType j;
};
struct Cell {
GridIndexType u;
GridIndexType v;
};
struct Grid {
const double r;
const double min_rap;
const double max_rap;
const double min_phi;
const double max_phi;
const GridIndexType max_u;
const GridIndexType max_v;
const ParticleIndexType n;
ParticleIndexType *jets;
Dist *minimum;
Dist *neighbours;
// TODO use a smaller grid size (estimate from distributions in data/mc)
// TODO use an SoA
__host__ Grid(double min_rap_, double max_rap_, double min_phi_, double max_phi_, double r_, ParticleIndexType n_)
: r((2 * M_PI) / (int)((2 * M_PI) / r_)), // round up the grid size to have an integer number of cells in phi
min_rap(min_rap_),
max_rap(max_rap_),
min_phi(min_phi_),
max_phi(max_phi_),
max_u((GridIndexType)(((max_rap - min_rap) / r))),
max_v((GridIndexType)(((max_phi - min_phi) / r))),
n(n_),
jets(nullptr),
minimum(nullptr),
neighbours(nullptr)
{}
__host__ __device__ constexpr inline GridIndexType u(double rap) const {
return (GridIndexType)((rap - min_rap) / r);
}
__host__ __device__ constexpr inline GridIndexType v(double phi) const {
return (GridIndexType)((phi - min_phi) / r);
}
__host__ __device__ constexpr inline double rap_min(GridIndexType u) const { return min_rap + r * u; }
__host__ __device__ constexpr inline double rap_max(GridIndexType u) const { return min_rap + r * (u + 1); }
__host__ __device__ constexpr inline double phi_min(GridIndexType v) const { return min_phi + r * v; }
__host__ __device__ constexpr inline double phi_max(GridIndexType v) const { return min_phi + r * (v + 1); }
__host__ __device__ constexpr inline int size() const { return (int) max_u * max_v; }
__host__ __device__ constexpr inline int index(GridIndexType u, GridIndexType v) const { return (int)max_v * u + v; }
__host__ __device__ constexpr inline int offset(GridIndexType u, GridIndexType v) const { return index(u, v) * n; }
};
#pragma endregion
#pragma region device_functions
__host__ __device__ constexpr inline double safe_inverse(double x) { return (x > 1e-300) ? (1.0 / x) : 1e300; }
__host__ __device__ void _set_jet(Grid const &grid, PseudoJetExt &jet, Algorithm algo) {
auto pt2 = jet.px * jet.px + jet.py * jet.py;
jet.isJet = false;
if (pt2 == 0.0) {
jet.phi = 0.0;
} else {
jet.phi = std::atan2(jet.py, jet.px);
if (jet.phi < 0.0) {
jet.phi += (2 * M_PI);
}
// this should never happen !
// can happen if phi=-|eps<1e-15| ?
if (jet.phi >= (2 * M_PI)) {
jet.phi -= (2 * M_PI);
}
}
if (jet.E == std::abs(jet.pz) and pt2 == 0) {
// Point has infinite rapidity -- convert that into a very large
// number, but in such a way that different 0-pt momenta will have
// different rapidities (so as to lift the degeneracy between
// them) [this can be relevant at parton-level]
double MaxRapHere = MaxRap + std::abs(jet.pz);
if (jet.pz >= 0.0) {
jet.rap = MaxRapHere;
} else {
jet.rap = -MaxRapHere;
}
} else {
// get the rapidity in a way that's modestly insensitive to roundoff
// error when things pz,E are large (actually the best we can do without
// explicit knowledge of mass)
double effective_m2 = ::max(0.0, (jet.E + jet.pz) * (jet.E - jet.pz) - pt2); // force non tachyonic mass
double E_plus_pz = jet.E + std::abs(jet.pz); // the safer of p+, p-
// p+/p- = (p+ p-) / (p-)^2 = (kt^2+m^2)/(p-)^2
jet.rap = 0.5 * std::log((pt2 + effective_m2) / (E_plus_pz * E_plus_pz));
if (jet.pz > 0) {
jet.rap = -jet.rap;
}
}
// set the "weight" used depending on the jet algorithm
switch (algo) {
case Algorithm::Kt:
jet.diB = pt2;
break;
case Algorithm::CambridgeAachen:
jet.diB = 1.;
break;
case Algorithm::AntiKt:
jet.diB = safe_inverse(pt2);
break;
}
// set the grid coordinates
jet.u = grid.u(jet.rap);
jet.v = grid.v(jet.phi);
}
__device__ double plain_distance(const PseudoJetExt &p1, const PseudoJetExt &p2) {
double dphi = std::abs(p1.phi - p2.phi);
if (dphi > M_PI) {
dphi = (2 * M_PI) - dphi;
}
double drap = p1.rap - p2.rap;
return (dphi * dphi + drap * drap);
}
__device__ Dist yij_distance(const PseudoJetExt *pseudojets,
ParticleIndexType i,
ParticleIndexType j,
double one_over_r2) {
if (i > j) {
::swap(i, j);
}
Dist d;
d.i = i;
d.j = j;
if (i == j) {
d.distance = pseudojets[i].diB;
} else {
d.distance = min(pseudojets[i].diB, pseudojets[j].diB) * plain_distance(pseudojets[i], pseudojets[j]) * one_over_r2;
}
return d;
}
__device__ Dist minimum_pair_in_cell(Grid const &grid,
const PseudoJetExt *pseudojets,
const GridIndexType u, // cell coordinates
const GridIndexType v,
double one_over_r2) {
int index = grid.index(u, v);
Dist min{ std::numeric_limits<double>::infinity(), -1, -1 };
int k = 0;
GridIndexType first = grid.jets[index * grid.n + k];
while (first >= 0) {
for (int l = 0; l <= k; ++l) {
GridIndexType second = grid.jets[index * grid.n + l];
auto temp = yij_distance(pseudojets, first, second, one_over_r2);
if (temp.distance < min.distance)
min = temp;
}
++k;
first = grid.jets[index * grid.n + k];
}
return min;
}
__device__ Dist minimum_pair_in_cells(Grid const &grid,
const PseudoJetExt *pseudojets,
const GridIndexType first_u,
const GridIndexType first_v,
const GridIndexType second_u,
const GridIndexType second_v,
double one_over_r2) {
int first_index = grid.index(first_u, first_v);
int second_index = grid.index(second_u, second_v);
Dist min{ std::numeric_limits<double>::infinity(), -1, -1 };
int k = 0;
GridIndexType first = grid.jets[first_index * grid.n + k];
while (first >= 0) {
int l = 0;
GridIndexType second = grid.jets[second_index * grid.n + l];
while (second >= 0) {
auto temp = yij_distance(pseudojets, first, second, one_over_r2);
if (temp.distance < min.distance)
min = temp;
++l;
second = grid.jets[second_index * grid.n + l];
}
++k;
first = grid.jets[first_index * grid.n + k];
}
return min;
}
__device__ Dist minimum_in_cell(Grid const &grid,
const PseudoJetExt *pseudojets,
Dist min,
const ParticleIndexType tid, // jet index
const GridIndexType i, // cell coordinates
const GridIndexType j,
double one_over_r2) {
int k = 0;
int index = grid.index(i, j);
ParticleIndexType num = grid.jets[index * grid.n + k];
Dist temp;
while (num >= 0) {
temp = yij_distance(pseudojets, tid, num, one_over_r2);
if (temp.distance < min.distance)
min = temp;
k++;
num = grid.jets[index * grid.n + k];
}
return min;
}
__device__ void remove_from_grid(Grid const &grid, ParticleIndexType jet, const PseudoJetExt &p) {
// Remove an element from a grid cell, and shift all following elements to fill the gap
int index = grid.index(p.u, p.v);
int first, last;
for (int k = 0; k < grid.n; ++k) {
ParticleIndexType num = grid.jets[index * grid.n + k];
if (num == jet) {
first = k;
} else if (num == -1) {
last = k;
break;
}
// FIXME handle the case where the jet is not found
// FIXME handle the case where the cell is full
}
if (first != last - 1) {
grid.jets[index * grid.n + first] = grid.jets[index * grid.n + last - 1];
}
// set the last entry to invalid
grid.jets[index * grid.n + last - 1] = -1;
}
__device__ void add_to_grid(Grid const &grid, ParticleIndexType jet, const PseudoJetExt &p) {
// Add a jet as the last element of a grid cell
int index = grid.index(p.u, p.v);
for (int k = 0; k < grid.n; ++k) {
// if the k-th element is -1, replace it with the jet id
if (atomicCAS(&grid.jets[index * grid.n + k], -1, jet) == -1) {
break;
}
// FIXME handle the case where the cell is full
}
}
#pragma endregion
#pragma region kernels
__global__ void set_jets_coordiinates(Grid grid, PseudoJetExt *particles, const ParticleIndexType n, Algorithm algo) {
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int tid = start; tid < n; tid += stride) {
_set_jet(grid, particles[tid], algo);
//printf("particle %3d has (rap,phi,pT) = (%f,%f,%f) and cell (i,j) = (%d,%d)\n", tid, p.rap, p.phi, sqrt(p.diB), p.u, p.j);
}
}
__global__ void set_jets_to_grid(Grid grid, PseudoJetExt *particles, const ParticleIndexType n, Algorithm algo) {
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int tid = start; tid < n; tid += stride) {
add_to_grid(grid, tid, particles[tid]);
}
}
__global__ void compute_initial_distances(Grid grid, PseudoJetExt *pseudojets, const ParticleIndexType n, double r) {
const double one_over_r2 = 1. / (r * r);
const Dist none { std::numeric_limits<double>::infinity(), -1, -1 };
int start = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int index = start; index < grid.max_u * grid.max_v; index += stride) {
GridIndexType i = index / grid.max_v;
GridIndexType j = index % grid.max_v;
auto jet = grid.jets[index * grid.n];
// check if the cell is empty
if (jet == -1) {
for (int k = 0; k < 9; ++k)
grid.neighbours[index * 9 + k] = none;
grid.minimum[index] = none;
} else {
// FIXME use 9 threads ?
GridIndexType j_plus = (j + 1 < grid.max_v) ? j + 1 : 0;
GridIndexType j_minus = (j - 1 >= 0) ? j - 1 : grid.max_v - 1;
auto min = none;
auto tmp = none;
min = minimum_pair_in_cell(grid, pseudojets, i, j, one_over_r2);
grid.neighbours[index * 9 + 4] = min;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i, j_minus, one_over_r2);
grid.neighbours[index * 9 + 3] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i, j_plus, one_over_r2);
grid.neighbours[index * 9 + 5] = tmp;
if (tmp.distance < min.distance) min = tmp;
if (i - 1 >= 0) {
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j_minus, one_over_r2);
grid.neighbours[index * 9 + 0] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j, one_over_r2);
grid.neighbours[index * 9 + 1] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i-1, j_plus, one_over_r2);
grid.neighbours[index * 9 + 2] = tmp;
if (tmp.distance < min.distance) min = tmp;
} else {
grid.neighbours[index * 9 + 0] = none;
grid.neighbours[index * 9 + 1] = none;
grid.neighbours[index * 9 + 2] = none;
}
if (i + 1 < grid.max_u) {
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j_minus, one_over_r2);
grid.neighbours[index * 9 + 6] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j, one_over_r2);
grid.neighbours[index * 9 + 7] = tmp;
if (tmp.distance < min.distance) min = tmp;
tmp = minimum_pair_in_cells(grid, pseudojets, i, j, i+1, j_plus, one_over_r2);
grid.neighbours[index * 9 + 8] = tmp;
if (tmp.distance < min.distance) min = tmp;
} else {
grid.neighbours[index * 9 + 6] = none;
grid.neighbours[index * 9 + 7] = none;
grid.neighbours[index * 9 + 8] = none;
}
grid.minimum[index] = min;
}
}
}
constexpr const int n_neighbours = 9; // self, plus 8 neighbours
constexpr const int n_affected = 3; // 3 possibly affected cells
constexpr const int active_threads = n_neighbours * n_affected; // 1 cell + 8 neighbours, times 3 possibly affected cells
// reduce_recombine(...) must be called with at least active_threads (27) threads
__global__ void reduce_recombine(
Grid grid, PseudoJetExt *pseudojets, ParticleIndexType n, Algorithm algo, const double r) {
extern __shared__ Dist minima[];
int start = threadIdx.x;
int stride = blockDim.x;
const double one_over_r2 = 1. / (r * r);
const Dist none { std::numeric_limits<double>::infinity(), -1, -1 };
__shared__ Cell affected[n_affected];
while (true) {
// copy the minimum distances into shared memory
for (int index = start; index < grid.max_u * grid.max_v; index += stride) {
minima[index] = grid.minimum[index];
}
__syncthreads();
// find the largest power of 2 smaller than the grid size
unsigned int width = (1u << 31) >> __clz(grid.max_u * grid.max_v - 1);
// find the global minimum
Dist min = none;
for (unsigned int s = width; s >= 16; s >>= 1) {
for (int tid = threadIdx.x; tid < s and tid + s < grid.max_u * grid.max_v; tid += blockDim.x) {
if (minima[tid + s].distance < minima[tid].distance) {
minima[tid] = minima[tid + s];
}
}
__syncthreads();
}
// use a single thread for the last iterations, to avoid bank conflicts and synchronisations
if (threadIdx.x == 0) {
for (int tid = 0; tid < 16; ++tid) {
if (minima[tid].distance < min.distance) {
min = minima[tid];
}
}
}
__syncthreads();
// promote or recombine the minimum pseudojet(s)
if (threadIdx.x == 0) {
if (min.i == min.j) {
// remove the pseudojet at position min.j from the grid and promote it to jet status
//printf("will promote pseudojet %d (%d,%d) with distance %f\n", min.j, pseudojets[min.j].u, pseudojets[min.j].v, min.distance);
pseudojets[min.j].isJet = true;
auto jet = pseudojets[min.j];
remove_from_grid(grid, min.j, jet);
affected[0] = { jet.u, jet.v };
affected[1] = { -1, -1 };
affected[2] = { -1, -1 };
} else {
//printf("will recombine pseudojets %d (%d,%d) and %d (%d,%d) with distance %f\n", min.i, pseudojets[min.i].u, pseudojets[min.i].v, min.j, pseudojets[min.j].u, pseudojets[min.j].v, min.distance);
auto ith = pseudojets[min.i];
auto jth = pseudojets[min.j];
remove_from_grid(grid, min.i, ith);
remove_from_grid(grid, min.j, jth);
affected[0] = { ith.u, ith.v };
if (jth.u != ith.u or jth.v != ith.v) {
affected[1] = { jth.u, jth.v };
} else {
affected[1] = { -1, -1 };
}
// recombine the two pseudojets
PseudoJetExt pseudojet;
pseudojet.px = ith.px + jth.px;
pseudojet.py = ith.py + jth.py;
pseudojet.pz = ith.pz + jth.pz;
pseudojet.E = ith.E + jth.E;
_set_jet(grid, pseudojet, algo);
add_to_grid(grid, min.i, pseudojet);
pseudojets[min.i] = pseudojet;
if ((pseudojet.u != ith.u or pseudojet.v != ith.v) and
(pseudojet.u != jth.u or pseudojet.v != jth.v)) {
affected[2] = { pseudojet.u, pseudojet.v };
} else {
affected[2] = { -1, -1 };
}
}
}
__syncthreads();
if (--n == 0)
break;
int tid = start;
if (tid < active_threads) {
int self = tid / n_neighbours; // potentially affected cell (0..2)
int cell = tid % n_neighbours; // neighbour id (0..8)
GridIndexType u = affected[self].u;
GridIndexType v = affected[self].v;
// consider only the affected cells
if (u >= 0 and v >= 0) {
auto g = cg::coalesced_threads();
const int index = grid.index(u, v);
// check if the cell is empty
bool empty = (grid.jets[index * grid.n] == -1);
// evaluate the neighbouring cells
const int delta_u = cell / 3 - 1;
const int delta_v = cell % 3 - 1;
const GridIndexType other_u = u + delta_u;
const GridIndexType other_v = (v + delta_v + grid.max_v) % grid.max_v;
const bool central = (cell == 4);
const bool outside = other_u < 0 or other_u >= grid.max_u;
// update the local minima
if (central) {
grid.neighbours[index * n_neighbours + cell] = empty ? none : minimum_pair_in_cell(grid, pseudojets, u, v, one_over_r2);
} else if (outside) {
grid.neighbours[index * n_neighbours + cell] = none;
} else {
auto tmp = empty ? none : minimum_pair_in_cells(grid, pseudojets, u, v, other_u, other_v, one_over_r2);
grid.neighbours[index * n_neighbours + cell] = tmp;
grid.neighbours[grid.index(other_u, other_v) * n_neighbours + (n_neighbours - 1) - cell] = tmp;
}
// synchronise the active threads
g.sync();
// update the minimum in neighbouring cells
if (not outside) {
const int other = grid.index(other_u, other_v);
auto min = none;
for (int k = 0; k < n_neighbours; ++k) {
auto tmp = grid.neighbours[other * n_neighbours + k];
if (tmp.distance < min.distance) min = tmp;
}
grid.minimum[other] = min;
}
}
}
__syncthreads();
}
}
#pragma endregion
__global__ void init(const PseudoJet *particles, PseudoJetExt *jets, int size) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = first; i < size; i += stride) {
jets[i].px = particles[i].px;
jets[i].py = particles[i].py;
jets[i].pz = particles[i].pz;
jets[i].E = particles[i].E;
jets[i].index = particles[i].index;
jets[i].isJet = particles[i].isJet;
}
}
__global__ void output(const PseudoJetExt *jets, PseudoJet *particles, int size) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = first; i < size; i += stride) {
particles[i].px = jets[i].px;
particles[i].py = jets[i].py;
particles[i].pz = jets[i].pz;
particles[i].E = jets[i].E;
particles[i].index = jets[i].index;
particles[i].isJet = jets[i].isJet;
}
}
void cluster(PseudoJet *particles, int size, Algorithm algo, double r) {
#pragma region vectors
// examples from FastJet span |rap| < 10
// TODO: make the rap range dynamic, based on the data themselves
// TODO: make the cell size dynamic, based on the data themselves
// TODO: try to use __constant__ memory for config
Grid grid(-10., +10., 0, 2 * M_PI, r, size);
cudaCheck(cudaMalloc(&grid.jets, sizeof(ParticleIndexType) * grid.max_u * grid.max_v * grid.n));
cudaCheck(cudaMemset(grid.jets, 0xff, sizeof(ParticleIndexType) * grid.max_u * grid.max_v * grid.n));
cudaCheck(cudaMalloc(&grid.minimum, sizeof(Dist) * grid.max_u * grid.max_v));
cudaCheck(cudaMalloc(&grid.neighbours, sizeof(Dist) * grid.max_u * grid.max_v * 9));
PseudoJetExt *pseudojets;
cudaCheck(cudaMalloc(&pseudojets, size * sizeof(PseudoJetExt)));
#pragma endregion
#pragma region kernel_launches
LaunchParameters l;
// copy the particles from the input buffer to the pseudojet structures
l = estimateMinimalGrid(init, size);
init<<<l.gridSize, l.blockSize>>>(particles, pseudojets, size);
cudaCheck(cudaGetLastError());
// compute the jets' cylindrical coordinates and grid indices
l = estimateMinimalGrid(set_jets_coordiinates, size);
set_jets_coordiinates<<<l.gridSize, l.blockSize>>>(grid, pseudojets, size, algo);
cudaCheck(cudaGetLastError());
// sort the inputs according to their grid coordinates and "beam" clustering distance
thrust::sort(thrust::device, pseudojets, pseudojets + size, [] __device__(auto const &a, auto const &b) {
return (a.u < b.u) or (a.u == b.u and a.v < b.v) or (a.u == b.u and a.v == b.v and a.diB < b.diB);
});
// organise the jets in the grid
l = estimateMinimalGrid(set_jets_to_grid, size);
set_jets_to_grid<<<l.gridSize, l.blockSize>>>(grid, pseudojets, size, algo);
cudaCheck(cudaGetLastError());
// compute the minimum distances in all grid cells
l = estimateMinimalGrid(compute_initial_distances, grid.size());
compute_initial_distances<<<l.gridSize, l.blockSize>>>(grid, pseudojets, size, r);
cudaCheck(cudaGetLastError());
// recombine the particles into jets
l = estimateSingleBlock(reduce_recombine, grid.size());
int sharedMemory = sizeof(Dist) * grid.size();
reduce_recombine<<<l.gridSize, l.blockSize, sharedMemory>>>(grid, pseudojets, size, algo, r);
cudaCheck(cudaGetLastError());
// copy the clustered jets back to the input buffer
l = estimateMinimalGrid(output, size);
output<<<l.gridSize, l.blockSize>>>(pseudojets, particles, size);
cudaCheck(cudaGetLastError());
#pragma endregion
cudaCheck(cudaFree(pseudojets));
cudaCheck(cudaFree(grid.jets));
cudaCheck(cudaFree(grid.minimum));
cudaCheck(cudaFree(grid.neighbours));
}
|
b8aa42cfae27020423c1a45cc817cffca5e62d22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <assert.h>
#include <stdio.h>
#include "ising.h"
// ----------------------------------------------------------------------------
// Cuda implementation
//
#define GRID_DIM 128
#define BLOCK_DIM 128
#define CUDA_INCLUDE
#include "bits.cpp" // include CUDA device functions
#include "rand48.cu" // random numbers
typedef struct {
int len, dim, nblocks;
unsigned int *blocks;
float h, T; // external field and temperature
int parityTarget;
} IsingCudaParams;
// ----------------------------------------------------------------------------
// Cuda magnetization calculation
#define REDUCE_THREADS 128
#define REDUCE_MAX_BLOCKS 128
__global__ void isingCuda_magnetizationKernel (unsigned int *g_idata, unsigned int *g_odata, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*REDUCE_THREADS + threadIdx.x;
unsigned int gridSize = gridDim.x*REDUCE_THREADS;
unsigned int acc = 0;
while (i < n) {
acc += (unsigned int) bitCount(g_idata[i]);
i += gridSize;
}
extern __shared__ unsigned int s[];
s[tid] = acc;
__syncthreads();
// do reduction in shared mem
if (REDUCE_THREADS >= 256) { if (tid < 128) { s[tid] += s[128+tid]; } __syncthreads(); }
if (REDUCE_THREADS >= 128) { if (tid < 64) { s[tid] += s[ 64+tid]; } __syncthreads(); }
if (tid < 32) {
s[tid] += s[32+tid];
s[tid] += s[16+tid];
s[tid] += s[ 8+tid];
s[tid] += s[ 4+tid];
s[tid] += s[ 2+tid];
s[tid] += s[ 1+tid];
}
if (tid == 0) g_odata[blockIdx.x] = s[tid];
}
int divideCeil(int x, int y) {
return (x + y - 1) / y;
}
double isingCuda_bitCount(unsigned int *d_idata, int n) {
// allocate arrays on device and host to store one float for each block
int blocks = min(REDUCE_MAX_BLOCKS, divideCeil(n, REDUCE_THREADS));
unsigned int h_odata[REDUCE_MAX_BLOCKS];
unsigned int *d_odata;
hipMalloc((void**) &d_odata, blocks*sizeof(unsigned int));
// partial reduction; each block generates one number
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = REDUCE_THREADS * sizeof(unsigned int);
hipLaunchKernelGGL(( isingCuda_magnetizationKernel), dim3(blocks), dim3(REDUCE_THREADS), smemSize , 0, d_idata, d_odata, n);
// copy result from device to host, and perform final reduction on CPU
hipMemcpy(h_odata, d_odata, blocks*sizeof(unsigned int), hipMemcpyDeviceToHost);
double gpu_result = 0;
for (int i = 0; i < blocks; i++) {
gpu_result += h_odata[i];
}
hipFree(d_odata);
return gpu_result;
}
// ----------------------------------------------------------------------------
// Cuda Energy calculation JEE
__device__ inline void isingCuda_localInternalEnergy
(IsingCudaParams p, int ip, int & internal_energy) {
int parity = 0;
Bits128 acc = {0, 0, 0, 0};
int lenp_d = 1;
Bits128 n1 = bitsExpand(p.blocks[ip]);
for (int d = 0; d < p.dim; d++) {
int lenp = (d < 5) ? p.len>>2 : p.len;
int xp = (ip / lenp_d) % lenp;
parity += (d < 5) ? 0 : xp;
int dx2 = (xp+1+lenp)%lenp - xp;
int dx0 = (xp-1+lenp)%lenp - xp;
Bits128 n2 = bitsExpand(p.blocks[ip+dx2*lenp_d]);
Bits128 n0 = bitsExpand(p.blocks[ip+dx0*lenp_d]);
if (d < 5) {
int shift = 4 << d; // 4, 8, 16, 32, 64
acc = bitsAdd(acc, bitsMaskShiftL(bitsAdd(n1,n2), shift));
acc = bitsAdd(acc, bitsMaskShiftR(bitsAdd(n1,n0), shift));
}
else {
acc = bitsAdd(bitsAdd(n0,n2), acc);
}
lenp_d *= lenp;
}
int deltaMax = p.dim < 5 ? (1 << p.dim) : 32;
int cube = p.blocks[ip];
for (int delta = 0; delta < deltaMax; delta++) {
// Make sure we only test even parity, to avoid counting each
// bond twice. This check is not done in the primary kernel.
if (((parity + bitCount(delta)) & 1) == 0) {
// m = total magnetization of neighbors; in range [-2 dim, +2 dim]
int m = 2*(bitsPick4(acc, delta) - p.dim);
// s = spin at this site (+/- 1)
int s = 2*((cube >> delta) & 1) - 1;
// Append internal energy from hypercube
internal_energy += - m * s;
}
}
}
__global__ void isingCuda_internalEnergyKernel
(IsingCudaParams p, int * odata) {
unsigned int tid = threadIdx.x;
unsigned int ip = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
int acc = 0;
int ie; // internal energy
while (ip < p.nblocks) {
// Reset temporary internal energy counter
ie = 0;
isingCuda_localInternalEnergy (p, ip, ie);
acc += ie;
ip += gridSize;
}
extern __shared__ int t[];
t[tid] = acc;
__syncthreads();
// do thread reduction
if (REDUCE_THREADS >= 256) { if (tid < 128) { t[tid] += t[128+tid]; } __syncthreads(); }
if (REDUCE_THREADS >= 128) { if (tid < 64) { t[tid] += t[ 64+tid]; } __syncthreads(); }
if (tid < 32) {
t[tid] += t[32+tid];
t[tid] += t[16+tid];
t[tid] += t[ 8+tid];
t[tid] += t[ 4+tid];
t[tid] += t[ 2+tid];
t[tid] += t[ 1+tid];
}
if (tid == 0) odata[blockIdx.x] = t[tid];
}
// ----------------------------------------------------------------------------
// Cuda update implementation
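// Metropolis acceptance test: flipping a spin s with neighbour sum m changes the
// energy by dE = 2*s*(m + h); the flip is always accepted when dE < 0 and
// otherwise accepted with probability exp(-dE/T).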
__device__ inline int shouldFlipSpin(IsingCudaParams p, Rand48 &rng, int s, int m) {
float dE = 2*s*(m + p.h);
if (dE < 0)
return 1;
float r = (float)rand48_nextInt(rng) / (unsigned int)(1<<31);
return r < __expf (- dE / p.T);
}
__device__ inline void isingCuda_updateSite(IsingCudaParams p, Rand48 &rng, int ip) {
int parity = 0;
Bits128 acc = {0, 0, 0, 0};
int lenp_d = 1;
Bits128 n1 = bitsExpand(p.blocks[ip]);
for (int d = 0; d < p.dim; d++) {
int lenp = (d < 5) ? p.len/2 : p.len;
int xp = (ip / lenp_d) % lenp;
parity += (d < 5) ? 0 : xp;
int dx2 = (xp+1+lenp)%lenp - xp;
int dx0 = (xp-1+lenp)%lenp - xp;
Bits128 n2 = bitsExpand(p.blocks[ip+dx2*lenp_d]);
Bits128 n0 = bitsExpand(p.blocks[ip+dx0*lenp_d]);
if (d < 5) {
int shift = 4 << d; // 4, 8, 16, 32, 64
acc = bitsAdd(acc, bitsMaskShiftL(bitsAdd(n1,n2), shift));
acc = bitsAdd(acc, bitsMaskShiftR(bitsAdd(n1,n0), shift));
}
else {
acc = bitsAdd(bitsAdd(n0,n2), acc);
}
lenp_d *= lenp;
}
int deltaMax = p.dim < 5 ? (1 << p.dim) : 32;
int cube = p.blocks[ip];
for (int delta = 0; delta < deltaMax; delta++) {
if ((parity + bitCount(delta)) % 2 == p.parityTarget) {
// m = total magnetization of neighbors; in range [-2 dim, +2 dim]
int m = 2*(bitsPick4(acc, delta) - p.dim);
// s = spin at this site (+/- 1)
int s = 2*((cube >> delta) & 1) - 1;
if (shouldFlipSpin(p, rng, s, m)) {
cube ^= (1 << delta);
}
}
}
p.blocks[ip] = cube;
}
__global__ void isingCuda_update(IsingCudaParams p, Rand48 rng) {
rand48_loadState(rng);
unsigned int ip = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (ip < p.nblocks) {
isingCuda_updateSite(p, rng, ip);
ip += gridSize;
}
rand48_storeState(rng);
}
// ----------------------------------------------------------------------------
// Ising class interface
//
IsingCuda::IsingCuda(int len, int dim, float h, float T) : Ising(len, dim, h, T) {
assert(len % 2 == 0);
assert(dim <= 7);
this->len = len; // the parameters shadow the members of the same name
this->dim = dim;
n = (int)powl(len, dim);
nblocks = n >> min(5,dim);
int nbytes = nblocks*sizeof(unsigned int);
blocks = (unsigned int *)malloc(nbytes);
hipMalloc((void**)&d_blocks, nbytes);
for (int i = 0; i < nblocks; i++) {
blocks[i] = 0;
}
transferHostToDevice();
rng = new Rand48();
//rng->init(GRID_DIM*BLOCK_DIM, 0); // initialize random numbers
rng->init(GRID_DIM*BLOCK_DIM, time(NULL)); // initialize random numbers
}
IsingCuda::~IsingCuda() {
free(blocks);
hipFree(d_blocks);
rng->destroy();
delete rng;
}
void IsingCuda::completeNeighborSum(int *sum) {
assert(0==1);
}
void IsingCuda::rngSeed (int seed)
{
rng->destroy ();
delete rng;
rng = new Rand48 ();
rng->init (GRID_DIM*BLOCK_DIM, seed);
}
void IsingCuda::update(int parityTarget) {
IsingCudaParams p;
p.len = len;
p.dim = dim;
p.nblocks = nblocks;
p.blocks = d_blocks;
p.h = h;
p.T = T;
p.parityTarget = parityTarget;
int sharedBytes = 0;
hipLaunchKernelGGL(( isingCuda_update) , dim3(GRID_DIM), dim3(BLOCK_DIM), sharedBytes, 0, p, *rng);
}
double IsingCuda::magnetization() {
return 2.0*isingCuda_bitCount(d_blocks, nblocks) - n;
}
void IsingCuda::transferHostToDevice() {
hipMemcpy(d_blocks, blocks, nblocks*sizeof(unsigned int), hipMemcpyHostToDevice);
}
void IsingCuda::transferDeviceToHost() {
hipMemcpy(blocks, d_blocks, nblocks*sizeof(unsigned int), hipMemcpyDeviceToHost);
}
// given index 'i' into the full lattice, return compressed index 'ip'
// and bit index 'delta'.
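// Packing scheme: along the first min(5,dim) dimensions the lattice coordinate is
// halved (xp = x/2) and the remainder (x%2) becomes bit d of 'delta', so each
// unsigned int in 'blocks' holds 2^min(5,dim) spins. For example, with dim = 2 and
// len = 4, site i = 7 (x = 3, y = 1) maps to ip = 1 and delta = 3.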
void IsingCuda::index(int i, int *ip, int *delta) {
int len_d = 1;
int lenp_d = 1;
*ip = 0;
*delta = 0;
for (int d = 0; d < dim; d++) {
int x = (i / len_d) % len;
int xp = (d < 5) ? x/2 : x;
int del = (d < 5) ? x%2 : 0;
*delta += (del << d) ;
*ip += xp*lenp_d;
int lenp = (d < 5) ? len/2 : len;
len_d *= len;
lenp_d *= lenp;
}
assert(*ip < nblocks);
assert(*delta < 32);
}
void IsingCuda::set(int i, int s) {
int ip, delta;
index(i, &ip, &delta);
assert(ip < nblocks);
int mask = ~(1 << delta);
blocks[ip] = (blocks[ip] & mask) | (s << delta);
}
int IsingCuda::get(int i) {
int ip, delta;
index(i, &ip, &delta);
assert(ip < nblocks);
return (blocks[ip]>>delta) & 1;
}
void IsingCuda::update ()
{
update (0);
update (1);
}
// JEE
double IsingCuda::energy ()
{
IsingCudaParams p;
p.len = len;
p.dim = dim;
p.nblocks = nblocks;
p.blocks = d_blocks;
p.h = h;
p.T = T;
p.parityTarget = 0; // Unnecessary
int h_odata [BLOCK_DIM];
int *d_odata;
hipMalloc((void**) &d_odata, BLOCK_DIM*sizeof(int));
int sharedBytes = BLOCK_DIM * sizeof(unsigned int);
hipLaunchKernelGGL(( isingCuda_internalEnergyKernel)
, dim3(GRID_DIM), dim3(BLOCK_DIM), sharedBytes, 0, p, d_odata);
hipMemcpy
(h_odata, d_odata, BLOCK_DIM*sizeof (int), hipMemcpyDeviceToHost);
hipFree (d_odata); // don't need this any more
double ie = 0;
for (int i=0; i<BLOCK_DIM; ++i)
ie += (double) h_odata[i];
double m = magnetization ();
return ie - m * h;
}
// ----------------------------------------------------------------------------
// Cuda utility methods
//
void initCuda(int argc, char *argv[]) {
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = deviceCount - 1;
if (argc > 1) {
sscanf(argv[1], "%d", &dev);
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 1) {
fprintf(stderr, "Device %d does not support CUDA.\n", dev);
exit(EXIT_FAILURE);
}
fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name);
hipSetDevice(dev);
}
| b8aa42cfae27020423c1a45cc817cffca5e62d22.cu | #include <math.h>
#include <assert.h>
#include <stdio.h>
#include "ising.h"
// ----------------------------------------------------------------------------
// Cuda implementation
//
#define GRID_DIM 128
#define BLOCK_DIM 128
#define CUDA_INCLUDE
#include "bits.cpp" // include CUDA device functions
#include "rand48.cu" // random numbers
typedef struct {
int len, dim, nblocks;
unsigned int *blocks;
float h, T; // external field and temperature
int parityTarget;
} IsingCudaParams;
// ----------------------------------------------------------------------------
// Cuda magnetization calculation
#define REDUCE_THREADS 128
#define REDUCE_MAX_BLOCKS 128
__global__ void isingCuda_magnetizationKernel (unsigned int *g_idata, unsigned int *g_odata, unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*REDUCE_THREADS + threadIdx.x;
unsigned int gridSize = gridDim.x*REDUCE_THREADS;
unsigned int acc = 0;
while (i < n) {
acc += (unsigned int) bitCount(g_idata[i]);
i += gridSize;
}
extern __shared__ unsigned int s[];
s[tid] = acc;
__syncthreads();
// do reduction in shared mem
if (REDUCE_THREADS >= 256) { if (tid < 128) { s[tid] += s[128+tid]; } __syncthreads(); }
if (REDUCE_THREADS >= 128) { if (tid < 64) { s[tid] += s[ 64+tid]; } __syncthreads(); }
if (tid < 32) {
s[tid] += s[32+tid];
s[tid] += s[16+tid];
s[tid] += s[ 8+tid];
s[tid] += s[ 4+tid];
s[tid] += s[ 2+tid];
s[tid] += s[ 1+tid];
}
if (tid == 0) g_odata[blockIdx.x] = s[tid];
}
int divideCeil(int x, int y) {
return (x + y - 1) / y;
}
double isingCuda_bitCount(unsigned int *d_idata, int n) {
// allocate arrays on device and host to store one float for each block
int blocks = min(REDUCE_MAX_BLOCKS, divideCeil(n, REDUCE_THREADS));
unsigned int h_odata[REDUCE_MAX_BLOCKS];
unsigned int *d_odata;
cudaMalloc((void**) &d_odata, blocks*sizeof(unsigned int));
// partial reduction; each block generates one number
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = REDUCE_THREADS * sizeof(unsigned int);
isingCuda_magnetizationKernel<<< blocks, REDUCE_THREADS, smemSize >>>(d_idata, d_odata, n);
// copy result from device to host, and perform final reduction on CPU
cudaMemcpy(h_odata, d_odata, blocks*sizeof(unsigned int), cudaMemcpyDeviceToHost);
double gpu_result = 0;
for (int i = 0; i < blocks; i++) {
gpu_result += h_odata[i];
}
cudaFree(d_odata);
return gpu_result;
}
// ----------------------------------------------------------------------------
// Cuda Energy calculation JEE
__device__ inline void isingCuda_localInternalEnergy
(IsingCudaParams p, int ip, int & internal_energy) {
int parity = 0;
Bits128 acc = {0, 0, 0, 0};
int lenp_d = 1;
Bits128 n1 = bitsExpand(p.blocks[ip]);
for (int d = 0; d < p.dim; d++) {
int lenp = (d < 5) ? p.len>>2 : p.len;
int xp = (ip / lenp_d) % lenp;
parity += (d < 5) ? 0 : xp;
int dx2 = (xp+1+lenp)%lenp - xp;
int dx0 = (xp-1+lenp)%lenp - xp;
Bits128 n2 = bitsExpand(p.blocks[ip+dx2*lenp_d]);
Bits128 n0 = bitsExpand(p.blocks[ip+dx0*lenp_d]);
if (d < 5) {
int shift = 4 << d; // 4, 8, 16, 32, 64
acc = bitsAdd(acc, bitsMaskShiftL(bitsAdd(n1,n2), shift));
acc = bitsAdd(acc, bitsMaskShiftR(bitsAdd(n1,n0), shift));
}
else {
acc = bitsAdd(bitsAdd(n0,n2), acc);
}
lenp_d *= lenp;
}
int deltaMax = p.dim < 5 ? (1 << p.dim) : 32;
int cube = p.blocks[ip];
for (int delta = 0; delta < deltaMax; delta++) {
// Make sure we only test even parity, to avoid counting each
// bond twice. This check is not done in the primary kernel.
if (((parity + bitCount(delta)) & 1) == 0) {
// m = total magnetization of neighbors; in range [-2 dim, +2 dim]
int m = 2*(bitsPick4(acc, delta) - p.dim);
// s = spin at this site (+/- 1)
int s = 2*((cube >> delta) & 1) - 1;
// Append internal energy from hypercube
internal_energy += - m * s;
}
}
}
__global__ void isingCuda_internalEnergyKernel
(IsingCudaParams p, int * odata) {
unsigned int tid = threadIdx.x;
unsigned int ip = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
int acc = 0;
int ie; // internal energy
while (ip < p.nblocks) {
// Reset temporary internal energy counter
ie = 0;
isingCuda_localInternalEnergy (p, ip, ie);
acc += ie;
ip += gridSize;
}
extern __shared__ int t[];
t[tid] = acc;
__syncthreads();
// do thread reduction
if (REDUCE_THREADS >= 256) { if (tid < 128) { t[tid] += t[128+tid]; } __syncthreads(); }
if (REDUCE_THREADS >= 128) { if (tid < 64) { t[tid] += t[ 64+tid]; } __syncthreads(); }
if (tid < 32) {
t[tid] += t[32+tid];
t[tid] += t[16+tid];
t[tid] += t[ 8+tid];
t[tid] += t[ 4+tid];
t[tid] += t[ 2+tid];
t[tid] += t[ 1+tid];
}
if (tid == 0) odata[blockIdx.x] = t[tid];
}
// ----------------------------------------------------------------------------
// Cuda update implementation
__device__ inline int shouldFlipSpin(IsingCudaParams p, Rand48 &rng, int s, int m) {
float dE = 2*s*(m + p.h);
if (dE < 0)
return 1;
float r = (float)rand48_nextInt(rng) / (unsigned int)(1<<31);
return r < __expf (- dE / p.T);
}
__device__ inline void isingCuda_updateSite(IsingCudaParams p, Rand48 &rng, int ip) {
int parity = 0;
Bits128 acc = {0, 0, 0, 0};
int lenp_d = 1;
Bits128 n1 = bitsExpand(p.blocks[ip]);
for (int d = 0; d < p.dim; d++) {
int lenp = (d < 5) ? p.len/2 : p.len;
int xp = (ip / lenp_d) % lenp;
parity += (d < 5) ? 0 : xp;
int dx2 = (xp+1+lenp)%lenp - xp;
int dx0 = (xp-1+lenp)%lenp - xp;
Bits128 n2 = bitsExpand(p.blocks[ip+dx2*lenp_d]);
Bits128 n0 = bitsExpand(p.blocks[ip+dx0*lenp_d]);
if (d < 5) {
int shift = 4 << d; // 4, 8, 16, 32, 64
acc = bitsAdd(acc, bitsMaskShiftL(bitsAdd(n1,n2), shift));
acc = bitsAdd(acc, bitsMaskShiftR(bitsAdd(n1,n0), shift));
}
else {
acc = bitsAdd(bitsAdd(n0,n2), acc);
}
lenp_d *= lenp;
}
int deltaMax = p.dim < 5 ? (1 << p.dim) : 32;
int cube = p.blocks[ip];
for (int delta = 0; delta < deltaMax; delta++) {
if ((parity + bitCount(delta)) % 2 == p.parityTarget) {
// m = total magnetization of neighbors; in range [-2 dim, +2 dim]
int m = 2*(bitsPick4(acc, delta) - p.dim);
// s = spin at this site (+/- 1)
int s = 2*((cube >> delta) & 1) - 1;
if (shouldFlipSpin(p, rng, s, m)) {
cube ^= (1 << delta);
}
}
}
p.blocks[ip] = cube;
}
__global__ void isingCuda_update(IsingCudaParams p, Rand48 rng) {
rand48_loadState(rng);
unsigned int ip = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (ip < p.nblocks) {
isingCuda_updateSite(p, rng, ip);
ip += gridSize;
}
rand48_storeState(rng);
}
// ----------------------------------------------------------------------------
// Ising class interface
//
IsingCuda::IsingCuda(int len, int dim, float h, float T) : Ising(len, dim, h, T) {
assert(len % 2 == 0);
assert(dim <= 7);
this->len = len; // the parameters shadow the members of the same name
this->dim = dim;
n = (int)powl(len, dim);
nblocks = n >> min(5,dim);
int nbytes = nblocks*sizeof(unsigned int);
blocks = (unsigned int *)malloc(nbytes);
cudaMalloc((void**)&d_blocks, nbytes);
for (int i = 0; i < nblocks; i++) {
blocks[i] = 0;
}
transferHostToDevice();
rng = new Rand48();
//rng->init(GRID_DIM*BLOCK_DIM, 0); // initialize random numbers
rng->init(GRID_DIM*BLOCK_DIM, time(NULL)); // initialize random numbers
}
IsingCuda::~IsingCuda() {
free(blocks);
cudaFree(d_blocks);
rng->destroy();
delete rng;
}
void IsingCuda::completeNeighborSum(int *sum) {
assert(0==1);
}
void IsingCuda::rngSeed (int seed)
{
rng->destroy ();
delete rng;
rng = new Rand48 ();
rng->init (GRID_DIM*BLOCK_DIM, seed);
}
void IsingCuda::update(int parityTarget) {
IsingCudaParams p;
p.len = len;
p.dim = dim;
p.nblocks = nblocks;
p.blocks = d_blocks;
p.h = h;
p.T = T;
p.parityTarget = parityTarget;
int sharedBytes = 0;
isingCuda_update <<<GRID_DIM, BLOCK_DIM, sharedBytes>>> (p, *rng);
}
double IsingCuda::magnetization() {
return 2.0*isingCuda_bitCount(d_blocks, nblocks) - n;
}
void IsingCuda::transferHostToDevice() {
cudaMemcpy(d_blocks, blocks, nblocks*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
void IsingCuda::transferDeviceToHost() {
cudaMemcpy(blocks, d_blocks, nblocks*sizeof(unsigned int), cudaMemcpyDeviceToHost);
}
// given index 'i' into the full lattice, return compressed index 'ip'
// and bit index 'delta'.
void IsingCuda::index(int i, int *ip, int *delta) {
int len_d = 1;
int lenp_d = 1;
*ip = 0;
*delta = 0;
for (int d = 0; d < dim; d++) {
int x = (i / len_d) % len;
int xp = (d < 5) ? x/2 : x;
int del = (d < 5) ? x%2 : 0;
*delta += (del << d) ;
*ip += xp*lenp_d;
int lenp = (d < 5) ? len/2 : len;
len_d *= len;
lenp_d *= lenp;
}
assert(*ip < nblocks);
assert(*delta < 32);
}
void IsingCuda::set(int i, int s) {
int ip, delta;
index(i, &ip, &delta);
assert(ip < nblocks);
int mask = ~(1 << delta);
blocks[ip] = (blocks[ip] & mask) | (s << delta);
}
int IsingCuda::get(int i) {
int ip, delta;
index(i, &ip, &delta);
assert(ip < nblocks);
return (blocks[ip]>>delta) & 1;
}
void IsingCuda::update ()
{
update (0);
update (1);
}
// JEE
double IsingCuda::energy ()
{
IsingCudaParams p;
p.len = len;
p.dim = dim;
p.nblocks = nblocks;
p.blocks = d_blocks;
p.h = h;
p.T = T;
p.parityTarget = 0; // Unnecessary
int h_odata [BLOCK_DIM];
int *d_odata;
cudaMalloc((void**) &d_odata, BLOCK_DIM*sizeof(int));
int sharedBytes = BLOCK_DIM * sizeof(unsigned int);
isingCuda_internalEnergyKernel
<<<GRID_DIM, BLOCK_DIM, sharedBytes>>> (p, d_odata);
cudaMemcpy
(h_odata, d_odata, BLOCK_DIM*sizeof (int), cudaMemcpyDeviceToHost);
cudaFree (d_odata); // don't need this any more
double ie = 0;
for (int i=0; i<BLOCK_DIM; ++i)
ie += (double) h_odata[i];
double m = magnetization ();
return ie - m * h;
}
// ----------------------------------------------------------------------------
// Cuda utility methods
//
void initCuda(int argc, char *argv[]) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = deviceCount - 1;
if (argc > 1) {
sscanf(argv[1], "%d", &dev);
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 1) {
fprintf(stderr, "Device %d does not support CUDA.\n", dev);
exit(EXIT_FAILURE);
}
fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name);
cudaSetDevice(dev);
}
|
4aef343c64282167697bad00501bbdb041ad847d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgesellcmv.cu normal z -> d, Tue Feb 9 16:05:41 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
dgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
double val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
hipLaunchKernelGGL(( dgesellcmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
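// Usage sketch (illustrative): with device buffers already filled in SELLP format,
// a call might look like
//   magma_dgesellcmv( MagmaNoTrans, m, n, blocksize, slices, /*alignment=*/ 1,
//                     alpha, dval, dcolind, drowptr, dx, beta, dy, queue );
// where dval/dcolind/drowptr describe the matrix and dx/dy are the dense vectors,
// following the argument list documented above.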
| 4aef343c64282167697bad00501bbdb041ad847d.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgesellcmv.cu normal z -> d, Tue Feb 9 16:05:41 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// SELLC SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
__global__ void
dgesellcmv_kernel(
int num_rows,
int num_cols,
int blocksize,
double alpha,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * dx,
double beta,
double * dy)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = dcolind [offset+ blocksize * n + threadIdx.x ];
double val = dval[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*dx[col];
}
}
dy[ Idx ] = dot * alpha + beta * dy [ Idx ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLC/SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row (=1)
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in SELLC/P
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLC/P
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgesellcmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
// the kernel can only handle up to 65535 slices
// (~2M rows for blocksize 32)
dim3 grid( slices, 1, 1);
magma_int_t threads = blocksize;
dgesellcmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, blocksize, alpha,
dval, dcolind, drowptr, dx, beta, dy );
return MAGMA_SUCCESS;
}
|
92dfe8e25fe47917210945c4a681a0d5af1440ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcHydrostaticPressure.cu
*
* Created on: 28-05-2015
* Author: Kamil Szewc ([email protected])
*/
#include "../../../sph.h"
#include "../../../hlp.h"
#include "../../../methods/kernels.cuh"
#include "../../../methods/interactions.cuh"
#include <stdio.h>
__device__ static real interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
real q = r * par->I_H;
if (q < 2.0)
{
real k = kern(q, par->I_H);
return k*p[i].m/p[i].d;
}
else
{
return 0.0;
}
}
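// The kernel below estimates, for every particle, the local free-surface height:
// it scans the grid column above the particle from the top of the domain
// downwards, sampling a few heights per cell and summing the kernel-weighted
// volume contributions of the particles in the surrounding 3x3 cells. The first
// sampled height where this sum reaches 0.5 is taken as the surface, and the
// hydrostatic pressure is then set to di * |G_Y| * (height - y).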
__global__ void calcHydrostaticPressure(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
p[index].ph = 0.0;
real height = 0.0;
real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y);
int2 gridPos = calcGridPos(pos, par);
//if (p[index].id == 20060) printf("--------------\n");
for (int yc = par->NYC-1; yc >= gridPos.y; yc--)
{
int2 gridPosCheck;
gridPosCheck.x = gridPos.x;
gridPosCheck.y = yc;
uint gridHash = calcGridHash(gridPosCheck, par);
uint startIndex = cellStart[gridHash];
if (startIndex == 0xffffffff) continue;
for (int i=5; i>=0; i--)
{
real y = (real)yc*2.0*par->H + i*2.0*par->H/5.0;
real result = 0.0;
//if (p[index].id == 20060) printf("y=%f ", y);
for (int iy = -1; iy <= 1; iy++) {
for (int ix = -1; ix <= 1; ix++) {
gridPosCheck.x = gridPos.x + ix;
gridPosCheck.y = yc + iy;
uint gridHash = calcGridHash(gridPosCheck, par);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff)
{
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++)
{
real2 dpos, dvel;
real2 pos1 = p[index].pos;
pos1.y = y;
real2 pos2 = p[j].pos;
real2 vel1 = p[j].vel;
real2 vel2 = p[j].vel;
calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
result += interaction(index, j, dpos, dvel, p, par);
}
}
}
}
//if (p[index].id == 20060 ) printf("result=%f\n", result);
if (result >= 0.5)
{
height = y;
break;
}
}
if (height != 0.0) break;
}
if (height > 0.0)
{
p[index].ph = p[index].di * fabs(par->G_Y) * (height-p[index].pos.y);
}
}
}
| 92dfe8e25fe47917210945c4a681a0d5af1440ad.cu | /*
* calcHydrostaticPressure.cu
*
* Created on: 28-05-2015
* Author: Kamil Szewc ([email protected])
*/
#include "../../../sph.h"
#include "../../../hlp.h"
#include "../../../methods/kernels.cuh"
#include "../../../methods/interactions.cuh"
#include <stdio.h>
__device__ static real interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
real q = r * par->I_H;
if (q < 2.0)
{
real k = kern(q, par->I_H);
return k*p[i].m/p[i].d;
}
else
{
return 0.0;
}
}
__global__ void calcHydrostaticPressure(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
p[index].ph = 0.0;
real height = 0.0;
real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y);
int2 gridPos = calcGridPos(pos, par);
//if (p[index].id == 20060) printf("--------------\n");
for (int yc = par->NYC-1; yc >= gridPos.y; yc--)
{
int2 gridPosCheck;
gridPosCheck.x = gridPos.x;
gridPosCheck.y = yc;
uint gridHash = calcGridHash(gridPosCheck, par);
uint startIndex = cellStart[gridHash];
if (startIndex == 0xffffffff) continue;
for (int i=5; i>=0; i--)
{
real y = (real)yc*2.0*par->H + i*2.0*par->H/5.0;
real result = 0.0;
//if (p[index].id == 20060) printf("y=%f ", y);
for (int iy = -1; iy <= 1; iy++) {
for (int ix = -1; ix <= 1; ix++) {
gridPosCheck.x = gridPos.x + ix;
gridPosCheck.y = yc + iy;
uint gridHash = calcGridHash(gridPosCheck, par);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff)
{
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++)
{
real2 dpos, dvel;
real2 pos1 = p[index].pos;
pos1.y = y;
real2 pos2 = p[j].pos;
real2 vel1 = p[j].vel;
real2 vel2 = p[j].vel;
calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
result += interaction(index, j, dpos, dvel, p, par);
}
}
}
}
//if (p[index].id == 20060 ) printf("result=%f\n", result);
if (result >= 0.5)
{
height = y;
break;
}
}
if (height != 0.0) break;
}
if (height > 0.0)
{
p[index].ph = p[index].di * fabs(par->G_Y) * (height-p[index].pos.y);
}
}
}
|
95d22e262792c101f0c2bdd539f84c3a620b22d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/bool.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
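// The test kernel writes into six result slots: result[0..2] exercise the
// built-in atomicAdd/atomicMin/atomicMax overloads, while result[3..5] perform the
// same sum/min/max reductions through cudf::genericAtomicOperation, so both code
// paths can be compared against host-computed reference values.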
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
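// Generic atomic read-modify-write via a compare-and-swap loop: read the current
// value, compute op(old, value), and retry atomicCAS until no other thread has
// modified the location in between.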
template <typename T, typename BinaryOp>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op) {
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(xs.begin(), xs.end(), ys.begin(),
[](T const& ts) { return ts.time_since_epoch().count(); });
return T{std::accumulate(ys.begin(), ys.end(), 0)};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
// use transform from std::vector<int> instead.
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
std::vector<T> result_init(6);
result_init[0] = T{0};
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
thrust::device_vector<T> dev_data(v);
thrust::device_vector<T> dev_result(result_init);
if (block_size == 0) {
block_size = vec_size;
}
if (is_cas_test) {
hipLaunchKernelGGL(( gpu_atomicCAS_test), dim3(grid_size), dim3(block_size), 0, 0,
dev_result.data().get(), dev_data.data().get(), vec_size);
} else {
hipLaunchKernelGGL(( gpu_atomic_test), dim3(grid_size), dim3(block_size), 0, 0,
dev_result.data().get(), dev_data.data().get(), vec_size);
}
thrust::host_vector<T> host_result(dev_result);
hipDeviceSynchronize();
CHECK_CUDA(0);
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypes);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps) {
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS) {
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid) {
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid) {
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom) {
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom) {
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
    std::transform(v_input.begin(), v_input.end(), v.begin(), [](uint64_t x) {
T t(x);
return t;
});
std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)};
T exact[3];
exact[0] = std::accumulate(v.begin(), v.end(), identity[0],
[](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(v.begin(), v.end(), identity[1],
[](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(v.begin(), v.end(), identity[2],
[](T acc, uint64_t i) { return acc ^ T(i); });
thrust::device_vector<T> dev_result(identity);
thrust::device_vector<T> dev_data(v);
if (block_size == 0) {
block_size = vec_size;
}
hipLaunchKernelGGL(( gpu_atomic_bitwiseOp_test<T>), dim3(grid_size), dim3(block_size), 0, 0,
reinterpret_cast<T*>(dev_result.data().get()),
reinterpret_cast<T*>(dev_data.data().get()), vec_size);
thrust::host_vector<T> host_result(dev_result);
hipDeviceSynchronize();
CHECK_CUDA(0);
print_exact(exact, "exact");
print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
void print_exact(const T* v, const char* msg) {
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", "
<< +v[2] << "}" << std::endl;
}
};
using BitwiseOpTestingTypes = cudf::test::Types<int8_t,
int16_t,
int32_t,
int64_t,
uint8_t,
uint16_t,
uint32_t,
uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) {
{ // test for AND, XOR
std::vector<uint64_t> input_array({0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc,
0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array({0x01, 0xfc02, 0x1dff03,
0x1100a0b0801d0003, 0x8000000000000000,
0x1dff03});
this->atomic_test(input_array);
}
}
| 95d22e262792c101f0c2bdd539f84c3a620b22d5.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/bool.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T, typename BinaryOp>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op) {
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(
std::vector<T> const& xs) {
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(xs.begin(), xs.end(), ys.begin(),
[](T const& ts) { return ts.time_since_epoch().count(); });
return T{std::accumulate(ys.begin(), ys.end(), 0)};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
// use transform from std::vector<int> instead.
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
std::vector<T> result_init(6);
result_init[0] = T{0};
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
thrust::device_vector<T> dev_data(v);
thrust::device_vector<T> dev_result(result_init);
if (block_size == 0) {
block_size = vec_size;
}
if (is_cas_test) {
gpu_atomicCAS_test<<<grid_size, block_size>>>(
dev_result.data().get(), dev_data.data().get(), vec_size);
} else {
gpu_atomic_test<<<grid_size, block_size>>>(
dev_result.data().get(), dev_data.data().get(), vec_size);
}
thrust::host_vector<T> host_result(dev_result);
cudaDeviceSynchronize();
CHECK_CUDA(0);
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypes);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps) {
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS) {
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid) {
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid) {
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom) {
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom) {
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(),
[&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input,
int block_size = 0,
int grid_size = 1) {
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
    std::transform(v_input.begin(), v_input.end(), v.begin(), [](uint64_t x) {
T t(x);
return t;
});
std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)};
T exact[3];
exact[0] = std::accumulate(v.begin(), v.end(), identity[0],
[](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(v.begin(), v.end(), identity[1],
[](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(v.begin(), v.end(), identity[2],
[](T acc, uint64_t i) { return acc ^ T(i); });
thrust::device_vector<T> dev_result(identity);
thrust::device_vector<T> dev_data(v);
if (block_size == 0) {
block_size = vec_size;
}
gpu_atomic_bitwiseOp_test<T><<<grid_size, block_size>>>(
reinterpret_cast<T*>(dev_result.data().get()),
reinterpret_cast<T*>(dev_data.data().get()), vec_size);
thrust::host_vector<T> host_result(dev_result);
cudaDeviceSynchronize();
CHECK_CUDA(0);
print_exact(exact, "exact");
print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
void print_exact(const T* v, const char* msg) {
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", "
<< +v[2] << "}" << std::endl;
}
};
using BitwiseOpTestingTypes = cudf::test::Types<int8_t,
int16_t,
int32_t,
int64_t,
uint8_t,
uint16_t,
uint32_t,
uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) {
{ // test for AND, XOR
std::vector<uint64_t> input_array({0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc,
0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array({0x01, 0xfc02, 0x1dff03,
0x1100a0b0801d0003, 0x8000000000000000,
0x1dff03});
this->atomic_test(input_array);
}
}
|
578e2ee237359bd840f80b266ff148220056e320.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
// CUDA kernel to double each element of x
__global__ void add(int N, float *x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N)
x[i] = x[i] *2;
}
int main(void)
{
int N = 1<<20;
float *x;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
// initialize x array on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
// Launch kernel on 1M elements on the GPU
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipSetDevice(0);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if( (unsigned long long) (N*sizeof(float)) >= (unsigned long long)deviceProp.totalGlobalMem) {
fprintf(stderr, "Memory overload!\n");
exit(EXIT_FAILURE);
}
if( threadsPerBlock >= deviceProp.maxThreadsPerBlock){
fprintf(stderr, "Threads overload!\n");
exit(EXIT_FAILURE);
}
if( blocksPerGrid >= deviceProp.maxGridSize[0]){
fprintf(stderr, "Grid overload!\n");
exit(EXIT_FAILURE);
}
  hipLaunchKernelGGL(( add), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, N, x);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Free memory
hipFree(x);
return 0;
}
| 578e2ee237359bd840f80b266ff148220056e320.cu | #include <iostream>
#include <stdio.h>
#include <math.h>
// CUDA kernel to double each element of x
__global__ void add(int N, float *x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N)
x[i] = x[i] *2;
}
int main(void)
{
int N = 1<<20;
float *x;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
// initialize x array on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
// Launch kernel on 1M elements on the GPU
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if( (unsigned long long) (N*sizeof(float)) >= (unsigned long long)deviceProp.totalGlobalMem) {
fprintf(stderr, "Memory overload!\n");
exit(EXIT_FAILURE);
}
if( threadsPerBlock >= deviceProp.maxThreadsPerBlock){
fprintf(stderr, "Threads overload!\n");
exit(EXIT_FAILURE);
}
if( blocksPerGrid >= deviceProp.maxGridSize[0]){
fprintf(stderr, "Grid overload!\n");
exit(EXIT_FAILURE);
}
add<<<blocksPerGrid, threadsPerBlock>>>(N, x);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Free memory
cudaFree(x);
return 0;
}
|
5317ee509e178d743058e7c959428c9168fdd07b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY
// ACCESS
#include <stdio.h>
// srad kernel
__global__ void srad(fp d_lambda, int d_Nr, int d_Nc, long d_Ne, int *d_iN,
int *d_iS, int *d_jE, int *d_jW, fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp d_q0sqr,
bool *d_c,
fp *d_I) {
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
    bool d_c_loc = false; // default for the d_den >= 0 case, which the branches below do not set
fp d_G2, d_L, d_num, d_den, d_qsqr;
// figure out row/col location in new matrix
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
    // directional derivatives, ICOV, diffusion coefficient
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared
// memory or temp files)
d_dN_loc =
d_I[d_iN[row] + d_Nr * col] - d_Jc; // north direction derivative
d_dS_loc =
d_I[d_iS[row] + d_Nr * col] - d_Jc; // south direction derivative
d_dW_loc =
d_I[row + d_Nr * d_jW[col]] - d_Jc; // west direction derivative
d_dE_loc =
d_I[row + d_Nr * d_jE[col]] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc * d_dN_loc + d_dS_loc * d_dS_loc +
d_dW_loc * d_dW_loc + d_dE_loc * d_dE_loc) /
(d_Jc * d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) /
d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
d_num = (0.5 * d_G2) -
((1.0 / 16.0) *
(d_L * d_L)); // num (based on gradient and laplacian)
d_den = 1 + (0.25 * d_L); // den (based on laplacian)
d_qsqr = d_num / (d_den * d_den); // qsqr (based on num and den)
    // diffusion coefficient (equ 33) (every element of IMAGE)
d_den = (d_qsqr - d_q0sqr) /
(d_q0sqr * (1 + d_q0sqr)); // den (based on qsqr and q0sqr)
if(d_den < -1 ){
d_c_loc = 0;
}else if(d_den >-1 && d_den <0){
d_c_loc = 1;
}
// d_c_loc = 1.0 / (1.0 + d_den); // diffusion coefficient (based on den)
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
| 5317ee509e178d743058e7c959428c9168fdd07b.cu | // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY
// ACCESS
#include <stdio.h>
// srad kernel
__global__ void srad(fp d_lambda, int d_Nr, int d_Nc, long d_Ne, int *d_iN,
int *d_iS, int *d_jE, int *d_jW, fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp d_q0sqr,
bool *d_c,
fp *d_I) {
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
    bool d_c_loc = false; // default for the d_den >= 0 case, which the branches below do not set
fp d_G2, d_L, d_num, d_den, d_qsqr;
// figure out row/col location in new matrix
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
    // directional derivatives, ICOV, diffusion coefficient
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared
// memory or temp files)
d_dN_loc =
d_I[d_iN[row] + d_Nr * col] - d_Jc; // north direction derivative
d_dS_loc =
d_I[d_iS[row] + d_Nr * col] - d_Jc; // south direction derivative
d_dW_loc =
d_I[row + d_Nr * d_jW[col]] - d_Jc; // west direction derivative
d_dE_loc =
d_I[row + d_Nr * d_jE[col]] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc * d_dN_loc + d_dS_loc * d_dS_loc +
d_dW_loc * d_dW_loc + d_dE_loc * d_dE_loc) /
(d_Jc * d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) /
d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
d_num = (0.5 * d_G2) -
((1.0 / 16.0) *
(d_L * d_L)); // num (based on gradient and laplacian)
d_den = 1 + (0.25 * d_L); // den (based on laplacian)
d_qsqr = d_num / (d_den * d_den); // qsqr (based on num and den)
    // diffusion coefficient (equ 33) (every element of IMAGE)
d_den = (d_qsqr - d_q0sqr) /
(d_q0sqr * (1 + d_q0sqr)); // den (based on qsqr and q0sqr)
if(d_den < -1 ){
d_c_loc = 0;
}else if(d_den >-1 && d_den <0){
d_c_loc = 1;
}
// d_c_loc = 1.0 / (1.0 + d_den); // diffusion coefficient (based on den)
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
|
0ec42a57522818c6a55e86ec19fd390e4778292b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define DEBUGG 1
//static const int N = 16; //Always square matrices
static const int N = 36; //Always square matrices
//...
//Kernel that distributes the execution across the grid
__global__ void organitza_grid(int *array) {
    //Lay the grid (blocks and threads) out as a one-dimensional array and compute the linear index of this layout.
    //Each index corresponds to one thread of the grid
int idx = threadIdx.x;
int idy = threadIdx.y;
int idblocy = blockIdx.y;
int idblocx = blockIdx.x;
int width = gridDim.x * blockDim.x;
int id_array = (idy*width + idx) + (idblocx * blockDim.x) + (idblocy * width * blockDim.y);
array[id_array]=(2*idblocy)+idblocx;
//....
    //Retrieve the index of the block within the grid
    //...
    //Store the result into the array
//...
}
__host__ void printa(int *array,int sizex,int sizey)
{
    //Your code here...
    for(int i = 0 ; i < sizey ; ++i){ //print the grid according to its size along the x and y axes
for(int j = 0 ; j < sizex; ++j){
printf("%d ",array[i*sizex+j]);
}
printf("\n");
}
}
int main(void) {
int *dev_a , gridsizex,gridsizey;
int *array;
int size = N*sizeof(int);
 // Allocate memory on the host and on the device
array = (int *)malloc(size);
hipMalloc((void **)&dev_a, size);
 memset(array,0,size); //initialize the whole array to 0
 hipMemcpy(dev_a,array,size,hipMemcpyHostToDevice); //copy the array from host to device
 //Create two-dimensional blocks with a different number of threads. E.g.: start with 4x4
 dim3 block_dim(sqrt(N)/2,sqrt(N)/2); //sqrt(N)/2 x sqrt(N)/2 threads per block (3x3 = 9 threads for N = 36)
 //...
 dim3 grid_dim(sqrt(N)/block_dim.x,sqrt(N)/block_dim.y); //number of blocks in the grid
 // Create and initialize a grid in 2 dimensions
 //dim3 grid_dim(grid_dim,block_dim); //the grid will always have two blocks along the x axis
gridsizex = grid_dim.x*block_dim.x;
gridsizey = grid_dim.y*block_dim.y;
//...
#if DEBUGG
printf("Dim block (x,y) %d-%d",block_dim.x,block_dim.y);
printf("\nDim Grid (blocks)(x,y) %d-%d",grid_dim.x,grid_dim.y);
printf("\ngrid size (threads)(x,y) %d-%d\n",gridsizex,gridsizey);
#endif
hipLaunchKernelGGL(( organitza_grid), dim3(grid_dim), dim3(block_dim), 0, 0, dev_a);
hipMemcpy(array,dev_a,size,hipMemcpyDeviceToHost);
 // Print the results of the grid layout
printa(array,gridsizex,gridsizey);
return 0;
}
| 0ec42a57522818c6a55e86ec19fd390e4778292b.cu | #include <stdio.h>
#include <stdlib.h>
#define DEBUGG 1
//static const int N = 16; //Always square matrices
static const int N = 36; //Always square matrices
//...
//Kernel that distributes the execution across the grid
__global__ void organitza_grid(int *array) {
    //Lay the grid (blocks and threads) out as a one-dimensional array and compute the linear index of this layout.
    //Each index corresponds to one thread of the grid
int idx = threadIdx.x;
int idy = threadIdx.y;
int idblocy = blockIdx.y;
int idblocx = blockIdx.x;
int width = gridDim.x * blockDim.x;
int id_array = (idy*width + idx) + (idblocx * blockDim.x) + (idblocy * width * blockDim.y);
array[id_array]=(2*idblocy)+idblocx;
//....
    //Retrieve the index of the block within the grid
    //...
    //Store the result into the array
//...
}
__host__ void printa(int *array,int sizex,int sizey)
{
    //Your code here...
    for(int i = 0 ; i < sizey ; ++i){ //print the grid according to its size along the x and y axes
for(int j = 0 ; j < sizex; ++j){
printf("%d ",array[i*sizex+j]);
}
printf("\n");
}
}
int main(void) {
int *dev_a , gridsizex,gridsizey;
int *array;
int size = N*sizeof(int);
 // Allocate memory on the host and on the device
array = (int *)malloc(size);
cudaMalloc((void **)&dev_a, size);
 memset(array,0,size); //initialize the whole array to 0
 cudaMemcpy(dev_a,array,size,cudaMemcpyHostToDevice); //copy the array from host to device
 //Create two-dimensional blocks with a different number of threads. E.g.: start with 4x4
 dim3 block_dim(sqrt(N)/2,sqrt(N)/2); //sqrt(N)/2 x sqrt(N)/2 threads per block (3x3 = 9 threads for N = 36)
 //...
 dim3 grid_dim(sqrt(N)/block_dim.x,sqrt(N)/block_dim.y); //number of blocks in the grid
 // Create and initialize a grid in 2 dimensions
 //dim3 grid_dim(grid_dim,block_dim); //the grid will always have two blocks along the x axis
gridsizex = grid_dim.x*block_dim.x;
gridsizey = grid_dim.y*block_dim.y;
//...
#if DEBUGG
printf("Dim block (x,y) %d-%d",block_dim.x,block_dim.y);
printf("\nDim Grid (blocks)(x,y) %d-%d",grid_dim.x,grid_dim.y);
printf("\ngrid size (threads)(x,y) %d-%d\n",gridsizex,gridsizey);
#endif
organitza_grid<<<grid_dim, block_dim>>>(dev_a);
cudaMemcpy(array,dev_a,size,cudaMemcpyDeviceToHost);
 // Print the results of the grid layout
printa(array,gridsizex,gridsizey);
return 0;
}
|
c57b9459819420d1d3aaacba4b69368e57f181ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* In this assignment two versions of the GPU program are required: 1) the simplest and shortest possible; and 2) a fast one that uses shared memory.
Program random input-data generation for the algorithm and an automatic check that the program works correctly.
Carry out a theoretical performance estimate for both versions of the algorithm and state in the report how far the theoretical estimate differs from the measured one. */
/* Implement multiplication of a long matrix, stored by columns, by a long vector */
#include <iostream>
#define N 256 //shortest dimension of A: 1536 ; for test 50
#define M 102400 //*100 or 10000
using namespace std;
#define CHECK(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
cout<< "Error:" << hipGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
__global__ void Multiply(int *A, int *B, int *C){
// calculate the row & col index of the element
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= N)
return;
int result = 0;
// product between row of a and b
for(int k = 0; k < M; ++k)
{
result += A[row + k*N] * B[k];
//printf("%d ", result);
}
C[row] = result;
}
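// --- Illustrative sketch, not part of the original file: one possible shape for
// the "fast" shared-memory variant ("variant 2") that the assignment comment at
// the top of this file asks for. The kernel name MultiplyShared and the tile
// size TILE are assumptions introduced here for illustration only. Each block
// stages a chunk of the vector B in shared memory so that every row handled by
// the block reuses it instead of re-reading B from global memory.
__global__ void MultiplyShared(int *A, int *B, int *C)
{
    const int TILE = 512;              // assumed chunk of B cached per iteration
    __shared__ int b_tile[TILE];
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int result = 0;
    for (int base = 0; base < M; base += TILE)
    {
        // cooperative load of B[base .. base+TILE) into shared memory
        for (int k = threadIdx.x; k < TILE && base + k < M; k += blockDim.x)
            b_tile[k] = B[base + k];
        __syncthreads();
        if (row < N)
        {
            int limit = min(TILE, M - base);
            for (int k = 0; k < limit; ++k)
                result += A[row + (base + k) * N] * b_tile[k]; // column-major A
        }
        __syncthreads();               // wait before the next chunk overwrites b_tile
    }
    if (row < N)
        C[row] = result;
}
// A call site would mirror the existing launch, e.g.
// hipLaunchKernelGGL(( MultiplyShared), dim3((N+511)/512), dim3(512), 0, 0, aA, aB, aRes);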
int main(int argc, char **argv)
{
srand(time(NULL));
int *A = new int [N*M];
int *b = new int [M];
int *res_CPU = new int[N];
int *res_GPU = new int[N];
int i, j;
for(i = 0; i < N; ++i)
res_CPU[i] = 0;
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
{
A[i + j*N] = rand() % 10;
//cout << A[i*N + j] << " ";
}
//cout << endl;
}
//cout << endl;
for(i = 0; i < M; ++i)
{
b[i] = rand() % 10;
//cout << b[i] << " ";
}
//cout << endl;
// shared memory: t = 0..32 - warp
clock_t startCPU = clock();
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
res_CPU[i] += A[i + j*N]*b[j];
//cout << "Res_CPU[" << i << "] = " << res_CPU[i] << " " << endl;
}
double elapsedTimeCPU = (double)(clock()-startCPU)/CLOCKS_PER_SEC;
cout << "CPU product time = " << elapsedTimeCPU*1000 << " ms\n";
int (*aA), (*aB), (*aRes);
hipEvent_t startCUDA, stopCUDA;
float elapsedTimeCUDA;
hipEventCreate(&startCUDA);
hipEventCreate(&stopCUDA);
CHECK(hipMalloc((void**)&aA, (N*M)*sizeof(int)));
CHECK(hipMalloc((void**)&aB, (M)*sizeof(int)));
CHECK(hipMalloc((void**)&aRes, (N)*sizeof(int)));
CHECK(hipMemcpy(aA, A, (N*M)*sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpy(aB, b, (M)*sizeof(int), hipMemcpyHostToDevice));
//CHECK(hipMemcpy(aRes, res_CPU, (N)*sizeof(int), hipMemcpyHostToDevice));
//int numBlocks = 1;
//dim3 threadsPerBlock(N,N);
hipEventRecord(startCUDA,0);
hipLaunchKernelGGL(( Multiply), dim3((N+511)/512), dim3(512), 0, 0, aA,aB,aRes);
hipEventRecord(stopCUDA,0);
hipEventSynchronize(stopCUDA);
CHECK(hipGetLastError());
CHECK(hipMemcpy(res_GPU, aRes, N*sizeof(int), hipMemcpyDeviceToHost));
hipEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA product time = " << elapsedTimeCUDA << " ms\n";
 cout << "CUDA memory throughput = " << ((size_t)N*M + M + N)*sizeof(int)/elapsedTimeCUDA/1024/1024/1.024 << " GB/s\n";
/*for (i = 0; i < N; i++) {
cout << "Res_GPU[" << i << "] = " << res_GPU[i] << " " << endl;
}*/
for (i = 0; i < N; i++) {
if (res_CPU[i] != res_GPU[i])
{
cout << "Not equal. Try again, again." << endl;
break;
}
}
CHECK(hipFree(aA));
CHECK(hipFree(aB));
CHECK(hipFree(aRes));
return 0;
}
| c57b9459819420d1d3aaacba4b69368e57f181ad.cu | /* In this assignment two versions of the GPU program are required: 1) the simplest and shortest possible; and 2) a fast one that uses shared memory.
Program random input-data generation for the algorithm and an automatic check that the program works correctly.
Carry out a theoretical performance estimate for both versions of the algorithm and state in the report how far the theoretical estimate differs from the measured one. */
/* Implement multiplication of a long matrix, stored by columns, by a long vector */
#include <iostream>
#define N 256 //shortest dimension of A: 1536 ; for test 50
#define M 102400 //*100 or 10000
using namespace std;
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
__global__ void Multiply(int *A, int *B, int *C){
// calculate the row & col index of the element
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= N)
return;
int result = 0;
// product between row of a and b
for(int k = 0; k < M; ++k)
{
result += A[row + k*N] * B[k];
//printf("%d ", result);
}
C[row] = result;
}
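// --- Illustrative sketch, not part of the original file: one possible shape for
// the "fast" shared-memory variant ("variant 2") that the assignment comment at
// the top of this file asks for. The kernel name MultiplyShared and the tile
// size TILE are assumptions introduced here for illustration only. Each block
// stages a chunk of the vector B in shared memory so that every row handled by
// the block reuses it instead of re-reading B from global memory.
__global__ void MultiplyShared(int *A, int *B, int *C)
{
    const int TILE = 512;              // assumed chunk of B cached per iteration
    __shared__ int b_tile[TILE];
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int result = 0;
    for (int base = 0; base < M; base += TILE)
    {
        // cooperative load of B[base .. base+TILE) into shared memory
        for (int k = threadIdx.x; k < TILE && base + k < M; k += blockDim.x)
            b_tile[k] = B[base + k];
        __syncthreads();
        if (row < N)
        {
            int limit = min(TILE, M - base);
            for (int k = 0; k < limit; ++k)
                result += A[row + (base + k) * N] * b_tile[k]; // column-major A
        }
        __syncthreads();               // wait before the next chunk overwrites b_tile
    }
    if (row < N)
        C[row] = result;
}
// A call site would mirror the existing launch, e.g.
// MultiplyShared<<<(N+511)/512, 512>>>(aA, aB, aRes);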
int main(int argc, char **argv)
{
srand(time(NULL));
int *A = new int [N*M];
int *b = new int [M];
int *res_CPU = new int[N];
int *res_GPU = new int[N];
int i, j;
for(i = 0; i < N; ++i)
res_CPU[i] = 0;
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
{
A[i + j*N] = rand() % 10;
//cout << A[i*N + j] << " ";
}
//cout << endl;
}
//cout << endl;
for(i = 0; i < M; ++i)
{
b[i] = rand() % 10;
//cout << b[i] << " ";
}
//cout << endl;
// shared memory: t = 0..32 - warp
clock_t startCPU = clock();
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
res_CPU[i] += A[i + j*N]*b[j];
//cout << "Res_CPU[" << i << "] = " << res_CPU[i] << " " << endl;
}
double elapsedTimeCPU = (double)(clock()-startCPU)/CLOCKS_PER_SEC;
cout << "CPU product time = " << elapsedTimeCPU*1000 << " ms\n";
int (*aA), (*aB), (*aRes);
cudaEvent_t startCUDA, stopCUDA;
float elapsedTimeCUDA;
cudaEventCreate(&startCUDA);
cudaEventCreate(&stopCUDA);
CHECK(cudaMalloc((void**)&aA, (N*M)*sizeof(int)));
CHECK(cudaMalloc((void**)&aB, (M)*sizeof(int)));
CHECK(cudaMalloc((void**)&aRes, (N)*sizeof(int)));
CHECK(cudaMemcpy(aA, A, (N*M)*sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(aB, b, (M)*sizeof(int), cudaMemcpyHostToDevice));
//CHECK(cudaMemcpy(aRes, res_CPU, (N)*sizeof(int), cudaMemcpyHostToDevice));
//int numBlocks = 1;
//dim3 threadsPerBlock(N,N);
cudaEventRecord(startCUDA,0);
Multiply<<<(N+511)/512, 512>>>(aA,aB,aRes);
cudaEventRecord(stopCUDA,0);
cudaEventSynchronize(stopCUDA);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(res_GPU, aRes, N*sizeof(int), cudaMemcpyDeviceToHost));
cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA product time = " << elapsedTimeCUDA << " ms\n";
 cout << "CUDA memory throughput = " << ((size_t)N*M + M + N)*sizeof(int)/elapsedTimeCUDA/1024/1024/1.024 << " GB/s\n";
/*for (i = 0; i < N; i++) {
cout << "Res_GPU[" << i << "] = " << res_GPU[i] << " " << endl;
}*/
for (i = 0; i < N; i++) {
if (res_CPU[i] != res_GPU[i])
{
cout << "Not equal. Try again, again." << endl;
break;
}
}
CHECK(cudaFree(aA));
CHECK(cudaFree(aB));
CHECK(cudaFree(aRes));
return 0;
}
|
e9fb26a908ee723d0ae348cbe662e94a4b5a19a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "../Box_ops/poly_iou_utils.h"
using namespace pet;
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename T>
__global__ void poly_nms_kernel(
const int n_polys,
const float iou_threshold,
const T* dev_polys,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_polys - row_start * threadsPerBlock, threadsPerBlock);
const int cols_size =
min(n_polys - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_polys[threadsPerBlock * 9];
if (threadIdx.x < cols_size) {
block_polys[threadIdx.x * 9 + 0] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 0];
block_polys[threadIdx.x * 9 + 1] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 1];
block_polys[threadIdx.x * 9 + 2] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 2];
block_polys[threadIdx.x * 9 + 3] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 3];
block_polys[threadIdx.x * 9 + 4] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 4];
block_polys[threadIdx.x * 9 + 5] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 5];
block_polys[threadIdx.x * 9 + 6] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 6];
block_polys[threadIdx.x * 9 + 7] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 7];
block_polys[threadIdx.x * 9 + 8] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 8];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_polys + cur_box_idx * 9;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < cols_size; i++) {
if (single_poly_iou<T>(cur_box, block_polys + i * 9) > iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_polys, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace pet {
// dets is a N x 9 tensor
at::Tensor poly_nms_cuda(
const at::Tensor& dets,
float iou_threshold) {
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
auto scores = dets.select(1, 8);
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.scalar_type(), "ml_nms_kernel_cuda", [&] {
hipLaunchKernelGGL(( poly_nms_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace pet
| e9fb26a908ee723d0ae348cbe662e94a4b5a19a4.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "../Box_ops/poly_iou_utils.h"
using namespace pet;
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename T>
__global__ void poly_nms_kernel(
const int n_polys,
const float iou_threshold,
const T* dev_polys,
unsigned long long* dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_polys - row_start * threadsPerBlock, threadsPerBlock);
const int cols_size =
min(n_polys - col_start * threadsPerBlock, threadsPerBlock);
__shared__ T block_polys[threadsPerBlock * 9];
if (threadIdx.x < cols_size) {
block_polys[threadIdx.x * 9 + 0] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 0];
block_polys[threadIdx.x * 9 + 1] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 1];
block_polys[threadIdx.x * 9 + 2] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 2];
block_polys[threadIdx.x * 9 + 3] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 3];
block_polys[threadIdx.x * 9 + 4] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 4];
block_polys[threadIdx.x * 9 + 5] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 5];
block_polys[threadIdx.x * 9 + 6] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 6];
block_polys[threadIdx.x * 9 + 7] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 7];
block_polys[threadIdx.x * 9 + 8] =
dev_polys[(threadsPerBlock * col_start + threadIdx.x) * 9 + 8];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_polys + cur_box_idx * 9;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < cols_size; i++) {
if (single_poly_iou<T>(cur_box, block_polys + i * 9) > iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_polys, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace pet {
// dets is a N x 9 tensor
at::Tensor poly_nms_cuda(
const at::Tensor& dets,
float iou_threshold) {
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(dets.device());
auto scores = dets.select(1, 8);
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.scalar_type(), "ml_nms_kernel_cuda", [&] {
poly_nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data_ptr<scalar_t>(),
(unsigned long long*)mask.data_ptr<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data_ptr<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace pet
|
751c31a5e9fc8081cd85d5c6a31a6d57704efab8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
void set_alpha_fp16(uint32_t& alpha, float norm) {
__half2_uint32_t_union temp;
temp.fp162 = __float2half2_rn(norm);
alpha = temp.u32;
}
class FusedMHARunnerFP16v2::mhaImpl {
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface),
sm(interface->mSm),
xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
"Unsupported architecture");
flash_attention_kernel = nullptr;
if (interface->mEnableFlashAttention) {
flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
}
params.clear();
}
~mhaImpl() {}
void setup(const int S, const int B) {
// For bert and vit, use flash attention when sequence length is larger than the threshold.
use_flash_attention = is_flash_attention(S);
params.force_unroll = use_flash_attention;
size_t warps_m = 2;
size_t warps_n = 2;
size_t warps_k = 1;
if (use_flash_attention) {
warps_m = 4;
warps_n = 1;
} else {
if (sm == 70) {
if (S == 64 || S == 96) {
warps_m = 2;
warps_n = 2;
} else if (S == 128) {
warps_m = 1;
warps_n = 4;
} else if (S == 256 || S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
} else {
if (S == 32 || S == 64 || S == 96 || S == 128) {
warps_m = 2;
warps_n = 2;
} else if (S == 192 || S == 256) {
warps_m = 1;
warps_n = 4;
} else if (S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
}
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
has_causal_mask = false;
}
void setup_causal_masked_fmha(const int S, const int B) {
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
    // fall back to the original fmha_v2 when head_size <= 64 and seq_len <= 128
use_flash_attention = interface->mEnableFlashAttention;
if (params.d <= 64 && params.s <= 128) {
use_flash_attention = false;
// get max sequence length
if (params.s > 64) {
params.s = 128;
} else {
params.s = 64;
}
}
// set flags
params.force_unroll = use_flash_attention;
has_causal_mask = true;
}
void run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
params.qkv_ptr = const_cast<void*>(input);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));
if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
flash_attention_kernel->run(params, stream);
} else {
xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
}
CUDA_CALL_THROW(hipPeekAtLastError());
}
bool isValid(int s) const {
if (is_flash_attention(s)) {
return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
}
return xmmaKernel->isValid(s);
}
int getSFromMaxSeqLen(const int max_seq_len) const {
if (is_flash_attention(max_seq_len)) {
return max_seq_len;
}
int S = max_seq_len;
if (max_seq_len <= 32) {
S = (sm == 70) ? 64 : 32;
} else if (max_seq_len <= 64) {
S = 64;
} else if (max_seq_len <= 96) {
S = 96;
} else if (max_seq_len <= 128) {
S = 128;
} else if (max_seq_len <= 192) {
S = (sm == 70) ? 256 : 192;
} else if (max_seq_len <= 256) {
S = 256;
} else if (max_seq_len <= 384) {
S = 384;
}
return S;
}
protected:
bool is_flash_attention(const int S) const {
ORT_ENFORCE(interface->mHasCausalMask == false);
return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;
size_t xmmas_m;
size_t threads_per_cta;
bool use_flash_attention = false;
bool has_causal_mask = false;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm, bool causal_mask, bool enable_flash_attention)
: MHARunner(numHeads, headSize, 2, causal_mask), mSm(sm), mEnableFlashAttention(enable_flash_attention), pimpl(new mhaImpl(this)) {
}
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
MHARunner::setup(S, B);
if (mHasCausalMask) {
pimpl->setup_causal_masked_fmha(S, B);
} else {
pimpl->setup(S, B);
}
}
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
bool enable_flash_attention, bool causal) {
if (causal) {
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (enable_flash_attention) {
return head_size == 64 ||
head_size == 32 ||
head_size == 40 ||
head_size == 80 ||
head_size == 128 ||
head_size == 144 ||
head_size == 160 ||
head_size == 256;
}
return (head_size == 64 || head_size == 32 || head_size == 40) && sequence_length <= 128;
}
bool use_flash = enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
if (use_flash && has_flash_attention_kernel(sm, head_size)) {
return true;
}
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (head_size != 64 && head_size != 32) {
return false;
}
if (sm == kSM_70 && head_size == 32) {
return false;
}
// Normal (not flash) fused kernel supports sequence length up to 384.
constexpr int max_sequence_length = 384;
return sequence_length <= max_sequence_length;
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
return 0;
}
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, hipStream_t stream) {
pimpl->run(input, cu_seqlens, output, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const {
return pimpl->isValid(s);
}
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
return pimpl->getSFromMaxSeqLen(max_seq_len);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 751c31a5e9fc8081cd85d5c6a31a6d57704efab8.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/fused_multihead_attention_v2.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/flash_attention/fmha_flash_attention.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
union __half2_uint32_t_union {
half2 fp162;
uint32_t u32;
};
void set_alpha_fp16(uint32_t& alpha, float norm) {
__half2_uint32_t_union temp;
temp.fp162 = __float2half2_rn(norm);
alpha = temp.u32;
}
class FusedMHARunnerFP16v2::mhaImpl {
public:
mhaImpl(FusedMHARunnerFP16v2* interface)
: interface(interface),
sm(interface->mSm),
xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) {
ORT_ENFORCE((sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89),
"Unsupported architecture");
flash_attention_kernel = nullptr;
if (interface->mEnableFlashAttention) {
flash_attention_kernel = get_flash_attention_kernels(DATA_TYPE_FP16, sm);
}
params.clear();
}
~mhaImpl() {}
void setup(const int S, const int B) {
// For bert and vit, use flash attention when sequence length is larger than the threshold.
use_flash_attention = is_flash_attention(S);
params.force_unroll = use_flash_attention;
size_t warps_m = 2;
size_t warps_n = 2;
size_t warps_k = 1;
if (use_flash_attention) {
warps_m = 4;
warps_n = 1;
} else {
if (sm == 70) {
if (S == 64 || S == 96) {
warps_m = 2;
warps_n = 2;
} else if (S == 128) {
warps_m = 1;
warps_n = 4;
} else if (S == 256 || S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
} else {
if (S == 32 || S == 64 || S == 96 || S == 128) {
warps_m = 2;
warps_n = 2;
} else if (S == 192 || S == 256) {
warps_m = 1;
warps_n = 4;
} else if (S == 384) {
warps_m = 1;
warps_n = 8;
} else {
ORT_ENFORCE(false, "Unsupported sequence length");
}
}
}
// The number of threads per CTA.
threads_per_cta = warps_m * warps_n * warps_k * 32;
// The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension.
xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m);
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
has_causal_mask = false;
}
void setup_causal_masked_fmha(const int S, const int B) {
const float scale_bmm1 = interface->mRsqrtHeadSize;
const float scale_softmax = 1.f; // Seems to be only required for int8
const float scale_bmm2 = 1.f;
set_alpha_fp16(params.scale_bmm1, scale_bmm1);
set_alpha_fp16(params.scale_softmax, scale_softmax);
set_alpha_fp16(params.scale_bmm2, scale_bmm2);
params.b = B;
params.h = interface->mNumHeads;
params.s = S;
params.d = interface->mHeadSize;
params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half);
params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half);
    // fall back to the original fmha_v2 when head_size <= 64 and seq_len <= 128
use_flash_attention = interface->mEnableFlashAttention;
if (params.d <= 64 && params.s <= 128) {
use_flash_attention = false;
// get max sequence length
if (params.s > 64) {
params.s = 128;
} else {
params.s = 64;
}
}
// set flags
params.force_unroll = use_flash_attention;
has_causal_mask = true;
}
void run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
params.qkv_ptr = const_cast<void*>(input);
params.o_ptr = output;
params.cu_seqlens = static_cast<int*>(const_cast<void*>(cu_seqlens));
if (use_flash_attention && flash_attention_kernel != nullptr && !has_causal_mask) {
flash_attention_kernel->run(params, stream);
} else {
xmmaKernel->run(params, stream, use_flash_attention, has_causal_mask);
}
CUDA_CALL_THROW(cudaPeekAtLastError());
}
bool isValid(int s) const {
if (is_flash_attention(s)) {
return (flash_attention_kernel != nullptr) && flash_attention_kernel->isValid(s);
}
return xmmaKernel->isValid(s);
}
int getSFromMaxSeqLen(const int max_seq_len) const {
if (is_flash_attention(max_seq_len)) {
return max_seq_len;
}
int S = max_seq_len;
if (max_seq_len <= 32) {
S = (sm == 70) ? 64 : 32;
} else if (max_seq_len <= 64) {
S = 64;
} else if (max_seq_len <= 96) {
S = 96;
} else if (max_seq_len <= 128) {
S = 128;
} else if (max_seq_len <= 192) {
S = (sm == 70) ? 256 : 192;
} else if (max_seq_len <= 256) {
S = 256;
} else if (max_seq_len <= 384) {
S = 384;
}
return S;
}
protected:
bool is_flash_attention(const int S) const {
ORT_ENFORCE(interface->mHasCausalMask == false);
return interface->mEnableFlashAttention && S >= kMinSequenceLengthFlashAttention;
}
private:
FusedMHARunnerFP16v2* interface;
Fused_multihead_attention_params_v2 params;
int sm;
const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel;
const FusedMultiHeadFlashAttentionKernel* flash_attention_kernel;
size_t xmmas_m;
size_t threads_per_cta;
bool use_flash_attention = false;
bool has_causal_mask = false;
};
FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm, bool causal_mask, bool enable_flash_attention)
: MHARunner(numHeads, headSize, 2, causal_mask), mSm(sm), mEnableFlashAttention(enable_flash_attention), pimpl(new mhaImpl(this)) {
}
void FusedMHARunnerFP16v2::setup(const int S, const int B) {
MHARunner::setup(S, B);
if (mHasCausalMask) {
pimpl->setup_causal_masked_fmha(S, B);
} else {
pimpl->setup(S, B);
}
}
bool FusedMHARunnerFP16v2::is_supported(int sm, int head_size, int sequence_length,
bool enable_flash_attention, bool causal) {
if (causal) {
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (enable_flash_attention) {
return head_size == 64 ||
head_size == 32 ||
head_size == 40 ||
head_size == 80 ||
head_size == 128 ||
head_size == 144 ||
head_size == 160 ||
head_size == 256;
}
return (head_size == 64 || head_size == 32 || head_size == 40) && sequence_length <= 128;
}
bool use_flash = enable_flash_attention && sequence_length >= kMinSequenceLengthFlashAttention;
if (use_flash && has_flash_attention_kernel(sm, head_size)) {
return true;
}
if (!(sm == kSM_70 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_89)) {
return false;
}
if (head_size != 64 && head_size != 32) {
return false;
}
if (sm == kSM_70 && head_size == 32) {
return false;
}
// Normal (not flash) fused kernel supports sequence length up to 384.
constexpr int max_sequence_length = 384;
return sequence_length <= max_sequence_length;
}
size_t FusedMHARunnerFP16v2::getWorkspaceSize() const {
return 0;
}
void FusedMHARunnerFP16v2::run(const void* input, const void* cu_seqlens, void* output, cudaStream_t stream) {
pimpl->run(input, cu_seqlens, output, stream);
}
bool FusedMHARunnerFP16v2::isValid(int s) const {
return pimpl->isValid(s);
}
int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) const {
return pimpl->getSFromMaxSeqLen(max_seq_len);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
9ec5d3efb050b93d6351fca9a27e84df275d32ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <array>
#include "plugin.h"
#include "kernel.h"
#include "hip/hip_fp16.h"
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a - b;
#else
return __float2half(__half2float(a) - __half2float(b));
#endif
}
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherTopDetections_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* keepCount,
T_BBOX* topDetections,
const T_SCORE score_shift)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
        /*
         * There may also be "bad" bounding boxes among the keepTopK bounding boxes.
         * For those entries we fill in the output fields as shown below.
         * They can only appear at the end of the keepTopK boxes, since the boxes were sorted previously,
         * and they do not affect the count of valid bounding boxes (keepCount).
         * These entries will probably never be read, because consumers have keepCount.
         */
if (index == -1)
{
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = -1; // label
topDetections[i * 7 + 2] = 0; // confidence score
// score==0 will not pass the VisualizeBBox check
topDetections[i * 7 + 3] = 0; // bbox xmin
topDetections[i * 7 + 4] = 0; // bbox ymin
topDetections[i * 7 + 5] = 0; // bbox xmax
topDetections[i * 7 + 6] = 0; // bbox ymax
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
topDetections[i * 7 + 2] = score; // confidence score
// subtract 1.0 score shift we added in sortScorePerClass
topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift);
const T_BBOX xMin = bboxData[bboxId];
const T_BBOX yMin = bboxData[bboxId + 1];
const T_BBOX xMax = bboxData[bboxId + 2];
const T_BBOX yMax = bboxData[bboxId + 3];
// clipped bbox xmin
topDetections[i * 7 + 3] = saturate(xMin);
// clipped bbox ymin
topDetections[i * 7 + 4] = saturate(yMin);
// clipped bbox xmax
topDetections[i * 7 + 5] = saturate(xMax);
// clipped bbox ymax
topDetections[i * 7 + 6] = saturate(yMax);
// Atomic add to increase the count of valid keepTopK bounding boxes
// Without having to do manual sync.
atomicAdd(&keepCount[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherTopDetections_gpu(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections,
const float score_shift
)
{
hipMemsetAsync(keepCount, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
hipLaunchKernelGGL(( gatherTopDetections_kernel<T_BBOX, T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) keepCount, (T_BBOX*) topDetections,
T_SCORE(score_shift));
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherTopDetections LAUNCH CONFIG
typedef pluginStatus_t (*gtdFunc)(hipStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*,
const float);
struct gtdLaunchConfig
{
DataType t_bbox;
DataType t_score;
gtdFunc function;
gtdLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const gtdLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::array<gtdLaunchConfig, 2> gtdLCOptions = {
gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>),
gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>)
};
pluginStatus_t gatherTopDetections(
hipStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections,
const float score_shift)
{
gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < gtdLCOptions.size(); ++i)
{
if (lc == gtdLCOptions[i])
{
DEBUG_PRINTF("gatherTopDetections kernel %d\n", i);
return gtdLCOptions[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
keepCount,
topDetections,
score_shift);
}
}
return STATUS_BAD_PARAM;
}
| 9ec5d3efb050b93d6351fca9a27e84df275d32ef.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <array>
#include "plugin.h"
#include "kernel.h"
#include "cuda_fp16.h"
inline __device__ __half minus_fb(const __half & a, const __half & b) {
#if __CUDA_ARCH__ >= 530
return a - b;
#else
return __float2half(__half2float(a) - __half2float(b));
#endif
}
template <typename T_BBOX, typename T_SCORE, unsigned nthds_per_cta>
__launch_bounds__(nthds_per_cta)
__global__ void gatherTopDetections_kernel(
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const int* indices,
const T_SCORE* scores,
const T_BBOX* bboxData,
int* keepCount,
T_BBOX* topDetections,
const T_SCORE score_shift)
{
if (keepTopK > topK)
return;
for (int i = blockIdx.x * nthds_per_cta + threadIdx.x;
i < numImages * keepTopK;
i += gridDim.x * nthds_per_cta)
{
const int imgId = i / keepTopK;
const int detId = i % keepTopK;
const int offset = imgId * numClasses * topK;
const int index = indices[offset + detId];
const T_SCORE score = scores[offset + detId];
        /*
         * There may also be "bad" bounding boxes among the keepTopK bounding boxes.
         * For those entries we fill in the output fields as shown below.
         * They can only appear at the end of the keepTopK boxes, since the boxes were sorted previously,
         * and they do not affect the count of valid bounding boxes (keepCount).
         * These entries will probably never be read, because consumers have keepCount.
         */
if (index == -1)
{
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = -1; // label
topDetections[i * 7 + 2] = 0; // confidence score
// score==0 will not pass the VisualizeBBox check
topDetections[i * 7 + 3] = 0; // bbox xmin
topDetections[i * 7 + 4] = 0; // bbox ymin
topDetections[i * 7 + 5] = 0; // bbox xmax
topDetections[i * 7 + 6] = 0; // bbox ymax
}
else
{
const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass));
const int bboxId = ((shareLocation ? (index % numPredsPerClass)
: index % (numClasses * numPredsPerClass)) + bboxOffset) * 4;
topDetections[i * 7] = imgId; // image id
topDetections[i * 7 + 1] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label
topDetections[i * 7 + 2] = score; // confidence score
// subtract 1.0 score shift we added in sortScorePerClass
topDetections[i * 7 + 2] = minus_fb(topDetections[i * 7 + 2], score_shift);
const T_BBOX xMin = bboxData[bboxId];
const T_BBOX yMin = bboxData[bboxId + 1];
const T_BBOX xMax = bboxData[bboxId + 2];
const T_BBOX yMax = bboxData[bboxId + 3];
// clipped bbox xmin
topDetections[i * 7 + 3] = saturate(xMin);
// clipped bbox ymin
topDetections[i * 7 + 4] = saturate(yMin);
// clipped bbox xmax
topDetections[i * 7 + 5] = saturate(xMax);
// clipped bbox ymax
topDetections[i * 7 + 6] = saturate(yMax);
// Atomic add to increase the count of valid keepTopK bounding boxes
// Without having to do manual sync.
atomicAdd(&keepCount[i / keepTopK], 1);
}
}
}
template <typename T_BBOX, typename T_SCORE>
pluginStatus_t gatherTopDetections_gpu(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections,
const float score_shift
)
{
cudaMemsetAsync(keepCount, 0, numImages * sizeof(int), stream);
const int BS = 32;
const int GS = 32;
gatherTopDetections_kernel<T_BBOX, T_SCORE, BS><<<GS, BS, 0, stream>>>(shareLocation, numImages, numPredsPerClass,
numClasses, topK, keepTopK,
(int*) indices, (T_SCORE*) scores, (T_BBOX*) bboxData,
(int*) keepCount, (T_BBOX*) topDetections,
T_SCORE(score_shift));
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// gatherTopDetections LAUNCH CONFIG
typedef pluginStatus_t (*gtdFunc)(cudaStream_t,
const bool,
const int,
const int,
const int,
const int,
const int,
const void*,
const void*,
const void*,
void*,
void*,
const float);
struct gtdLaunchConfig
{
DataType t_bbox;
DataType t_score;
gtdFunc function;
gtdLaunchConfig(DataType t_bbox, DataType t_score)
: t_bbox(t_bbox)
, t_score(t_score)
{
}
gtdLaunchConfig(DataType t_bbox, DataType t_score, gtdFunc function)
: t_bbox(t_bbox)
, t_score(t_score)
, function(function)
{
}
bool operator==(const gtdLaunchConfig& other)
{
return t_bbox == other.t_bbox && t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::array<gtdLaunchConfig, 2> gtdLCOptions = {
gtdLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, gatherTopDetections_gpu<float, float>),
gtdLaunchConfig(DataType::kHALF, DataType::kHALF, gatherTopDetections_gpu<__half, __half>)
};
pluginStatus_t gatherTopDetections(
cudaStream_t stream,
const bool shareLocation,
const int numImages,
const int numPredsPerClass,
const int numClasses,
const int topK,
const int keepTopK,
const DataType DT_BBOX,
const DataType DT_SCORE,
const void* indices,
const void* scores,
const void* bboxData,
void* keepCount,
void* topDetections,
const float score_shift)
{
gtdLaunchConfig lc = gtdLaunchConfig(DT_BBOX, DT_SCORE);
for (unsigned i = 0; i < gtdLCOptions.size(); ++i)
{
if (lc == gtdLCOptions[i])
{
DEBUG_PRINTF("gatherTopDetections kernel %d\n", i);
return gtdLCOptions[i].function(stream,
shareLocation,
numImages,
numPredsPerClass,
numClasses,
topK,
keepTopK,
indices,
scores,
bboxData,
keepCount,
topDetections,
score_shift);
}
}
return STATUS_BAD_PARAM;
}
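// Illustrative invocation (added for clarity; not part of the original source).
// Device buffer names are placeholders; the layouts are inferred from the kernel
// indexing above:
//   indices, scores : numImages * numClasses * topK entries each
//   bboxData        : numImages * (shareLocation ? 1 : numClasses) * numPredsPerClass boxes of 4 coordinates
//   keepCount       : numImages ints (cleared inside gatherTopDetections_gpu)
//   topDetections   : numImages * keepTopK * 7 values
//
//   pluginStatus_t status = gatherTopDetections(stream, /*shareLocation=*/true,
//       numImages, numPredsPerClass, numClasses, topK, keepTopK,
//       DataType::kFLOAT, DataType::kFLOAT,
//       d_indices, d_scores, d_bboxData, d_keepCount, d_topDetections,
//       /*score_shift=*/1.f);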
|
a24443dcabbf7b73da28f13fcae55a6e73bf21f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_w_flow.hpp"
using namespace std;
using namespace flowfilter;
using namespace flowfilter::gpu;
__global__ void wflow_K(gpuimage_t<float2> flow, gpuimage_t<float3> sflow, WFlow_Params params)
{
int2 size = make_int2(flow.width,flow.height);
int2 pix = make_int2(blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y);
if(pix.x>=size.x || pix.y>=size.y){return;}
// Access
float2 flow_pix = *coordPitch(flow, pix);
double flowX = (double)(flow_pix.x);
double flowY = (double)(flow_pix.y);
// Simplifiers
double xd = pix.x-params.x0;
double yd = pix.y-params.y0;
double xymag = sqrt( xd*xd + yd*yd );
double hypmag = sqrt( xymag*xymag + params.f*params.f);
// Structure Flow
double3 sflow_p = make_double3(0.0, 0.0, 0.0);
// printf("sfx = %0.3f\n", (float)(sflow_p.x));
if( xymag>=10 && xymag<=params.rad )
{
double theta = atan2(xymag,params.f);
double S_area = pow(cos(theta),3)/(params.f*params.f);
sflow_p.x = S_area/params.S_cap*params.f/xymag*flowX;
sflow_p.y = S_area/params.S_cap*params.f/xymag*flowY;
sflow_p.z = -S_area/params.S_cap*params.f/xymag*tan(theta)*(flowX*xd/xymag + flowY*yd/xymag);
/*if ( (pix.x==170) && (pix.y==100) )
{
printf("sf = %0.3f, %0.3f, %0.3f\n", sflow_p.x, sflow_p.y, sflow_p.z);
printf("%0.3f, %0.3f, %0.3f\n", params.f, params.S_cap, theta);
}*/
}
else
{
sflow_p.x = 0.0;
sflow_p.y = 0.0;
sflow_p.z = 0.0;
}
float3 sflow_pix = make_float3((float)(sflow_p.x), (float)(sflow_p.y), (float)(sflow_p.z));
*coordPitch(sflow, pix) = sflow_pix;
}
void configureKernelGrid(const int height, const int width, const dim3 block, dim3& grid)
{
float w = width;
float h = height;
float x = block.x;
float y = block.y;
grid.x = (int)ceilf(w / x);
grid.y = (int)ceilf(h / y);
grid.z = 1;
}
WFlow_GPU::WFlow_GPU()
{
}
WFlow_GPU::~WFlow_GPU()
{
}
/* OLD VERSION
__global__ void wflow_K(gpuimage_t<float2> flow, gpuimage_t<float3> sflow, WFlow_Params params)
{
int2 size = make_int2(flow.width,flow.height);
int2 pix = make_int2(blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y);
if(pix.x>=size.x || pix.y>=size.y){return;}
// Access
float2 flow_pix = *coordPitch(flow, pix);
double flowX = (double)(flow_pix.x);
double flowY = (double)(flow_pix.y);
// Simplifiers
double xd = abs(pix.x-params.x0);
double xd2 = xd*xd;
double yd = abs(pix.y-params.y0);
double yd2 = yd*yd;
double f2 = params.f*params.f;
double pl2 = params.pix_len*params.pix_len;
double dist = sqrt( (xd)*(xd) + (yd)*(yd) );
// Structure Flow
double3 sflow_p = make_double3(0.0, 0.0, 0.0);
// printf("sfx = %0.3f\n", (float)(sflow_p.x));
if( 78.0<=dist && dist<=params.rad )
{
sflow_p.x = -(1.0/(f2)*(pl2)*(flowX*(xd2/(xd2+yd2+f2)-1.0)*1.0/sqrt(xd2+yd2+f2)+flowY*(xd)*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0))*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0))/params.S_cap;
sflow_p.y = -(1.0/(f2)*(pl2)*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0)*(flowY*(yd2/(xd2+yd2+f2)-1.0)*1.0/sqrt(xd2+yd2+f2)+flowX*(xd)*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0)))/params.S_cap;
sflow_p.z = -(1.0/(f2)*(pl2)*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0)*(params.f*flowX*(xd)*1.0/pow(xd2+yd2+f2,3.0/2.0)+params.f*flowY*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0)))/params.S_cap;
}
else
{
sflow_p.x = 0.0;
sflow_p.y = 0.0;
sflow_p.z = 0.0;
}
// if ( (pix.x==50) && (pix.y==50) )
// { printf("sfx = %0.3f\n", (float)(sflow_p.x)); }
float3 sflow_pix = make_float3((float)(sflow_p.x), (float)(sflow_p.y), (float)(sflow_p.z));
*coordPitch(sflow, pix) = sflow_pix;
}
*/
void
WFlow_GPU::calc_wflow()
{
__block = dim3(16, 16, 1);
configureKernelGrid((int)wf_params.cam_h, (int)wf_params.cam_w, __block, __grid);
hipLaunchKernelGGL(( wflow_K), dim3(__grid), dim3(__block), 0, 0, flow_gpu.wrap<float2>(), sflow_gpu.wrap<float3>(), wf_params);
}
| a24443dcabbf7b73da28f13fcae55a6e73bf21f6.cu | #include "gpu_w_flow.hpp"
using namespace std;
using namespace flowfilter;
using namespace flowfilter::gpu;
__global__ void wflow_K(gpuimage_t<float2> flow, gpuimage_t<float3> sflow, WFlow_Params params)
{
int2 size = make_int2(flow.width,flow.height);
int2 pix = make_int2(blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y);
if(pix.x>=size.x || pix.y>=size.y){return;}
// Access
float2 flow_pix = *coordPitch(flow, pix);
double flowX = (double)(flow_pix.x);
double flowY = (double)(flow_pix.y);
// Simplifiers
double xd = pix.x-params.x0;
double yd = pix.y-params.y0;
double xymag = sqrt( xd*xd + yd*yd );
double hypmag = sqrt( xymag*xymag + params.f*params.f);
// Structure Flow
double3 sflow_p = make_double3(0.0, 0.0, 0.0);
// printf("sfx = %0.3f\n", (float)(sflow_p.x));
if( xymag>=10 && xymag<=params.rad )
{
double theta = atan2(xymag,params.f);
double S_area = pow(cos(theta),3)/(params.f*params.f);
sflow_p.x = S_area/params.S_cap*params.f/xymag*flowX;
sflow_p.y = S_area/params.S_cap*params.f/xymag*flowY;
sflow_p.z = -S_area/params.S_cap*params.f/xymag*tan(theta)*(flowX*xd/xymag + flowY*yd/xymag);
/*if ( (pix.x==170) && (pix.y==100) )
{
printf("sf = %0.3f, %0.3f, %0.3f\n", sflow_p.x, sflow_p.y, sflow_p.z);
printf("%0.3f, %0.3f, %0.3f\n", params.f, params.S_cap, theta);
}*/
}
else
{
sflow_p.x = 0.0;
sflow_p.y = 0.0;
sflow_p.z = 0.0;
}
float3 sflow_pix = make_float3((float)(sflow_p.x), (float)(sflow_p.y), (float)(sflow_p.z));
*coordPitch(sflow, pix) = sflow_pix;
}
void configureKernelGrid(const int height, const int width, const dim3 block, dim3& grid)
{
float w = width;
float h = height;
float x = block.x;
float y = block.y;
grid.x = (int)ceilf(w / x);
grid.y = (int)ceilf(h / y);
grid.z = 1;
}
WFlow_GPU::WFlow_GPU()
{
}
WFlow_GPU::~WFlow_GPU()
{
}
/* OLD VERSION
__global__ void wflow_K(gpuimage_t<float2> flow, gpuimage_t<float3> sflow, WFlow_Params params)
{
int2 size = make_int2(flow.width,flow.height);
int2 pix = make_int2(blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y);
if(pix.x>=size.x || pix.y>=size.y){return;}
// Access
float2 flow_pix = *coordPitch(flow, pix);
double flowX = (double)(flow_pix.x);
double flowY = (double)(flow_pix.y);
// Simplifiers
double xd = abs(pix.x-params.x0);
double xd2 = xd*xd;
double yd = abs(pix.y-params.y0);
double yd2 = yd*yd;
double f2 = params.f*params.f;
double pl2 = params.pix_len*params.pix_len;
double dist = sqrt( (xd)*(xd) + (yd)*(yd) );
// Structure Flow
double3 sflow_p = make_double3(0.0, 0.0, 0.0);
// printf("sfx = %0.3f\n", (float)(sflow_p.x));
if( 78.0<=dist && dist<=params.rad )
{
sflow_p.x = -(1.0/(f2)*(pl2)*(flowX*(xd2/(xd2+yd2+f2)-1.0)*1.0/sqrt(xd2+yd2+f2)+flowY*(xd)*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0))*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0))/params.S_cap;
sflow_p.y = -(1.0/(f2)*(pl2)*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0)*(flowY*(yd2/(xd2+yd2+f2)-1.0)*1.0/sqrt(xd2+yd2+f2)+flowX*(xd)*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0)))/params.S_cap;
sflow_p.z = -(1.0/(f2)*(pl2)*1.0/pow(1.0/(f2)*(xd2+yd2)+1.0,3.0/2.0)*(params.f*flowX*(xd)*1.0/pow(xd2+yd2+f2,3.0/2.0)+params.f*flowY*(yd)*1.0/pow(xd2+yd2+f2,3.0/2.0)))/params.S_cap;
}
else
{
sflow_p.x = 0.0;
sflow_p.y = 0.0;
sflow_p.z = 0.0;
}
// if ( (pix.x==50) && (pix.y==50) )
// { printf("sfx = %0.3f\n", (float)(sflow_p.x)); }
float3 sflow_pix = make_float3((float)(sflow_p.x), (float)(sflow_p.y), (float)(sflow_p.z));
*coordPitch(sflow, pix) = sflow_pix;
}
*/
void
WFlow_GPU::calc_wflow()
{
__block = dim3(16, 16, 1);
configureKernelGrid((int)wf_params.cam_h, (int)wf_params.cam_w, __block, __grid);
wflow_K<<<__grid, __block>>>(flow_gpu.wrap<float2>(), sflow_gpu.wrap<float3>(), wf_params);
}
|
16bd626ed8b75f4b6c34de36c30ab59dfbc3dcf9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2018 Lip Wee Yeo Amano
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/** libkeccak-tiny
*
* A single-file implementation of SHA-3 and SHAKE.
*
* Modified for CUDA processing by lwYeo
* Date: August, 2018
*
* Implementor: David Leon Gil
* License: CC0, attribution kindly requested. Blame taken too,
* but not liability.
*/
#include "cudaSolver.h"
#include "cudaErrorCheck.cu"
typedef union
{
uint2 uint2;
uint64_t uint64;
uint8_t uint8[UINT64_LENGTH];
} nonce_t;
__constant__ uint8_t d_message[MESSAGE_LENGTH];
__constant__ uint8_t d_target[UINT256_LENGTH];
/******** The Keccak-f[1600] permutation ********/
/*** Constants. ***/
__constant__ static const uint8_t rho[24] =
{
1, 3, 6, 10, 15, 21,
28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43,
62, 18, 39, 61, 20, 44
};
__constant__ static const uint8_t pi[24] =
{
10, 7, 11, 17, 18, 3,
5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2,
20, 14, 22, 9, 6, 1
};
/*** Helper macros to unroll the permutation. ***/
#define delim 0x01
#define rate SPONGE_LENGTH - (256 / 4)
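// Note (added): assuming SPONGE_LENGTH is the 200-byte Keccak-f[1600] state size
// (defined elsewhere in the project), rate evaluates to 200 - 64 = 136 bytes, the
// Keccak-256 sponge rate. Every use below is precedence-safe, although wrapping the
// macro body in parentheses would be the defensive choice.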
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
#define REPEAT6(e) e e e e e e
#define REPEAT24(e) REPEAT6(e e e e)
#define REPEAT5(e) e e e e e
#define FOR5(v, s, e) v = 0; REPEAT5(e; v += s;)
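// Worked expansion (added for clarity): FOR5(x, 1, body) unrolls to
//   x = 0; body; x += 1; body; x += 1; body; x += 1; body; x += 1; body; x += 1;
// so body runs for x = 0..4 and x is left equal to 5. REPEAT24 pastes its argument
// 24 times, which is how the rho/pi step below avoids an explicit for loop.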
/*** Keccak-f[1600] ***/
__device__ __forceinline__ static void keccakf(void *state)
{
uint64_t *a{ (uint64_t *)state };
uint64_t b[5]{ 0 };
uint64_t t{ 0 };
uint8_t x, y;
# pragma unroll
for (uint32_t i{ 0 }; i < 24u; ++i)
{
// Theta
FOR5(x, 1,
b[x] = 0;
FOR5(y, 5,
b[x] ^= a[x + y]; ))
FOR5(x, 1,
FOR5(y, 5,
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
// Rho and pi
t = a[1];
x = 0;
REPEAT24(b[0] = a[pi[x]];
a[pi[x]] = rol(t, rho[x]);
t = b[0];
x++; )
// Chi
FOR5(y, 5,
FOR5(x, 1,
b[x] = a[y + x];)
FOR5(x, 1,
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
// Iota
a[0] ^= Keccak_f1600_RC[i];
}
}
/******** The FIPS202-defined functions. ********/
/*** Some helper macros. ***/
//#define P keccakf
#define _(S) do { S } while (0)
#define FOR(i, ST, L, S) _(for (size_t i = 0; i < L; i += ST) { S; })
#define mkapply_ds(NAME, S) \
__device__ __forceinline__ static void NAME(uint8_t* dst, const uint8_t* src, size_t len) \
{ \
FOR(i, 1, len, S); \
}
mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
#define mkapply_sd(NAME, S) \
__device__ __forceinline__ static void NAME(const uint8_t* src, uint8_t* dst, size_t len) \
{ \
FOR(i, 1, len, S); \
}
mkapply_sd(setout, dst[i] = src[i]) // setout
// Fold keccakf * F over the full blocks of an input
#define foldP(I, L, F) \
while (L >= rate) \
{ \
F(sponge, I, rate); \
keccakf(sponge); \
I += rate; \
L -= rate; \
}
__device__ __forceinline__ static void keccak256(uint8_t *digest, uint8_t const *message)
{
uint8_t sponge[SPONGE_LENGTH]{ 0 };
uint32_t messageLength{ MESSAGE_LENGTH };
uint32_t digestLength{ UINT256_LENGTH };
// Absorb input.
foldP(message, messageLength, xorin);
// Xor in the DS and pad frame.
sponge[messageLength] ^= delim;
sponge[rate - 1] ^= 0x80;
// Xor in the last block.
xorin(sponge, message, messageLength);
// Apply keccakf
keccakf(sponge);
// Squeeze output.
foldP(digest, digestLength, setout);
setout(sponge, digest, digestLength);
}
__device__ __forceinline__ static bool islessThan(uint8_t *left, uint8_t *right)
{
for (uint32_t i{ 0 }; i < UINT256_LENGTH; ++i)
{
if (left[i] < right[i]) return true;
else if (left[i] > right[i]) return false;
}
return false;
}
__global__ void hashMessage(uint64_t *__restrict__ solutions, uint32_t *__restrict__ solutionCount, uint32_t maxSolutionCount, uint64_t startPosition)
{
uint8_t digest[UINT256_LENGTH];
uint8_t message[MESSAGE_LENGTH];
memcpy(message, d_message, MESSAGE_LENGTH);
nonce_t nonce;
nonce.uint64 = startPosition + (blockDim.x * blockIdx.x + threadIdx.x);
memcpy(&message[NONCE_POSITION], &nonce, UINT64_LENGTH);
keccak256(digest, message);
if (islessThan(digest, d_target))
{
if (*solutionCount < maxSolutionCount)
{
solutions[*solutionCount] = nonce.uint64;
(*solutionCount)++;
}
}
}
// --------------------------------------------------------------------
// CudaSolver
// --------------------------------------------------------------------
namespace CUDASolver
{
void CudaSolver::PushTarget(byte32_t *target, const char *errorMessage)
{
CudaCheckError(hipMemcpyToSymbol(d_target, target, UINT256_LENGTH, 0, hipMemcpyHostToDevice), errorMessage);
}
void CudaSolver::PushMessage(message_ut *message, const char *errorMessage)
{
CudaCheckError(hipMemcpyToSymbol(d_message, message, MESSAGE_LENGTH, 0, hipMemcpyHostToDevice), errorMessage);
}
void CudaSolver::HashMessage(DeviceCUDA *device, const char *errorMessage)
{
hipLaunchKernelGGL(( hashMessage), dim3(device->Grid), dim3(device->Block), 0, 0, device->SolutionsDevice, device->SolutionCountDevice, device->MaxSolutionCount, device->WorkPosition);
CudaSyncAndCheckError(errorMessage);
}
} | 16bd626ed8b75f4b6c34de36c30ab59dfbc3dcf9.cu | /*
Copyright 2018 Lip Wee Yeo Amano
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/** libkeccak-tiny
*
* A single-file implementation of SHA-3 and SHAKE.
*
* Modified for CUDA processing by lwYeo
* Date: August, 2018
*
* Implementor: David Leon Gil
* License: CC0, attribution kindly requested. Blame taken too,
* but not liability.
*/
#include "cudaSolver.h"
#include "cudaErrorCheck.cu"
typedef union
{
uint2 uint2;
uint64_t uint64;
uint8_t uint8[UINT64_LENGTH];
} nonce_t;
__constant__ uint8_t d_message[MESSAGE_LENGTH];
__constant__ uint8_t d_target[UINT256_LENGTH];
/******** The Keccak-f[1600] permutation ********/
/*** Constants. ***/
__constant__ static const uint8_t rho[24] =
{
1, 3, 6, 10, 15, 21,
28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43,
62, 18, 39, 61, 20, 44
};
__constant__ static const uint8_t pi[24] =
{
10, 7, 11, 17, 18, 3,
5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2,
20, 14, 22, 9, 6, 1
};
/*** Helper macros to unroll the permutation. ***/
#define delim 0x01
#define rate SPONGE_LENGTH - (256 / 4)
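// Note (added): assuming SPONGE_LENGTH is the 200-byte Keccak-f[1600] state size
// (defined elsewhere in the project), rate evaluates to 200 - 64 = 136 bytes, the
// Keccak-256 sponge rate. Every use below is precedence-safe, although wrapping the
// macro body in parentheses would be the defensive choice.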
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
#define REPEAT6(e) e e e e e e
#define REPEAT24(e) REPEAT6(e e e e)
#define REPEAT5(e) e e e e e
#define FOR5(v, s, e) v = 0; REPEAT5(e; v += s;)
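// Worked expansion (added for clarity): FOR5(x, 1, body) unrolls to
//   x = 0; body; x += 1; body; x += 1; body; x += 1; body; x += 1; body; x += 1;
// so body runs for x = 0..4 and x is left equal to 5. REPEAT24 pastes its argument
// 24 times, which is how the rho/pi step below avoids an explicit for loop.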
/*** Keccak-f[1600] ***/
__device__ __forceinline__ static void keccakf(void *state)
{
uint64_t *a{ (uint64_t *)state };
uint64_t b[5]{ 0 };
uint64_t t{ 0 };
uint8_t x, y;
# pragma unroll
for (uint32_t i{ 0 }; i < 24u; ++i)
{
// Theta
FOR5(x, 1,
b[x] = 0;
FOR5(y, 5,
b[x] ^= a[x + y]; ))
FOR5(x, 1,
FOR5(y, 5,
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
// Rho and pi
t = a[1];
x = 0;
REPEAT24(b[0] = a[pi[x]];
a[pi[x]] = rol(t, rho[x]);
t = b[0];
x++; )
// Chi
FOR5(y, 5,
FOR5(x, 1,
b[x] = a[y + x];)
FOR5(x, 1,
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
// Iota
a[0] ^= Keccak_f1600_RC[i];
}
}
/******** The FIPS202-defined functions. ********/
/*** Some helper macros. ***/
//#define P keccakf
#define _(S) do { S } while (0)
#define FOR(i, ST, L, S) _(for (size_t i = 0; i < L; i += ST) { S; })
#define mkapply_ds(NAME, S) \
__device__ __forceinline__ static void NAME(uint8_t* dst, const uint8_t* src, size_t len) \
{ \
FOR(i, 1, len, S); \
}
mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
#define mkapply_sd(NAME, S) \
__device__ __forceinline__ static void NAME(const uint8_t* src, uint8_t* dst, size_t len) \
{ \
FOR(i, 1, len, S); \
}
mkapply_sd(setout, dst[i] = src[i]) // setout
// Fold keccakf * F over the full blocks of an input
#define foldP(I, L, F) \
while (L >= rate) \
{ \
F(sponge, I, rate); \
keccakf(sponge); \
I += rate; \
L -= rate; \
}
__device__ __forceinline__ static void keccak256(uint8_t *digest, uint8_t const *message)
{
uint8_t sponge[SPONGE_LENGTH]{ 0 };
uint32_t messageLength{ MESSAGE_LENGTH };
uint32_t digestLength{ UINT256_LENGTH };
// Absorb input.
foldP(message, messageLength, xorin);
// Xor in the DS and pad frame.
sponge[messageLength] ^= delim;
sponge[rate - 1] ^= 0x80;
// Xor in the last block.
xorin(sponge, message, messageLength);
// Apply keccakf
keccakf(sponge);
// Squeeze output.
foldP(digest, digestLength, setout);
setout(sponge, digest, digestLength);
}
__device__ __forceinline__ static bool islessThan(uint8_t *left, uint8_t *right)
{
for (uint32_t i{ 0 }; i < UINT256_LENGTH; ++i)
{
if (left[i] < right[i]) return true;
else if (left[i] > right[i]) return false;
}
return false;
}
__global__ void hashMessage(uint64_t *__restrict__ solutions, uint32_t *__restrict__ solutionCount, uint32_t maxSolutionCount, uint64_t startPosition)
{
uint8_t digest[UINT256_LENGTH];
uint8_t message[MESSAGE_LENGTH];
memcpy(message, d_message, MESSAGE_LENGTH);
nonce_t nonce;
nonce.uint64 = startPosition + (blockDim.x * blockIdx.x + threadIdx.x);
memcpy(&message[NONCE_POSITION], &nonce, UINT64_LENGTH);
keccak256(digest, message);
if (islessThan(digest, d_target))
{
if (*solutionCount < maxSolutionCount)
{
solutions[*solutionCount] = nonce.uint64;
(*solutionCount)++;
}
}
}
// --------------------------------------------------------------------
// CudaSolver
// --------------------------------------------------------------------
namespace CUDASolver
{
void CudaSolver::PushTarget(byte32_t *target, const char *errorMessage)
{
CudaCheckError(cudaMemcpyToSymbol(d_target, target, UINT256_LENGTH, 0, cudaMemcpyHostToDevice), errorMessage);
}
void CudaSolver::PushMessage(message_ut *message, const char *errorMessage)
{
CudaCheckError(cudaMemcpyToSymbol(d_message, message, MESSAGE_LENGTH, 0, cudaMemcpyHostToDevice), errorMessage);
}
void CudaSolver::HashMessage(DeviceCUDA *device, const char *errorMessage)
{
hashMessage<<<device->Grid, device->Block>>>(device->SolutionsDevice, device->SolutionCountDevice, device->MaxSolutionCount, device->WorkPosition);
CudaSyncAndCheckError(errorMessage);
}
} |
d5cfe549f8515a8b0af89492b858b7f8d71c5894.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void newdt0 (double * dt, const double * __restrict__ dtr,
double * dtp, double * drp) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
dtp[ip] = dt[ip];
drp[ip] = dtr[ip];
dt[ip] += dtr[ip];
}
__global__ void newdt(double * dt, const double * __restrict__ dtr,
double * dtp, double * drp,
double s1, double s2, double m, int niv, int biv) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double u = dt[ip] + s1 * (dtp[ip] - dt[ip]) + s2 * (dtp[ip + niv] - dt[ip]);
double v = dt[ip] + dtr[ip]
+ s1 * (dtp[ip] + drp[ip] - dt[ip] - dtr[ip])
+ s2 * (dtp[ip + niv] + drp[ip + niv] - dt[ip] - dtr[ip]);
dtp[ip + biv] = dt[ip];
drp[ip + biv] = dtr[ip];
dt[ip] = u + m * (v - u);
}
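// Added note: theta30/theta31/theta32 below implement a three-stage shared-memory
// tree reduction of five accumulators. With t1 = dtr[ip] - drp[ip] and
// t2 = dtr[ip] - drp[ip + niv], each thread contributes
//   dtr*t1, dtr*t2, t1*t1, t1*t2, t2*t2.
// theta30 leaves one 5-tuple of partial sums per block in ds, theta31 folds those
// partials into ds2, and theta32 collapses ds2 into the five totals ds[0..4] using a
// single block. The s >>= 1 loops assume power-of-two block sizes.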
__global__ void theta30(double * ds, const double * __restrict__ dtr,
const double * __restrict__ drp, int niv) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double t1 = dtr[ip] - drp[ip];
double t2 = dtr[ip] - drp[ip + niv];
sdata[threadIdx.x] = dtr[ip] * t1;
sdata[threadIdx.x + blockDim.x] = dtr[ip] * t2;
sdata[threadIdx.x + blockDim.x * 2] = t1 * t1;
sdata[threadIdx.x + blockDim.x * 3] = t1 * t2;
sdata[threadIdx.x + blockDim.x * 4] = t2 * t2;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
ds[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
ds[blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y] =
sdata[blockDim.x];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 2] =
sdata[blockDim.x * 2];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 3] =
sdata[blockDim.x * 3];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 4] =
sdata[blockDim.x * 4];
}
}
__global__ void theta31(double * ds2, double * ds) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x;
sdata[threadIdx.x] = ds[ip];
sdata[threadIdx.x + blockDim.x] = ds[ip + blockDim.x * gridDim.x];
sdata[threadIdx.x + blockDim.x * 2] = ds[ip + blockDim.x * gridDim.x * 2];
sdata[threadIdx.x + blockDim.x * 3] = ds[ip + blockDim.x * gridDim.x * 3];
sdata[threadIdx.x + blockDim.x * 4] = ds[ip + blockDim.x * gridDim.x * 4];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
ds2[blockIdx.x] = sdata[0];
ds2[blockIdx.x + gridDim.x] = sdata[blockDim.x];
ds2[blockIdx.x + gridDim.x * 2] = sdata[blockDim.x * 2];
ds2[blockIdx.x + gridDim.x * 3] = sdata[blockDim.x * 3];
ds2[blockIdx.x + gridDim.x * 4] = sdata[blockDim.x * 4];
}
}
__global__ void theta32(double * ds, double * ds2) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x;
sdata[ip] = ds2[ip];
sdata[ip + blockDim.x] = ds2[ip + blockDim.x];
sdata[ip + blockDim.x * 2] = ds2[ip + blockDim.x * 2];
sdata[ip + blockDim.x * 3] = ds2[ip + blockDim.x * 3];
sdata[ip + blockDim.x * 4] = ds2[ip + blockDim.x * 4];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (ip < s) {
sdata[ip] += sdata[ip + s];
sdata[ip + blockDim.x] += sdata[ip + blockDim.x + s];
sdata[ip + blockDim.x * 2] += sdata[ip + blockDim.x * 2 + s];
sdata[ip + blockDim.x * 3] += sdata[ip + blockDim.x * 3 + s];
sdata[ip + blockDim.x * 4] += sdata[ip + blockDim.x * 4 + s];
}
__syncthreads();
}
if (ip == 0) {
ds[0] = sdata[0];
ds[1] = sdata[blockDim.x];
ds[2] = sdata[blockDim.x * 2];
ds[3] = sdata[blockDim.x * 3];
ds[4] = sdata[blockDim.x * 4];
}
}
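// Illustrative host-side driver (added for clarity; not part of the original file).
// The launch geometry is an assumption: block sizes must be powers of two for the
// s >>= 1 reduction loops, g2 * b2 must equal gx * gy, and ds / ds2 must hold at
// least 5 * gx * gy and 5 * g2 doubles respectively.
static void theta3_reduce(double * ds, double * ds2,
                          const double * dtr, const double * drp, int niv,
                          int gx, int gy, int bx, int b2) {
  // stage 1: each of the gx*gy blocks accumulates five partial sums
  // (5 doubles of shared memory per thread)
  hipLaunchKernelGGL(( theta30), dim3(gx, gy), dim3(bx), 5 * bx * sizeof(double), 0, ds, dtr, drp, niv);
  // stage 2: fold the gx*gy partials down to g2 partials per component
  int g2 = (gx * gy) / b2;
  hipLaunchKernelGGL(( theta31), dim3(g2), dim3(b2), 5 * b2 * sizeof(double), 0, ds2, ds);
  // stage 3: a single block of g2 threads leaves the five totals in ds[0..4]
  hipLaunchKernelGGL(( theta32), dim3(1), dim3(g2), 5 * g2 * sizeof(double), 0, ds, ds2);
}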
| d5cfe549f8515a8b0af89492b858b7f8d71c5894.cu | __global__ void newdt0 (double * dt, const double * __restrict__ dtr,
double * dtp, double * drp) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
dtp[ip] = dt[ip];
drp[ip] = dtr[ip];
dt[ip] += dtr[ip];
}
__global__ void newdt(double * dt, const double * __restrict__ dtr,
double * dtp, double * drp,
double s1, double s2, double m, int niv, int biv) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double u = dt[ip] + s1 * (dtp[ip] - dt[ip]) + s2 * (dtp[ip + niv] - dt[ip]);
double v = dt[ip] + dtr[ip]
+ s1 * (dtp[ip] + drp[ip] - dt[ip] - dtr[ip])
+ s2 * (dtp[ip + niv] + drp[ip + niv] - dt[ip] - dtr[ip]);
dtp[ip + biv] = dt[ip];
drp[ip + biv] = dtr[ip];
dt[ip] = u + m * (v - u);
}
__global__ void theta30(double * ds, const double * __restrict__ dtr,
const double * __restrict__ drp, int niv) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double t1 = dtr[ip] - drp[ip];
double t2 = dtr[ip] - drp[ip + niv];
sdata[threadIdx.x] = dtr[ip] * t1;
sdata[threadIdx.x + blockDim.x] = dtr[ip] * t2;
sdata[threadIdx.x + blockDim.x * 2] = t1 * t1;
sdata[threadIdx.x + blockDim.x * 3] = t1 * t2;
sdata[threadIdx.x + blockDim.x * 4] = t2 * t2;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
ds[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
ds[blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y] =
sdata[blockDim.x];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 2] =
sdata[blockDim.x * 2];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 3] =
sdata[blockDim.x * 3];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 4] =
sdata[blockDim.x * 4];
}
}
__global__ void theta31(double * ds2, double * ds) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x;
sdata[threadIdx.x] = ds[ip];
sdata[threadIdx.x + blockDim.x] = ds[ip + blockDim.x * gridDim.x];
sdata[threadIdx.x + blockDim.x * 2] = ds[ip + blockDim.x * gridDim.x * 2];
sdata[threadIdx.x + blockDim.x * 3] = ds[ip + blockDim.x * gridDim.x * 3];
sdata[threadIdx.x + blockDim.x * 4] = ds[ip + blockDim.x * gridDim.x * 4];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
ds2[blockIdx.x] = sdata[0];
ds2[blockIdx.x + gridDim.x] = sdata[blockDim.x];
ds2[blockIdx.x + gridDim.x * 2] = sdata[blockDim.x * 2];
ds2[blockIdx.x + gridDim.x * 3] = sdata[blockDim.x * 3];
ds2[blockIdx.x + gridDim.x * 4] = sdata[blockDim.x * 4];
}
}
__global__ void theta32(double * ds, double * ds2) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x;
sdata[ip] = ds2[ip];
sdata[ip + blockDim.x] = ds2[ip + blockDim.x];
sdata[ip + blockDim.x * 2] = ds2[ip + blockDim.x * 2];
sdata[ip + blockDim.x * 3] = ds2[ip + blockDim.x * 3];
sdata[ip + blockDim.x * 4] = ds2[ip + blockDim.x * 4];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (ip < s) {
sdata[ip] += sdata[ip + s];
sdata[ip + blockDim.x] += sdata[ip + blockDim.x + s];
sdata[ip + blockDim.x * 2] += sdata[ip + blockDim.x * 2 + s];
sdata[ip + blockDim.x * 3] += sdata[ip + blockDim.x * 3 + s];
sdata[ip + blockDim.x * 4] += sdata[ip + blockDim.x * 4 + s];
}
__syncthreads();
}
if (ip == 0) {
ds[0] = sdata[0];
ds[1] = sdata[blockDim.x];
ds[2] = sdata[blockDim.x * 2];
ds[3] = sdata[blockDim.x * 3];
ds[4] = sdata[blockDim.x * 4];
}
}
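// Illustrative host-side driver (added for clarity; not part of the original file).
// The launch geometry is an assumption: block sizes must be powers of two for the
// s >>= 1 reduction loops, g2 * b2 must equal gx * gy, and ds / ds2 must hold at
// least 5 * gx * gy and 5 * g2 doubles respectively.
static void theta3_reduce(double * ds, double * ds2,
                          const double * dtr, const double * drp, int niv,
                          int gx, int gy, int bx, int b2) {
  // stage 1: each of the gx*gy blocks accumulates five partial sums
  // (5 doubles of shared memory per thread)
  theta30<<<dim3(gx, gy), bx, 5 * bx * sizeof(double)>>>(ds, dtr, drp, niv);
  // stage 2: fold the gx*gy partials down to g2 partials per component
  int g2 = (gx * gy) / b2;
  theta31<<<g2, b2, 5 * b2 * sizeof(double)>>>(ds2, ds);
  // stage 3: a single block of g2 threads leaves the five totals in ds[0..4]
  theta32<<<1, g2, 5 * g2 * sizeof(double)>>>(ds, ds2);
}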
|
5a86b2bcd17c43701b9d65f3e28b0e6932067800.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by songzeceng on 2020/11/26.
//
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TPB 32
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float x = d_in[i];
d_out[i] = distance(x, ref);
}
void distanceArray(float *out, float *in, float ref, int len) {
float *d_in;
float *d_out;
hipMalloc(&d_in, len * sizeof(float ));
hipMalloc(&d_out, len * sizeof(float ));
hipMemcpy(d_in, in, len * sizeof(float ), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( distanceKernel), dim3(len / TPB), dim3(TPB), 0, 0, d_out, d_in, ref);
hipMemcpy(out, d_out, len * sizeof(float ), hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
} | 5a86b2bcd17c43701b9d65f3e28b0e6932067800.cu | //
// Created by songzeceng on 2020/11/26.
//
#include "cuda_runtime.h"
#include "kernel.h"
#define TPB 32
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float x = d_in[i];
d_out[i] = distance(x, ref);
}
void distanceArray(float *out, float *in, float ref, int len) {
float *d_in;
float *d_out;
cudaMalloc(&d_in, len * sizeof(float ));
cudaMalloc(&d_out, len * sizeof(float ));
cudaMemcpy(d_in, in, len * sizeof(float ), cudaMemcpyHostToDevice);
distanceKernel<<<len / TPB, TPB>>>(d_out, d_in, ref);
cudaMemcpy(out, d_out, len * sizeof(float ), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
} |
83959329763a4b5070a984347e184764b83bded0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
atomicAdd(&atom_energy[atom_i], k * dtheta * dtheta);
}
}
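// Added note: the kernel above accumulates the harmonic angle energy
// E = k * (theta - theta0)^2, with theta recovered from the clamped cosine of the
// angle between rij and rkj, and adds the whole term atomically to the energy of the
// first atom (atom_a) rather than splitting it across the three atoms.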
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
hipStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
hipLaunchKernelGGL(( AngleAtomEnergyKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream, angle_numbers, uint_crd, scaler, atom_a,
atom_b, atom_c, angle_k, angle_theta0, ene);
return;
}
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
hipStream_t stream);
| 83959329763a4b5070a984347e184764b83bded0.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
atomicAdd(&atom_energy[atom_i], k * dtheta * dtheta);
}
}
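// Added note: the kernel above accumulates the harmonic angle energy
// E = k * (theta - theta0)^2, with theta recovered from the clamped cosine of the
// angle between rij and rkj, and adds the whole term atomically to the energy of the
// first atom (atom_a) rather than splitting it across the three atoms.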
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a,
atom_b, atom_c, angle_k, angle_theta0, ene);
return;
}
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
cudaStream_t stream);
|
b4cfd6e2e1eca760280e3c4b82bc4043de4faa05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define inf 9999
#define N 1000
__global__ void funct(int n, int k, float* x, int* qx) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n * n) return;  // guard in case the grid is rounded up past n*n threads
    int j = ix % n;           // column index; ix & (n - 1) is only correct when n is a power of two
float temp2 = x[ix - j + k] + x[k * n + j];
if (x[ix] > temp2) {
x[ix] = temp2;
qx[ix] = k;
}
if (x[ix] == inf) {
qx[ix] = -2;
}
}
__global__ void funct2(int n, int k, float* x, int* qx) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n * n) return;  // guard in case the grid is rounded up past n*n threads
    int j = ix % n;           // column index; ix & (n - 1) is only correct when n is a power of two
float temp2 = x[ix - j + k] + x[k * n + j];
if (x[ix] > temp2) {
x[ix] = temp2;
qx[ix] = k;
}
}
int main(int argc, char **argv) {
struct timeval first, second, lapsed, third;
struct timezone tzp, tzp2;
float *host_A;
int *host_Q;
float *dev_x;
int *dev_qx;
float *A;
int *Q;
float *D;
int i, j, bk;
int k = 0;
//int n = atoi(argv[1]);
int n = N;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
printf("\n");
printf("RUNNING WITH %d VERTICES \n", n);
printf("\n");
hipMalloc(&dev_x, n * n * sizeof (float));
    hipMalloc(&dev_qx, n * n * sizeof (int));  // qx holds ints, matching the hipMemcpy below
//CPU arrays
    A = (float *) malloc(n * n * sizeof (float)); // initial matrix A
    D = (float *) malloc(n * n * sizeof (float)); // initial matrix D
    Q = (int *) malloc(n * n * sizeof (int)); // initial matrix Q
//GPU arrays
host_A = (float *) malloc(n * n * sizeof (float));
//host_D = (float *) malloc(n * n * sizeof (float));
host_Q = (int *) malloc(n * n * sizeof (int));
srand(time(NULL));
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
if (i == j) {
A[i * n + j] = 0;
} else {
A[i * n + j] = 1200 * (float) rand() / RAND_MAX + 1;
if (A[i * n + j] > 1000) {
A[i * n + j] = inf;
}
}
}
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
Q[i * n + j] = -1;
}
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
D[i * n + j] = A[i * n + j];
}
}
    for (i = 0; i < n; i++) // copy A into host_A
{
for (j = 0; j < n; j++) {
host_A[i * n + j] = A[i * n + j];
}
}
    for (i = 0; i < n; i++) // copy Q into host_Q
{
for (j = 0; j < n; j++) {
host_Q[i * n + j] = Q[i * n + j];
}
}
gettimeofday(&third, &tzp2);
////////////////////////////First Mem Copy////////////////////
gettimeofday(&first, &tzp);
hipMemcpy(dev_x, host_A, n * n * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy(dev_qx, host_Q, n * n * sizeof (int), hipMemcpyHostToDevice);
gettimeofday(&second, &tzp);
if (first.tv_usec > second.tv_usec) {
second.tv_usec += 1000000;
second.tv_sec--;
}
lapsed.tv_usec = second.tv_usec - first.tv_usec;
lapsed.tv_sec = second.tv_sec - first.tv_sec;
printf("First Transfer CPU to GPU Time elapsed: %lu, %lu s\n", lapsed.tv_sec, lapsed.tv_usec);
////////////////////////////////////////////////////GPU Calculation////////////////////////////////
    bk = (n * n + 511) / 512;  // round up so every matrix element gets a thread
    int gputhreads = 512;
    if (n * n < 512) {
        gputhreads = n * n;
    }
printf(" \n");
printf("BLOCKS : %d GPU THREADS: %d \n", bk, gputhreads);
printf(" \n");
//gettimeofday(&first, &tzp);
hipEventRecord(start);
funct << <bk, gputhreads>>>(n, k, dev_x, dev_qx);
for (k = 1; k < n; k++) {
funct2 << <bk, gputhreads>>>(n, k, dev_x, dev_qx);
}
hipDeviceSynchronize();
//gettimeofday(&second, &tzp);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
if (first.tv_usec > second.tv_usec) {
second.tv_usec += 1000000;
second.tv_sec--;
}
lapsed.tv_usec = second.tv_usec - first.tv_usec;
lapsed.tv_sec = second.tv_sec - first.tv_sec;
printf("GPU Calculation Time elapsed: %.20f\n", milliseconds * .0001);
printf("\n");
printf("ALL OK WE ARE DONE \n");
return 0;
}
| b4cfd6e2e1eca760280e3c4b82bc4043de4faa05.cu |
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define inf 9999
#define N 1000
__global__ void funct(int n, int k, float* x, int* qx) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n * n) return;  // guard in case the grid is rounded up past n*n threads
    int j = ix % n;           // column index; ix & (n - 1) is only correct when n is a power of two
float temp2 = x[ix - j + k] + x[k * n + j];
if (x[ix] > temp2) {
x[ix] = temp2;
qx[ix] = k;
}
if (x[ix] == inf) {
qx[ix] = -2;
}
}
__global__ void funct2(int n, int k, float* x, int* qx) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= n * n) return;  // guard in case the grid is rounded up past n*n threads
    int j = ix % n;           // column index; ix & (n - 1) is only correct when n is a power of two
float temp2 = x[ix - j + k] + x[k * n + j];
if (x[ix] > temp2) {
x[ix] = temp2;
qx[ix] = k;
}
}
int main(int argc, char **argv) {
struct timeval first, second, lapsed, third;
struct timezone tzp, tzp2;
float *host_A;
int *host_Q;
float *dev_x;
int *dev_qx;
float *A;
int *Q;
float *D;
int i, j, bk;
int k = 0;
//int n = atoi(argv[1]);
int n = N;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
printf("\n");
printf("RUNNING WITH %d VERTICES \n", n);
printf("\n");
cudaMalloc(&dev_x, n * n * sizeof (float));
    cudaMalloc(&dev_qx, n * n * sizeof (int));  // qx holds ints, matching the cudaMemcpy below
//CPU arrays
    A = (float *) malloc(n * n * sizeof (float)); // initial matrix A
    D = (float *) malloc(n * n * sizeof (float)); // initial matrix D
    Q = (int *) malloc(n * n * sizeof (int)); // initial matrix Q
//GPU arrays
host_A = (float *) malloc(n * n * sizeof (float));
//host_D = (float *) malloc(n * n * sizeof (float));
host_Q = (int *) malloc(n * n * sizeof (int));
srand(time(NULL));
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
if (i == j) {
A[i * n + j] = 0;
} else {
A[i * n + j] = 1200 * (float) rand() / RAND_MAX + 1;
if (A[i * n + j] > 1000) {
A[i * n + j] = inf;
}
}
}
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
Q[i * n + j] = -1;
}
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
D[i * n + j] = A[i * n + j];
}
}
    for (i = 0; i < n; i++) // copy A into host_A
{
for (j = 0; j < n; j++) {
host_A[i * n + j] = A[i * n + j];
}
}
    for (i = 0; i < n; i++) // copy Q into host_Q
{
for (j = 0; j < n; j++) {
host_Q[i * n + j] = Q[i * n + j];
}
}
gettimeofday(&third, &tzp2);
////////////////////////////First Mem Copy////////////////////
gettimeofday(&first, &tzp);
cudaMemcpy(dev_x, host_A, n * n * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_qx, host_Q, n * n * sizeof (int), cudaMemcpyHostToDevice);
gettimeofday(&second, &tzp);
if (first.tv_usec > second.tv_usec) {
second.tv_usec += 1000000;
second.tv_sec--;
}
lapsed.tv_usec = second.tv_usec - first.tv_usec;
lapsed.tv_sec = second.tv_sec - first.tv_sec;
printf("First Transfer CPU to GPU Time elapsed: %lu, %lu s\n", lapsed.tv_sec, lapsed.tv_usec);
////////////////////////////////////////////////////GPU Calculation////////////////////////////////
    bk = (n * n + 511) / 512;  // round up so every matrix element gets a thread
    int gputhreads = 512;
    if (n * n < 512) {
        gputhreads = n * n;
    }
printf(" \n");
printf("BLOCKS : %d GPU THREADS: %d \n", bk, gputhreads);
printf(" \n");
//gettimeofday(&first, &tzp);
cudaEventRecord(start);
funct << <bk, gputhreads>>>(n, k, dev_x, dev_qx);
for (k = 1; k < n; k++) {
funct2 << <bk, gputhreads>>>(n, k, dev_x, dev_qx);
}
cudaThreadSynchronize();
//gettimeofday(&second, &tzp);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
if (first.tv_usec > second.tv_usec) {
second.tv_usec += 1000000;
second.tv_sec--;
}
lapsed.tv_usec = second.tv_usec - first.tv_usec;
lapsed.tv_sec = second.tv_sec - first.tv_sec;
printf("GPU Calculation Time elapsed: %.20f\n", milliseconds * .0001);
printf("\n");
printf("ALL OK WE ARE DONE \n");
return 0;
}
|
1970fd997dc4fe0bf2bbb853a3fa5ed9918dccbe.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by joe on 4/10/16.
//
#include"../FBCSR.h"
#include"cuHelper.h"
void fbcsrSingle_memCpy(fbcsr *src, fbcsr *dst, enum DeviceCopyDIR dir) {
dst->n = src->n;
dst->m = src->m;
assert(dst->c == src->c);
assert(dst->r == src->r);
dst->nnz = src->nnz;
dst->nr = src->nr;
dst->nb = src->nb;
dst->nelem = src->nelem;
dst->thresh = src->thresh;
memCopy((void **) &(dst->rptr), (void *) src->rptr, sizeof(int) * src->nr, dir);
memCopy((void **) &(dst->bptr), (void *) src->bptr, sizeof(int) * (src->nr + 1), dir);
memCopy((void **) &(dst->val), (void *) src->val, sizeof(elem_t) * src->nb * src->nelem, dir);
memCopy((void **) &(dst->bindx), (void *) src->bindx, sizeof(int) * (src->nb), dir);
}
extern "C" void fbcsr_memCpy(list *src, list *dst, enum DeviceCopyDIR dir) {
while (src != NULL && dst != NULL) {
fbcsrSingle_memCpy((fbcsr *) list_get(src), (fbcsr *) list_get(dst), dir);
src = list_next(src);
dst = list_next(dst);
}
assert(dst == src);
}
extern "C" void fbcsr_CUDA_SpMV(list *l, vector *v, vector *r) {
fbcsr *f;
while (l != NULL) {
f = (fbcsr *) list_get(l);
if (f->optKernel == NULL) {
fprintf(stderr, "Cannot pass NULL as optkernel for fbcsr CUDA\n");
exit(-1);
} else {
fbcsrSingle_SpMVKernel krnl = (fbcsrSingle_SpMVKernel) f->optKernel;
if (f->nr > 0)
krnl(f, v, r);
cuCheck(hipGetLastError());
}
l = list_next(l);
}
}
extern "C" void fbcsr_CUDA_destroy(void *f) {
fbcsr *ff = (fbcsr *) f;
safeCudaFree(ff->rptr);
safeCudaFree(ff->val);
safeCudaFree(ff->bindx);
safeCudaFree(ff->bptr);
}
| 1970fd997dc4fe0bf2bbb853a3fa5ed9918dccbe.cu | //
// Created by joe on 4/10/16.
//
#include"../FBCSR.h"
#include"cuHelper.h"
void fbcsrSingle_memCpy(fbcsr *src, fbcsr *dst, enum DeviceCopyDIR dir) {
dst->n = src->n;
dst->m = src->m;
assert(dst->c == src->c);
assert(dst->r == src->r);
dst->nnz = src->nnz;
dst->nr = src->nr;
dst->nb = src->nb;
dst->nelem = src->nelem;
dst->thresh = src->thresh;
memCopy((void **) &(dst->rptr), (void *) src->rptr, sizeof(int) * src->nr, dir);
memCopy((void **) &(dst->bptr), (void *) src->bptr, sizeof(int) * (src->nr + 1), dir);
memCopy((void **) &(dst->val), (void *) src->val, sizeof(elem_t) * src->nb * src->nelem, dir);
memCopy((void **) &(dst->bindx), (void *) src->bindx, sizeof(int) * (src->nb), dir);
}
extern "C" void fbcsr_memCpy(list *src, list *dst, enum DeviceCopyDIR dir) {
while (src != NULL && dst != NULL) {
fbcsrSingle_memCpy((fbcsr *) list_get(src), (fbcsr *) list_get(dst), dir);
src = list_next(src);
dst = list_next(dst);
}
assert(dst == src);
}
extern "C" void fbcsr_CUDA_SpMV(list *l, vector *v, vector *r) {
fbcsr *f;
while (l != NULL) {
f = (fbcsr *) list_get(l);
if (f->optKernel == NULL) {
fprintf(stderr, "Cannot pass NULL as optkernel for fbcsr CUDA\n");
exit(-1);
} else {
fbcsrSingle_SpMVKernel krnl = (fbcsrSingle_SpMVKernel) f->optKernel;
if (f->nr > 0)
krnl(f, v, r);
cuCheck(cudaGetLastError());
}
l = list_next(l);
}
}
extern "C" void fbcsr_CUDA_destroy(void *f) {
fbcsr *ff = (fbcsr *) f;
safeCudaFree(ff->rptr);
safeCudaFree(ff->val);
safeCudaFree(ff->bindx);
safeCudaFree(ff->bptr);
}
|
19ab693676a83307ca8b90eee9480bb694618326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void registerDemo(int width)
{
int start = width * threadIdx.x;
int end = start + width;
for (int i = start; i < end; i++) {
        // some code here
}
}
| 19ab693676a83307ca8b90eee9480bb694618326.cu | __global__ void registerDemo(int width)
{
int start = width * threadIdx.x;
int end = start + width;
for (int i = start; i < end; i++) {
// some codes here
}
}
|
b34255c266439ea4883396cdde0653dce02a9e32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <scan_naive_kernel.cu>
#include <scan_workefficient_kernel.cu>
#include <scan_best_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
unsigned int num_elements = 512;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int num_threads = num_elements / 2;
const unsigned int mem_size = sizeof( float) * num_elements;
// padding space is used to avoid shared memory bank conflicts
unsigned int extra_space = num_elements / NUM_BANKS;
#ifdef ZERO_BANK_CONFLICTS
extra_space += extra_space / NUM_BANKS;
#endif
const unsigned int shared_mem_size = sizeof(float) *
(num_elements + extra_space);
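    // Worked example of the padding arithmetic above (assuming NUM_BANKS is 16):
    // with the default 512 elements, extra_space starts at 512 / 16 = 32 floats,
    // grows to 34 if ZERO_BANK_CONFLICTS is defined, and shared_mem_size then
    // covers 544 (or 546) floats per block.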
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata[3];
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &(d_odata[0]), mem_size));
cutilSafeCall( hipMalloc( (void**) &(d_odata[1]), mem_size));
cutilSafeCall( hipMalloc( (void**) &(d_odata[2]), mem_size));
// copy host memory to device input array
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
#ifndef __DEVICE_EMULATION__
dim3 grid(256, 1, 1);
#else
    dim3 grid(1, 1, 1); // only run one block in device emu mode or it will be too slow
#endif
dim3 threads(num_threads*2, 1, 1);
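    // scan_naive is launched with num_elements threads (one per element); the two
    // work-efficient scans process two elements per thread, which is why threads.x
    // is halved again before they are launched below.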
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running parallel prefix sum (scan) of %d elements\n", num_elements);
printf("Comparing 3 versions:\n\n");
// execute the kernels
unsigned int numIterations = 100;
printf("1. scan_naive -- not work efficient (O(n log n) adds).\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( scan_naive), dim3(grid), dim3(threads), 2 * shared_mem_size , 0,
d_odata[0], d_idata, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
threads.x /= 2;
printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( scan_workefficient), dim3(grid), dim3(threads), shared_mem_size , 0,
d_odata[1], d_idata, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
printf("3. scan_best -- work efficient with very few bank conflicts.\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( scan_best), dim3(grid), dim3(threads), shared_mem_size , 0,
d_odata[2], d_idata, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
for (int i = 0; i < 3; ++i) // check all 3 results
{
// copy result from device to host
cutilSafeCall(hipMemcpy( h_data, d_odata[i], sizeof(float) * num_elements,
hipMemcpyDeviceToHost));
// If this is a regression test write the results to a file
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0);
}
else
{
            // custom output handling when no regression test is running
            // in this case check if the result is equivalent to the expected solution
// We can use an epsilon of 0 since values are integral and in a range
// that can be exactly represented
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon);
char* names[] = {"scan_naive", "scan_workefficient", "scan_best"};
printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED");
}
}
printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n");
printf("http://www.gpgpu.org/developer/cudpp\n");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata[0]));
cutilSafeCall(hipFree(d_odata[1]));
cutilSafeCall(hipFree(d_odata[2]));
cutilCheckError(cutDeleteTimer(timer));
hipDeviceReset();
}
| b34255c266439ea4883396cdde0653dce02a9e32.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <scan_naive_kernel.cu>
#include <scan_workefficient_kernel.cu>
#include <scan_best_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
unsigned int num_elements = 512;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int*)&num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int num_threads = num_elements / 2;
const unsigned int mem_size = sizeof( float) * num_elements;
// padding space is used to avoid shared memory bank conflicts
unsigned int extra_space = num_elements / NUM_BANKS;
#ifdef ZERO_BANK_CONFLICTS
extra_space += extra_space / NUM_BANKS;
#endif
const unsigned int shared_mem_size = sizeof(float) *
(num_elements + extra_space);
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata[3];
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &(d_odata[0]), mem_size));
cutilSafeCall( cudaMalloc( (void**) &(d_odata[1]), mem_size));
cutilSafeCall( cudaMalloc( (void**) &(d_odata[2]), mem_size));
// copy host memory to device input array
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
#ifndef __DEVICE_EMULATION__
dim3 grid(256, 1, 1);
#else
    dim3 grid(1, 1, 1); // only run one block in device emu mode or it will be too slow
#endif
dim3 threads(num_threads*2, 1, 1);
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running parallel prefix sum (scan) of %d elements\n", num_elements);
printf("Comparing 3 versions:\n\n");
// execute the kernels
unsigned int numIterations = 100;
printf("1. scan_naive -- not work efficient (O(n log n) adds).\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
scan_naive<<< grid, threads, 2 * shared_mem_size >>>
(d_odata[0], d_idata, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
threads.x /= 2;
printf("2. scan_workefficient -- Work efficient (O(n) adds), but many bank conflicts.\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
scan_workefficient<<< grid, threads, shared_mem_size >>>
(d_odata[1], d_idata, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
printf("3. scan_best -- work efficient with very few bank conflicts.\n");
cutStartTimer(timer);
for (unsigned int i = 0; i < numIterations; ++i)
{
scan_best<<< grid, threads, shared_mem_size >>>
(d_odata[2], d_idata, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
for (int i = 0; i < 3; ++i) // check all 3 results
{
// copy result from device to host
cutilSafeCall(cudaMemcpy( h_data, d_odata[i], sizeof(float) * num_elements,
cudaMemcpyDeviceToHost));
// If this is a regression test write the results to a file
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
cutWriteFilef( "./data/result.dat", h_data, num_elements, 0.0);
}
else
{
// custom output handling when no regression test running
// in this case check if the result is equivalent to the expected soluion
// We can use an epsilon of 0 since values are integral and in a range
// that can be exactly represented
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, num_elements, epsilon);
char* names[] = {"scan_naive", "scan_workefficient", "scan_best"};
printf( "%s: Test %s\n", names[i], (1 == result_regtest) ? "PASSED" : "FAILED");
}
}
printf("\nCheck out the CUDA Data Parallel Primitives Library for more on scan.\n");
printf("http://www.gpgpu.org/developer/cudpp\n");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata[0]));
cutilSafeCall(cudaFree(d_odata[1]));
cutilSafeCall(cudaFree(d_odata[2]));
cutilCheckError(cutDeleteTimer(timer));
cudaThreadExit();
}
|
cf40da9d36c1be20a00afa7ac1a6df50f078bc0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include "detail/kdtree_node_array.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
namespace cukd {
// Kernel declarations
namespace device {
__global__
void
tree_update_children_small_kernel(device::KDTreeNodeArray tree,
int n_nodes);
__global__
void
update_tree_children_from_small_kernel(int n_active_nodes, int n_small,
device::KDTreeNodeArray tree, int* tags,
int* small_offset, int* active_indices);
__global__
void
leaf_elements_kernel(device::SmallNodeArray active, device::SplitCandidateArray sca,
int old_small_nodes, int* marks, int* elem_offsets, int* result);
} // namespace device
void
KDTreeNodeArray::print() {
left_nodes.print("kdtree_node_array::left_nodes");
right_nodes.print("KDTreeNodeArray::right_nodes");
split_axis.print("KDTreeNodeArray::split_axis");
split_position.print("KDTreeNodeArray::split_position");
depth.print("KDTreeNodeArray::depth");
leaf_idx.print("KDTreeNodeArray::leaf_index");
node_size.print("KDTreeNodeArray::leaf_size");
node_element_first_idx.print("KDTreeNodeArray::leaf_first_elem");
element_idx.print("KDTreeNodeArray::element_idx");
}
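// update_leaves compacts the per-node element counts into per-leaf sizes
// (copy_if on the marks), converts sizes to offsets with exclusive scans,
// gathers the leaf element indices on the device via get_leaf_elements, resizes
// the node/element/leaf arrays to make room, and returns the first new leaf
// index together with the number of leaves added.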
std::pair<int, int>
KDTreeNodeArray::update_leaves(SmallNodeArray & small_nca,
cukd::SplitCandidateArray & sca,
DevVector<int> & new_elements,
DevVector<int> & marks,
DevVector<int> & mark_offsets) {
int n_nodes_old = n_nodes();
int n_elements_old = n_elements();
int n_leaves_old = n_leaves();
int small_nodes = small_nca.n_nodes();
DevVector<int> leaf_elements, elem_offsets, leaf_element_offsets;
DevVector<int> leaf_element_sizes;
int new_leaf_nodes = mark_offsets.get_at(mark_offsets.size() - 1);
leaf_element_sizes.resize(new_leaf_nodes);
leaf_element_offsets.resize(new_leaf_nodes);
elem_offsets.resize(small_nodes);
thrust::copy_if(new_elements.begin(), new_elements.end(),
marks.begin(), leaf_element_sizes.begin(), GreaterThanZero());
int new_leaf_elements = thrust::reduce(new_elements.begin(), new_elements.end());
thrust::exclusive_scan(leaf_element_sizes.begin(), leaf_element_sizes.end(),
leaf_element_offsets.begin());
thrust::exclusive_scan(new_elements.begin(), new_elements.end(), elem_offsets.begin());
leaf_elements.resize(new_leaf_elements);
get_leaf_elements(small_nca, sca, small_nodes, marks, elem_offsets, leaf_elements);
resize_nodes(n_nodes_old + small_nodes);
resize_elements(n_elements_old + new_leaf_elements);
resize_leaves(n_leaves_old + new_leaf_nodes);
thrust::copy(leaf_element_sizes.begin(), leaf_element_sizes.end(),
node_size.begin() + n_leaves_old);
int next_off = 0;
if(n_leaves_old != 0) {
next_off = node_element_first_idx.get_at(n_leaves_old - 1)
+ node_size.get_at(n_leaves_old - 1);
thrust::transform(leaf_element_offsets.begin(), leaf_element_offsets.end(),
thrust::constant_iterator<int>(next_off),
node_element_first_idx.begin() + n_leaves_old,
thrust::plus<int>());
} else {
thrust::copy(leaf_element_offsets.begin(), leaf_element_offsets.end(),
node_element_first_idx.begin() + n_leaves_old);
}
thrust::copy(leaf_elements.begin(), leaf_elements.end(),
element_idx.begin() + next_off);
return std::make_pair(n_leaves_old, new_leaf_nodes);
}
void
KDTreeNodeArray::update_children_small() {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::tree_update_children_small_kernel), dim3(grid), dim3(blocks), 0, 0, dev_array(), n_nodes());
CUT_CHECK_ERROR("tree_update_children_small_kernel failed");
}
void
KDTreeNodeArray::update_tree_children_from_small(int n_nodes_active, int n_nodes_small,
DevVector<int> & small_tags,
DevVector<int> & child_diff,
DevVector<int> & active_indices) {
dim3 grid(IntegerDivide(256)(n_nodes_active),1,1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::update_tree_children_from_small_kernel), dim3(grid),dim3(blocks), 0, 0,
n_nodes_active, n_nodes_small, dev_array(),
small_tags.pointer(), child_diff.pointer(),
active_indices.pointer());
CUT_CHECK_ERROR("update_tree_children_from_small_kernel failed");
}
void
KDTreeNodeArray::get_leaf_elements(cukd::SmallNodeArray & active,
cukd::SplitCandidateArray & sca,
int old_small_nodes, DevVector<int> & marks,
DevVector<int> & elem_offsets, DevVector<int> & result) {
dim3 grid(IntegerDivide(256)(old_small_nodes),1,1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::leaf_elements_kernel), dim3(grid), dim3(blocks), 0, 0, active.dev_array(), sca.dev_array(),
old_small_nodes, marks.pointer(),
elem_offsets.pointer(), result.pointer());
}
} // namespace cukd
| cf40da9d36c1be20a00afa7ac1a6df50f078bc0a.cu | // Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include "detail/kdtree_node_array.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
namespace cukd {
// Kernel declarations
namespace device {
__global__
void
tree_update_children_small_kernel(device::KDTreeNodeArray tree,
int n_nodes);
__global__
void
update_tree_children_from_small_kernel(int n_active_nodes, int n_small,
device::KDTreeNodeArray tree, int* tags,
int* small_offset, int* active_indices);
__global__
void
leaf_elements_kernel(device::SmallNodeArray active, device::SplitCandidateArray sca,
int old_small_nodes, int* marks, int* elem_offsets, int* result);
} // namespace device
void
KDTreeNodeArray::print() {
left_nodes.print("kdtree_node_array::left_nodes");
right_nodes.print("KDTreeNodeArray::right_nodes");
split_axis.print("KDTreeNodeArray::split_axis");
split_position.print("KDTreeNodeArray::split_position");
depth.print("KDTreeNodeArray::depth");
leaf_idx.print("KDTreeNodeArray::leaf_index");
node_size.print("KDTreeNodeArray::leaf_size");
node_element_first_idx.print("KDTreeNodeArray::leaf_first_elem");
element_idx.print("KDTreeNodeArray::element_idx");
}
std::pair<int, int>
KDTreeNodeArray::update_leaves(SmallNodeArray & small_nca,
cukd::SplitCandidateArray & sca,
DevVector<int> & new_elements,
DevVector<int> & marks,
DevVector<int> & mark_offsets) {
int n_nodes_old = n_nodes();
int n_elements_old = n_elements();
int n_leaves_old = n_leaves();
int small_nodes = small_nca.n_nodes();
DevVector<int> leaf_elements, elem_offsets, leaf_element_offsets;
DevVector<int> leaf_element_sizes;
int new_leaf_nodes = mark_offsets.get_at(mark_offsets.size() - 1);
leaf_element_sizes.resize(new_leaf_nodes);
leaf_element_offsets.resize(new_leaf_nodes);
elem_offsets.resize(small_nodes);
thrust::copy_if(new_elements.begin(), new_elements.end(),
marks.begin(), leaf_element_sizes.begin(), GreaterThanZero());
int new_leaf_elements = thrust::reduce(new_elements.begin(), new_elements.end());
thrust::exclusive_scan(leaf_element_sizes.begin(), leaf_element_sizes.end(),
leaf_element_offsets.begin());
thrust::exclusive_scan(new_elements.begin(), new_elements.end(), elem_offsets.begin());
leaf_elements.resize(new_leaf_elements);
get_leaf_elements(small_nca, sca, small_nodes, marks, elem_offsets, leaf_elements);
resize_nodes(n_nodes_old + small_nodes);
resize_elements(n_elements_old + new_leaf_elements);
resize_leaves(n_leaves_old + new_leaf_nodes);
thrust::copy(leaf_element_sizes.begin(), leaf_element_sizes.end(),
node_size.begin() + n_leaves_old);
int next_off = 0;
if(n_leaves_old != 0) {
next_off = node_element_first_idx.get_at(n_leaves_old - 1)
+ node_size.get_at(n_leaves_old - 1);
thrust::transform(leaf_element_offsets.begin(), leaf_element_offsets.end(),
thrust::constant_iterator<int>(next_off),
node_element_first_idx.begin() + n_leaves_old,
thrust::plus<int>());
} else {
thrust::copy(leaf_element_offsets.begin(), leaf_element_offsets.end(),
node_element_first_idx.begin() + n_leaves_old);
}
thrust::copy(leaf_elements.begin(), leaf_elements.end(),
element_idx.begin() + next_off);
return std::make_pair(n_leaves_old, new_leaf_nodes);
}
void
KDTreeNodeArray::update_children_small() {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
device::tree_update_children_small_kernel<<<grid, blocks>>>(dev_array(), n_nodes());
CUT_CHECK_ERROR("tree_update_children_small_kernel failed");
}
void
KDTreeNodeArray::update_tree_children_from_small(int n_nodes_active, int n_nodes_small,
DevVector<int> & small_tags,
DevVector<int> & child_diff,
DevVector<int> & active_indices) {
dim3 grid(IntegerDivide(256)(n_nodes_active),1,1);
dim3 blocks(256,1,1);
device::update_tree_children_from_small_kernel<<<grid,blocks>>>(
n_nodes_active, n_nodes_small, dev_array(),
small_tags.pointer(), child_diff.pointer(),
active_indices.pointer());
CUT_CHECK_ERROR("update_tree_children_from_small_kernel failed");
}
void
KDTreeNodeArray::get_leaf_elements(cukd::SmallNodeArray & active,
cukd::SplitCandidateArray & sca,
int old_small_nodes, DevVector<int> & marks,
DevVector<int> & elem_offsets, DevVector<int> & result) {
dim3 grid(IntegerDivide(256)(old_small_nodes),1,1);
dim3 blocks(256,1,1);
device::leaf_elements_kernel<<<grid, blocks>>>(active.dev_array(), sca.dev_array(),
old_small_nodes, marks.pointer(),
elem_offsets.pointer(), result.pointer());
}
} // namespace cukd
|
5bb2c4a0f27c93fca5d7006a6c13534f97bbfe4d.hip | // !!! This is a file automatically generated by hipify!!!
//
// rc_loader.cpp
// RcLoader
//
// Created by Dilip Patlolla on 09/21/17.
// Copyright (c) 2015-2025 STURFEE INC ALL RIGHTS RESERVED
//
#include "rc_loader.hpp"
// RcLoader class constructor
RcLoader::RcLoader() {
x_center_coord_ = 553280; // default values
y_center_coord_ = 4183397; // default values
radius_ = 500; // default values
scene_width_ = 1920; // default values
scene_height_ = 1080; // default values
num_triangles_ = 0;
camera_height_ = 0;
}
int RcLoader::initSf(SturgInputParams inputData) {
center_x_ = inputData.center_x;
center_y_ = inputData.center_y;
// compute the actual center wrt to tile SIZE
x_center_coord_ = center_x_ - fmod(center_x_, TILE_SIZE);
y_center_coord_ = center_y_ - fmod(center_y_, TILE_SIZE);
#ifdef VERBOSE_LOADER
std::cout.precision(16);
std::cout << "center_x_: " << center_x_ << ",";
std::cout << "center_y_: " << center_y_ << std::endl;
std::cout << "x_center_coord_: " << x_center_coord_ << ",";
std::cout << "y_center_coord_: " << y_center_coord_ << std::endl;
#endif
radius_ = inputData.radius;
scene_width_ = inputData.scene_width;
scene_height_ = inputData.scene_height;
camera_height_ = inputData.cam_height;
// image_width_ and image_height_ are respectively scene_width_/4 and scene_height_/4
// if no input image_width_ and image_height_ are provided
// image_width_ = (inputData.image_width == 0) ? (scene_width_/4) : inputData.image_width;
// image_height_ = (inputData.image_height == 0) ? (scene_height_/4) : inputData.image_height;
fov_ = inputData.fov;
write_output_ = inputData.write_output;
num_triangles_ = 0;
return 1;
}
std::vector<SturGVertex> RcLoader::getRawVertices() { return vertices_; }
MbrLsForSF RcLoader::getMbrsAsVector() {
max_binding_rectangles_.min_vertex_x.clear();
max_binding_rectangles_.max_vertex_x.clear();
max_binding_rectangles_.min_vertex_y.clear();
max_binding_rectangles_.max_vertex_y.clear();
// TO DO: check here if the vertices_ has been populated
for (auto i = 0; i < vertices_.size(); i += 3) {
        max_binding_rectangles_.min_vertex_x.push_back(
            std::min({vertices_[i + 0].vertex_param[0], vertices_[i + 1].vertex_param[0],
                      vertices_[i + 2].vertex_param[0]}));
        max_binding_rectangles_.min_vertex_y.push_back(
            std::min({vertices_[i + 0].vertex_param[1], vertices_[i + 1].vertex_param[1],
                      vertices_[i + 2].vertex_param[1]}));
        max_binding_rectangles_.max_vertex_x.push_back(
            std::max({vertices_[i + 0].vertex_param[0], vertices_[i + 1].vertex_param[0],
                      vertices_[i + 2].vertex_param[0]}));
        max_binding_rectangles_.max_vertex_y.push_back(
            std::max({vertices_[i + 0].vertex_param[1], vertices_[i + 1].vertex_param[1],
                      vertices_[i + 2].vertex_param[1]}));
}
return max_binding_rectangles_;
}
std::vector<GLfloat> RcLoader::getVertices() { return raycast_vertices_; }
std::vector<float3> RcLoader::getTriangles() { return triangles_; }
// encode : x,y -> unique combination
double RcLoader::encode(double param_a, double param_b) {
return (param_a + param_b) * ((param_a + param_b + 1) / 2) + param_a;
}
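// Example of the arithmetic above: encode(2, 3) = (2 + 3) * ((2 + 3 + 1) / 2) + 2 = 17,
// a Cantor-style pairing of the two tile-origin coordinates into a single key.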
// get the optimal buffer tile count
int RcLoader::getTileCount() {
// compute tiles required for the given radius
no_of_tiles_ = int(radius_ / TILE_SIZE);
no_of_tiles_ = (no_of_tiles_ == 0) ? 1 : no_of_tiles_;
// minimum tiles required in x and y direction, is 3 if(no_of_tiles == 0)
no_of_optimum_tiles_ = std::fmax(int(3), no_of_tiles_ * 2 + 1);
return 1;
}
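// Example of the tile arithmetic above: if radius_ / TILE_SIZE is 5, then
// no_of_tiles_ = 5 and no_of_optimum_tiles_ = 11, i.e. an 11 x 11 block of tiles
// around the centre tile; any radius smaller than one tile still yields the
// minimum 3 x 3 block.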
// compute the origins of tiles encompassed by the radius
int RcLoader::getTileIds() {
// compute required tile count
getTileCount();
// moving to the left upper corner of area
x_origin_ = x_center_coord_ - no_of_tiles_ * TILE_SIZE;
y_origin_ = y_center_coord_ - no_of_tiles_ * TILE_SIZE;
#ifdef VERBOSE_LOADER
std::cout << "no_of_tiles_: " << no_of_tiles_ << std::endl;
std::cout << "no_of_optimum_tiles_: " << no_of_optimum_tiles_ << std::endl;
std::cout << "x_origin: " << x_origin_ << ",";
std::cout << "y_origin: " << y_origin_ << std::endl;
#endif
// compute origins of all the required tiles
for (int64_t i_iter = 0; i_iter < no_of_optimum_tiles_; i_iter++) {
for (int64_t j_iter = 0; j_iter < no_of_optimum_tiles_; j_iter++) {
tile_ids_.push_back(
this->encode(x_origin_ + i_iter * TILE_SIZE, y_origin_ + j_iter * TILE_SIZE));
}
}
return 1;
}
int RcLoader::displayTileInfo() {
if (tile_ids_.empty()) {
std::cout << "Tile data is empty" << std::endl;
return 0;
}
std::cout << "total tiles:\t" << tile_ids_.size() << std::endl;
// output tile data
for (const uint64_t &iter : tile_ids_) {
std::cout << iter << std::endl;
}
return 1;
}
int RcLoader::displayBinaryFileMeta(const SturGTileMetaData &bin_file_model_info) {
std::cout << "meta_length :" << bin_file_model_info.meta_length << std::endl;
std::cout << "models_count :" << bin_file_model_info.models_count << std::endl;
std::cout << "tile_center_x :" << bin_file_model_info.tile_center_x << std::endl;
std::cout << "tile_center_y :" << bin_file_model_info.tile_center_y << std::endl;
return 1;
}
int RcLoader::getDataFromTiles() {
// currently reading from local path:
// TO DO : Read data from input arg..
if (tile_ids_.empty()) {
std::cout << "Tile data not available" << std::endl;
return 0;
}
#ifdef G2
std::string terr_dir_path = "/home/ubuntu/geometry_terrain/";
#ifdef SPATIAL_FILTER
// TO DO: move to s3 implementation
std::string data_dir_path = "/home/ubuntu/geometry_terrain_holes/";
#else
// TO DO: move to s3 implementation
std::string data_dir_path = "/home/ubuntu/geometry_models/";
#endif
#else
std::string terr_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_terrain/";
#ifdef SPATIAL_FILTER
std::string data_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_terrain_holes/";
#else
std::string data_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_models/";
#endif
#endif
// declare an iterator to a std::vector of uint64_t
std::vector<uint64_t>::iterator iter;
// read sturg bin tiles
for (iter = tile_ids_.begin(); iter < tile_ids_.end(); iter++) {
readSturgBinFile(data_dir_path + std::to_string(*iter), 0);
#ifdef CNN
readSturgBinFile(terr_dir_path + std::to_string(*iter), 1);
#endif
}
return 1;
}
/*int downloadTile(uint32_t tile_id_) {
// TO DO : download data from amazon s3 bucket
return 1;
}*/
int RcLoader::readSturgBinFile(const std::string file_name, unsigned int is_terrain) {
int16_t temp_a, temp_b, temp_c;
uint32_t is_uint16;
SturGVertex vertex;
SturGFace face;
SturGBuildingData temp_building;
std::vector<SturGBuildingData> buildings;
SturGTileMetaData bin_file_meta;
std::ifstream tile_file(file_name, std::ios::in | std::ios::binary);
if (tile_file.is_open()) {
// get meta data
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.version), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.meta_length), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.models_count), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.tile_center_x), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.tile_center_y), sizeof(uint32_t));
#ifdef VERBOSE_LOADER
        // display meta data for convenience
displayBinaryFileMeta(bin_file_meta);
#endif
// get models info
for (uint32_t i = 0; i < bin_file_meta.models_count; i++) {
tile_file.read(reinterpret_cast<char *>(&temp_building.id), sizeof(double));
tile_file.read(reinterpret_cast<char *>(&temp_building.vertices_byte_length),
sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&temp_building.faces_byte_length),
sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&is_uint16), sizeof(uint32_t));
temp_building.is_uint_16 = bool(is_uint16);
temp_building.is_terrain = is_terrain;
buildings.push_back(temp_building);
}
// declare an iterator to a std::vector
std::vector<SturGBuildingData>::iterator iter;
for (iter = buildings.begin(); iter != buildings.end(); iter++) {
// TO DO: better refactoring for 32 bit support
iter->count_vertices = uint32_t(iter->vertices_byte_length / sizeof(u_int16_t) / 3);
for (uint32_t i = 0; i < iter->count_vertices; i++) {
tile_file.read(reinterpret_cast<char *>(&temp_a), sizeof(int16_t));
tile_file.read(reinterpret_cast<char *>(&temp_b), sizeof(int16_t));
tile_file.read(reinterpret_cast<char *>(&temp_c), sizeof(int16_t));
vertex.vertex_param[0] =
float(temp_a) / 100.0 + bin_file_meta.tile_center_x - center_x_;
vertex.vertex_param[1] =
float(temp_b) / 100.0 + bin_file_meta.tile_center_y - center_y_;
vertex.vertex_param[2] = float(temp_c) / 100.0;
iter->vertices.push_back(vertex);
}
iter->count_faces = uint32_t(iter->faces_byte_length / sizeof(int16_t) / 3);
for (uint32_t i = 0; i < iter->count_faces; i++) {
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[0])), sizeof(uint16_t));
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[1])), sizeof(uint16_t));
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[2])), sizeof(uint16_t));
iter->faces.push_back(face);
}
}
// concatenate the buildings from the current file to the global buildings_ data
buildings_.reserve(buildings_.size() + buildings.size());
std::move(buildings.begin(), buildings.end(), std::inserter(buildings_, buildings_.end()));
// buildings.clear();
tile_file.close();
} else {
std::cout << "skipping file:\t" << file_name << std::endl;
}
return 1;
}
int RcLoader::displayBuildingsData() {
for (const SturGBuildingData &building_iter : buildings_) {
std::cout << "Building ID: " << building_iter.id << "\t";
std::cout << "No. of vertices: " << building_iter.vertices.size() << "\t";
std::cout << "No. of faces: " << building_iter.faces.size() << std::endl;
std::cout << "Is terrain: " << building_iter.is_terrain << std::endl;
std::cout << "UINT16: " << building_iter.is_uint_16;
std::cout << "\tVertices Byte Length: " << building_iter.vertices_byte_length;
std::cout << "\tFaces Byte Length: " << building_iter.faces_byte_length << std::endl;
#ifdef VERBOSE_LOADER2
std::cout << "Vertices : " << std::endl;
for (const SturGVertex &vertice_iter : building_iter.vertices) {
std::cout << "[" << vertice_iter.vertex_param[0] << " " << vertice_iter.vertex_param[1]
<< " " << vertice_iter.vertex_param[2] << "]" << std::endl;
}
std::cout << "Faces : " << std::endl;
for (const SturGFace &face_iter : building_iter.faces) {
std::cout << "[" << face_iter.face_vertex[0] << " " << face_iter.face_vertex[1] << " "
<< face_iter.face_vertex[2] << "]" << std::endl;
}
#endif
}
return 1;
}
std::vector<SturgCameraParameters> RcLoader::processSf(
std::vector<SturgCameraParameters> all_cam_params) {
processDataforRendering();
readData();
filtered_cam_params_.clear();
SturgCameraParameters temp_cam_param;
// MbrRectangles pid_MB_Rectangles = this->getRectangles();
unsigned int max_triangles_count = this->getTrianglesCount();
std::vector<SturGVertex> vertices = this->getRawVertices();
// MbrLsForSF max_binding_rectangles = this->getMbrsAsVector();
// double x = 0;
// double y = 0;
// float height = 0;
// std::vector<size_t> x_min_vertices; // (max_binding_rectangles.min_vertex_x.size());
// std::vector<size_t> y_min_vertices; // (max_binding_rectangles.min_vertex_y.size());
// std::vector<size_t> x_max_vertices; // (max_binding_rectangles.max_vertex_x.size());
// std::vector<size_t> y_max_vertices; // (max_binding_rectangles.max_vertex_y.size());
// std::vector<size_t> x_intersection, y_intersection;
// std::vector<size_t> face_intersection;
// // std::cout << "size of all cam params" << all_cam_params.size() << std::endl;
// unsigned int count = 0;
// for (auto iter = all_cam_params.begin(); iter < all_cam_params.end(); iter++) {
// x = iter->cam_x - center_x_;
// y = iter->cam_y - center_y_;
// // check if the building id already exists in the map
// auto x_y_iter = x_y_face_intersect_.find(std::make_pair(x, y));
// // not found
// if (x_y_iter == x_y_face_intersect_.end()) {
// // TO DO : from here on, should be in loop
// for (std::size_t index = 0; index < max_binding_rectangles.min_vertex_x.size();
// ++index) {
// if (max_binding_rectangles.min_vertex_x[index] <= x) {
// x_min_vertices.push_back(index);
// }
// if (max_binding_rectangles.max_vertex_x[index] >= x) {
// x_max_vertices.push_back(index);
// }
// if (max_binding_rectangles.min_vertex_y[index] <= y) {
// y_min_vertices.push_back(index);
// }
// if (max_binding_rectangles.max_vertex_y[index] >= y) {
// y_max_vertices.push_back(index);
// }
// }
// std::set_intersection(x_min_vertices.begin(), x_min_vertices.end(),
// x_max_vertices.begin(), x_max_vertices.end(),
// std::back_inserter(x_intersection));
// std::set_intersection(y_min_vertices.begin(), y_min_vertices.end(),
// y_max_vertices.begin(), y_max_vertices.end(),
// std::back_inserter(y_intersection));
// std::set_intersection(x_intersection.begin(), x_intersection.end(),
// y_intersection.begin(), y_intersection.end(),
// std::back_inserter(face_intersection));
// x_y_face_intersect_.insert(std::make_pair(std::make_pair(x, y), face_intersection));
// // clear temp min/max vectors
// x_min_vertices.clear();
// y_min_vertices.clear();
// x_max_vertices.clear();
// y_max_vertices.clear();
// x_intersection.clear();
// y_intersection.clear();
// }
// // found
// else {
// face_intersection = x_y_iter->second;
// }
// // std::cout << std::setprecision(5) << iter->cam_x << "\t" << iter->cam_y << std::endl;
// // std::cout << "face_intersection: " << face_intersection.size() << std::endl;
// count++;
// for (auto i : face_intersection) {
// height = point_in_triangle(i, x, y, vertices);
// if (height != NO_HIT) {
// // std::cout << x << "\t" << y << "\t" << "height: " << height<< " ,cam h: " <<
// // camera_height_ <<std::endl;
// temp_cam_param = *iter;
// temp_cam_param.cam_z = height + camera_height_;
// filtered_cam_params_.push_back(temp_cam_param);
// break;
// }
// }
// face_intersection.clear();
// }
// end the loop here
return filtered_cam_params_;
}
int RcLoader::readData() {
MbrRectangle temp_rectangle;
for (SturGBuildingData &building_iter : buildings_) {
#ifdef VERBOSE_LOADER
std::cout << building_iter.id << "," << building_iter.vertices.size() << ","
<< building_iter.faces.size() << std::endl;
#endif
temp_rectangle = getMbrRectangle(building_iter.vertices);
pid_MB_Rectangles_.id.push_back(building_iter.id);
pid_MB_Rectangles_.min_vertices.push_back(temp_rectangle.min_vertices);
pid_MB_Rectangles_.max_vertices.push_back(temp_rectangle.max_vertices);
unRavelFaces(building_iter);
}
#ifdef VERBOSE_LOADER
std::cout << "building size: " << buildings_.size() << std::endl;
std::cout << "mbr size: " << pid_MB_Rectangles_.id.size() << std::endl;
#endif
return 1;
}
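// unRavelFaces expands a building's indexed face list into explicit triangles:
// for each face it stores the three vertex positions in x_face/y_face/z_face,
// keeps the component-wise minimum of the three vertices in mbrFace_sub, merges
// the result into pid_and_faces_ keyed by building id, and accumulates the
// running triangle count in num_triangles_.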
int RcLoader::unRavelFaces(SturGBuildingData data) {
#ifdef VERBOSE_LOADER
std::cout << data.id << "\t";
std::cout << "vertices size: " << data.vertices.size() << "\t";
std::cout << "faces size: " << data.faces.size() << std::endl;
#endif
float3 temp;
MbrFaces pid_faces;
for (auto it = data.faces.begin(); it != data.faces.end(); ++it) {
// std::cout << it->face_vertex[0] << ",";
// std::cout << it->face_vertex[1] << ",";
// std::cout << it->face_vertex[2] << ",";
// auto index = std::distance(data.faces.begin(), it)
auto v1 = it->face_vertex[0];
auto v2 = it->face_vertex[1];
auto v3 = it->face_vertex[2];
pid_faces.id.push_back(data.id);
temp = make_float3(data.vertices[v1].vertex_param[0], data.vertices[v1].vertex_param[1],
data.vertices[v1].vertex_param[2]);
pid_faces.x_face.push_back(temp);
temp = make_float3(data.vertices[v2].vertex_param[0], data.vertices[v2].vertex_param[1],
data.vertices[v2].vertex_param[2]);
pid_faces.y_face.push_back(temp);
temp = make_float3(data.vertices[v3].vertex_param[0], data.vertices[v3].vertex_param[1],
data.vertices[v3].vertex_param[2]);
pid_faces.z_face.push_back(temp);
        temp.x = std::min({data.vertices[v1].vertex_param[0], data.vertices[v2].vertex_param[0],
                           data.vertices[v3].vertex_param[0]});
        temp.y = std::min({data.vertices[v1].vertex_param[1], data.vertices[v2].vertex_param[1],
                           data.vertices[v3].vertex_param[1]});
        temp.z = std::min({data.vertices[v1].vertex_param[2], data.vertices[v2].vertex_param[2],
                           data.vertices[v3].vertex_param[2]});
pid_faces.mbrFace_sub.push_back(temp);
}
// check if the building id already exists in the map
auto map_iter = pid_and_faces_.find(data.id);
// if exists add the new triangles to the existing ones.
if (map_iter != pid_and_faces_.end()) {
map_iter->second.id.insert(map_iter->second.id.end(), pid_faces.id.begin(),
pid_faces.id.end());
map_iter->second.x_face.insert(map_iter->second.x_face.end(), pid_faces.x_face.begin(),
pid_faces.x_face.end());
map_iter->second.y_face.insert(map_iter->second.y_face.end(), pid_faces.y_face.begin(),
pid_faces.y_face.end());
map_iter->second.z_face.insert(map_iter->second.z_face.end(), pid_faces.z_face.begin(),
pid_faces.z_face.end());
}
// add new entry
else {
pid_and_faces_.insert(std::make_pair(data.id, pid_faces));
}
num_triangles_ += pid_faces.x_face.size();
return 1;
}
MbrRectangles RcLoader::getRectangles() { return pid_MB_Rectangles_; }
std::map<double, MbrFaces> RcLoader::getMappedFaces() { return pid_and_faces_; }
unsigned int RcLoader::getTrianglesCount() { return num_triangles_; }
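// getMbrRectangle computes a building's axis-aligned bounding box with three
// std::minmax_element passes over the vertex list, one per coordinate axis,
// and returns the per-axis minima and maxima as the MBR corners.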
MbrRectangle RcLoader::getMbrRectangle(const std::vector<SturGVertex> vertices) {
MbrRectangle temp_rectangle;
auto minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[0] < rhs.vertex_param[0];
});
temp_rectangle.min_vertices.x = minmax_vertices.first->vertex_param[0];
temp_rectangle.max_vertices.x = minmax_vertices.second->vertex_param[0];
// std::cout << "min: " << minmax_vertices.first->vertex_param[0] << " max: " <<
// minmax_vertices.second->vertex_param[0] << std::endl;
minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[1] < rhs.vertex_param[1];
});
temp_rectangle.min_vertices.y = minmax_vertices.first->vertex_param[1];
temp_rectangle.max_vertices.y = minmax_vertices.second->vertex_param[1];
// std::cout << "min: " << minmax_vertices.first->vertex_param[1] << " max: " <<
// minmax_vertices.second->vertex_param[1] << std::endl;
minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[2] < rhs.vertex_param[2];
});
temp_rectangle.min_vertices.z = minmax_vertices.first->vertex_param[2];
temp_rectangle.max_vertices.z = minmax_vertices.second->vertex_param[2];
// std::cout << "min: " << minmax_vertices.first->vertex_param[2] << " max: " <<
// minmax_vertices.second->vertex_param[2] << std::endl;
return temp_rectangle;
}
int RcLoader::getRandomColor(float rand_color_array[], uint64_t seed, unsigned int is_terrain) {
// generate random for buildings from model files
// the result is unchanged if its for cnn
if (is_terrain) {
rand_color_array[0] = 0.0f;
rand_color_array[1] = 0.0f;
rand_color_array[2] = 0.0f;
} else {
// input unique seed
srand(static_cast<int>(seed * time(0)));
// generate random numbers between 0 to 255;
// making sure R band is never zero
// convert them to OpenGL colors float format
rand_color_array[0] = (((rand() + 1) % 255) / 255.0f);
rand_color_array[1] = (((rand() + 2) % 255) / 255.0f);
rand_color_array[2] = (((rand() + 3) % 255) / 255.0f);
// we are using red band. thus making sure its a non zero value
// if(rand_color_array[0] == 0){
// rand_color_array[0] = (rand_color_array[1] + rand_color_array[2])/2.0f;
// }
}
return 1;
}
int RcLoader::processDataforRendering() {
int j;
// std::cout << "\getting Tile Ids and reading data from them\n" << std::endl;
getTileIds();
getDataFromTiles();
// temp variable to store color params
float rand_color_array[COLOR_PARAM_SIZE] = {1.0f, 1.0f, 1.0f};
if (buildings_.empty()) {
std::cout << "Building data is empty" << std::endl;
return 0;
}
#ifdef VERBOSE
displayBuildingsData();
#endif
for (SturGBuildingData &building_iter : buildings_) {
std::vector<SturGVertex>::iterator vertice_iter = building_iter.vertices.begin();
getRandomColor(rand_color_array, building_iter.id, building_iter.is_terrain);
// for each face get vertices and corresponding colors
for (const SturGFace &face_iter : building_iter.faces) {
for (int i = 0; i < FACE_VERTICES_SIZE; i++) {
vertice_iter = building_iter.vertices.begin() + face_iter.face_vertex[i];
if (!building_iter.is_terrain) {
vertices_.push_back(*vertice_iter);
for (j = 0; j < VERTEX_VERTICES_SIZE; j++) {
colors_.push_back(rand_color_array[j]);
// To DO: needs fix if VERTEX_VERTICES_SIZE!=COLOR_PARAM_SIZE
}
// colors_.push_back(0.0);
}
else {
ter_vertices_.push_back(*vertice_iter);
for (j = 0; j < VERTEX_VERTICES_SIZE; j++) {
ter_colors_.push_back(rand_color_array[j]);
// To DO: needs fix if VERTEX_VERTICES_SIZE!=COLOR_PARAM_SIZE
}
// ter_colors_.push_back(0.0);
}
}
}
// std::cout << "vertices size now: " << vertices_.size() << std::endl;
}
#ifdef VERBOSE
auto iter = max_element(std::begin(indices_), std::end(indices_)); // c++11
std::cout << "max afer ind: " << *iter << std::endl;
std::cout << "vertices size :" << vertices_.size() << std::endl;
std::cout << "color size :" << colors_.size() << std::endl;
std::cout << "indices size :" << indices_.size() << std::endl;
std::cout << "index vertices size :" << indexed_vertices_.size() << std::endl;
std::cout << "index color size :" << indexed_colors_.size() << std::endl;
std::cout << "terrain vertices size :" << ter_vertices_.size() << std::endl;
std::cout << "terrain color size :" << ter_colors_.size() << std::endl;
std::cout << "terrain indices size :" << ter_indices_.size() << std::endl;
std::cout << "terrain index vertices size :" << ter_indexed_vertices_.size() << std::endl;
std::cout << "terrain index color size :" << ter_indexed_colors_.size() << std::endl;
#endif
return 1;
}
int RcLoader::displayVertices() {
std::ofstream outputFile;
outputFile.open("vertices.csv");
outputFile.precision(8);
std::cout << "vertices size :" << vertices_.size() << std::endl;
for (auto pp_iter = vertices_.begin(); pp_iter != vertices_.end(); pp_iter++) {
outputFile << pp_iter->vertex_param[0] << "," << pp_iter->vertex_param[1] << ","
<< pp_iter->vertex_param[2] << std::endl;
}
outputFile.close();
return 1;
}
// int RcLoader::displayMaxBindingRectangles() {
// std::ofstream outputFile;
// outputFile.open("mbrs.csv");
// outputFile.precision(8);
// std::cout << "mbr size :" << max_binding_rectangles_.size() << std::endl;
// for (auto pp_iter = max_binding_rectangles_.begin(); pp_iter !=
// max_binding_rectangles_.end(); pp_iter++) {
// outputFile << pp_iter->x << "," << pp_iter->y << ","<< pp_iter->z <<"," << pp_iter->w <<
// std::endl;
// }
// outputFile.close();
// return 1;
// }
// RcLoader class destructor with input params
RcLoader::~RcLoader() {
// at expense of speed
std::vector<SturGBuildingData>().swap(buildings_);
std::vector<uint64_t>().swap(tile_ids_);
std::vector<SturGVertex>().swap(vertices_);
std::vector<SturGVertex>().swap(ter_vertices_);
}
| 5bb2c4a0f27c93fca5d7006a6c13534f97bbfe4d.cu | //
// rc_loader.cpp
// RcLoader
//
// Created by Dilip Patlolla on 09/21/17.
// Copyright (c) 2015-2025 STURFEE INC ALL RIGHTS RESERVED
//
#include "rc_loader.hpp"
// RcLoader class constructor
RcLoader::RcLoader() {
x_center_coord_ = 553280; // default values
y_center_coord_ = 4183397; // default values
radius_ = 500; // default values
scene_width_ = 1920; // default values
scene_height_ = 1080; // default values
num_triangles_ = 0;
camera_height_ = 0;
}
int RcLoader::initSf(SturgInputParams inputData) {
center_x_ = inputData.center_x;
center_y_ = inputData.center_y;
// compute the actual center wrt to tile SIZE
x_center_coord_ = center_x_ - fmod(center_x_, TILE_SIZE);
y_center_coord_ = center_y_ - fmod(center_y_, TILE_SIZE);
#ifdef VERBOSE_LOADER
std::cout.precision(16);
std::cout << "center_x_: " << center_x_ << ",";
std::cout << "center_y_: " << center_y_ << std::endl;
std::cout << "x_center_coord_: " << x_center_coord_ << ",";
std::cout << "y_center_coord_: " << y_center_coord_ << std::endl;
#endif
radius_ = inputData.radius;
scene_width_ = inputData.scene_width;
scene_height_ = inputData.scene_height;
camera_height_ = inputData.cam_height;
// image_width_ and image_height_ are respectively scene_width_/4 and scene_height_/4
// if no input image_width_ and image_height_ are provided
// image_width_ = (inputData.image_width == 0) ? (scene_width_/4) : inputData.image_width;
// image_height_ = (inputData.image_height == 0) ? (scene_height_/4) : inputData.image_height;
fov_ = inputData.fov;
write_output_ = inputData.write_output;
num_triangles_ = 0;
return 1;
}
std::vector<SturGVertex> RcLoader::getRawVertices() { return vertices_; }
MbrLsForSF RcLoader::getMbrsAsVector() {
max_binding_rectangles_.min_vertex_x.clear();
max_binding_rectangles_.max_vertex_x.clear();
max_binding_rectangles_.min_vertex_y.clear();
max_binding_rectangles_.max_vertex_y.clear();
// TO DO: check here if the vertices_ has been populated
for (auto i = 0; i < vertices_.size(); i += 3) {
max_binding_rectangles_.min_vertex_x.push_back(
std::min({vertices_[i + 0].vertex_param[0], vertices_[i + 1].vertex_param[0],
vertices_[i + 2].vertex_param[0]}));
max_binding_rectangles_.min_vertex_y.push_back(
std::min({vertices_[i + 0].vertex_param[1], vertices_[i + 1].vertex_param[1],
vertices_[i + 2].vertex_param[1]}));
max_binding_rectangles_.max_vertex_x.push_back(
std::max({vertices_[i + 0].vertex_param[0], vertices_[i + 1].vertex_param[0],
vertices_[i + 2].vertex_param[0]}));
max_binding_rectangles_.max_vertex_y.push_back(
std::max({vertices_[i + 0].vertex_param[1], vertices_[i + 1].vertex_param[1],
vertices_[i + 2].vertex_param[1]}));
}
return max_binding_rectangles_;
}
std::vector<GLfloat> RcLoader::getVertices() { return raycast_vertices_; }
std::vector<float3> RcLoader::getTriangles() { return triangles_; }
// encode : x,y -> unique combination
double RcLoader::encode(double param_a, double param_b) {
return (param_a + param_b) * ((param_a + param_b + 1) / 2) + param_a;
}
// get the optimal buffer tile count
int RcLoader::getTileCount() {
// compute tiles required for the given radius
no_of_tiles_ = int(radius_ / TILE_SIZE);
no_of_tiles_ = (no_of_tiles_ == 0) ? 1 : no_of_tiles_;
// minimum tiles required in x and y direction, is 3 if(no_of_tiles == 0)
no_of_optimum_tiles_ = std::fmax(int(3), no_of_tiles_ * 2 + 1);
return 1;
}
// compute the origins of tiles encompassed by the radius
int RcLoader::getTileIds() {
// compute required tile count
getTileCount();
// moving to the left upper corner of area
x_origin_ = x_center_coord_ - no_of_tiles_ * TILE_SIZE;
y_origin_ = y_center_coord_ - no_of_tiles_ * TILE_SIZE;
#ifdef VERBOSE_LOADER
std::cout << "no_of_tiles_: " << no_of_tiles_ << std::endl;
std::cout << "no_of_optimum_tiles_: " << no_of_optimum_tiles_ << std::endl;
std::cout << "x_origin: " << x_origin_ << ",";
std::cout << "y_origin: " << y_origin_ << std::endl;
#endif
// compute origins of all the required tiles
for (int64_t i_iter = 0; i_iter < no_of_optimum_tiles_; i_iter++) {
for (int64_t j_iter = 0; j_iter < no_of_optimum_tiles_; j_iter++) {
tile_ids_.push_back(
this->encode(x_origin_ + i_iter * TILE_SIZE, y_origin_ + j_iter * TILE_SIZE));
}
}
return 1;
}
int RcLoader::displayTileInfo() {
if (tile_ids_.empty()) {
std::cout << "Tile data is empty" << std::endl;
return 0;
}
std::cout << "total tiles:\t" << tile_ids_.size() << std::endl;
// output tile data
for (const uint64_t &iter : tile_ids_) {
std::cout << iter << std::endl;
}
return 1;
}
int RcLoader::displayBinaryFileMeta(const SturGTileMetaData &bin_file_model_info) {
std::cout << "meta_length :" << bin_file_model_info.meta_length << std::endl;
std::cout << "models_count :" << bin_file_model_info.models_count << std::endl;
std::cout << "tile_center_x :" << bin_file_model_info.tile_center_x << std::endl;
std::cout << "tile_center_y :" << bin_file_model_info.tile_center_y << std::endl;
return 1;
}
int RcLoader::getDataFromTiles() {
// currently reading from local path:
// TO DO : Read data from input arg..
if (tile_ids_.empty()) {
std::cout << "Tile data not available" << std::endl;
return 0;
}
#ifdef G2
std::string terr_dir_path = "/home/ubuntu/geometry_terrain/";
#ifdef SPATIAL_FILTER
// TO DO: move to s3 implementation
std::string data_dir_path = "/home/ubuntu/geometry_terrain_holes/";
#else
// TO DO: move to s3 implementation
std::string data_dir_path = "/home/ubuntu/geometry_models/";
#endif
#else
std::string terr_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_terrain/";
#ifdef SPATIAL_FILTER
std::string data_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_terrain_holes/";
#else
std::string data_dir_path = "/Users/PDR/Desktop/PROJECTS/sturfee/data/geometry_models/";
#endif
#endif
// declare an iterator to a std::vector of uint64_t
std::vector<uint64_t>::iterator iter;
// read sturg bin tiles
for (iter = tile_ids_.begin(); iter < tile_ids_.end(); iter++) {
readSturgBinFile(data_dir_path + std::to_string(*iter), 0);
#ifdef CNN
readSturgBinFile(terr_dir_path + std::to_string(*iter), 1);
#endif
}
return 1;
}
/*int downloadTile(uint32_t tile_id_) {
// TO DO : download data from amazon s3 bucket
return 1;
}*/
int RcLoader::readSturgBinFile(const std::string file_name, unsigned int is_terrain) {
int16_t temp_a, temp_b, temp_c;
uint32_t is_uint16;
SturGVertex vertex;
SturGFace face;
SturGBuildingData temp_building;
std::vector<SturGBuildingData> buildings;
SturGTileMetaData bin_file_meta;
std::ifstream tile_file(file_name, std::ios::in | std::ios::binary);
if (tile_file.is_open()) {
// get meta data
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.version), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.meta_length), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.models_count), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.tile_center_x), sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&bin_file_meta.tile_center_y), sizeof(uint32_t));
#ifdef VERBOSE_LOADER
        // display meta data for convenience
displayBinaryFileMeta(bin_file_meta);
#endif
// get models info
for (uint32_t i = 0; i < bin_file_meta.models_count; i++) {
tile_file.read(reinterpret_cast<char *>(&temp_building.id), sizeof(double));
tile_file.read(reinterpret_cast<char *>(&temp_building.vertices_byte_length),
sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&temp_building.faces_byte_length),
sizeof(uint32_t));
tile_file.read(reinterpret_cast<char *>(&is_uint16), sizeof(uint32_t));
temp_building.is_uint_16 = bool(is_uint16);
temp_building.is_terrain = is_terrain;
buildings.push_back(temp_building);
}
// declare an iterator to a std::vector
std::vector<SturGBuildingData>::iterator iter;
for (iter = buildings.begin(); iter != buildings.end(); iter++) {
// TO DO: better refactoring for 32 bit support
iter->count_vertices = uint32_t(iter->vertices_byte_length / sizeof(u_int16_t) / 3);
for (uint32_t i = 0; i < iter->count_vertices; i++) {
tile_file.read(reinterpret_cast<char *>(&temp_a), sizeof(int16_t));
tile_file.read(reinterpret_cast<char *>(&temp_b), sizeof(int16_t));
tile_file.read(reinterpret_cast<char *>(&temp_c), sizeof(int16_t));
vertex.vertex_param[0] =
float(temp_a) / 100.0 + bin_file_meta.tile_center_x - center_x_;
vertex.vertex_param[1] =
float(temp_b) / 100.0 + bin_file_meta.tile_center_y - center_y_;
vertex.vertex_param[2] = float(temp_c) / 100.0;
iter->vertices.push_back(vertex);
}
iter->count_faces = uint32_t(iter->faces_byte_length / sizeof(int16_t) / 3);
for (uint32_t i = 0; i < iter->count_faces; i++) {
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[0])), sizeof(uint16_t));
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[1])), sizeof(uint16_t));
tile_file.read(reinterpret_cast<char *>(&(face.face_vertex[2])), sizeof(uint16_t));
iter->faces.push_back(face);
}
}
// concatenate the buildings from the current file to the global buildings_ data
buildings_.reserve(buildings_.size() + buildings.size());
std::move(buildings.begin(), buildings.end(), std::inserter(buildings_, buildings_.end()));
// buildings.clear();
tile_file.close();
} else {
std::cout << "skipping file:\t" << file_name << std::endl;
}
return 1;
}
int RcLoader::displayBuildingsData() {
for (const SturGBuildingData &building_iter : buildings_) {
std::cout << "Building ID: " << building_iter.id << "\t";
std::cout << "No. of vertices: " << building_iter.vertices.size() << "\t";
std::cout << "No. of faces: " << building_iter.faces.size() << std::endl;
std::cout << "Is terrain: " << building_iter.is_terrain << std::endl;
std::cout << "UINT16: " << building_iter.is_uint_16;
std::cout << "\tVertices Byte Length: " << building_iter.vertices_byte_length;
std::cout << "\tFaces Byte Length: " << building_iter.faces_byte_length << std::endl;
#ifdef VERBOSE_LOADER2
std::cout << "Vertices : " << std::endl;
for (const SturGVertex &vertice_iter : building_iter.vertices) {
std::cout << "[" << vertice_iter.vertex_param[0] << " " << vertice_iter.vertex_param[1]
<< " " << vertice_iter.vertex_param[2] << "]" << std::endl;
}
std::cout << "Faces : " << std::endl;
for (const SturGFace &face_iter : building_iter.faces) {
std::cout << "[" << face_iter.face_vertex[0] << " " << face_iter.face_vertex[1] << " "
<< face_iter.face_vertex[2] << "]" << std::endl;
}
#endif
}
return 1;
}
std::vector<SturgCameraParameters> RcLoader::processSf(
std::vector<SturgCameraParameters> all_cam_params) {
processDataforRendering();
readData();
filtered_cam_params_.clear();
SturgCameraParameters temp_cam_param;
// MbrRectangles pid_MB_Rectangles = this->getRectangles();
unsigned int max_triangles_count = this->getTrianglesCount();
std::vector<SturGVertex> vertices = this->getRawVertices();
// MbrLsForSF max_binding_rectangles = this->getMbrsAsVector();
// double x = 0;
// double y = 0;
// float height = 0;
// std::vector<size_t> x_min_vertices; // (max_binding_rectangles.min_vertex_x.size());
// std::vector<size_t> y_min_vertices; // (max_binding_rectangles.min_vertex_y.size());
// std::vector<size_t> x_max_vertices; // (max_binding_rectangles.max_vertex_x.size());
// std::vector<size_t> y_max_vertices; // (max_binding_rectangles.max_vertex_y.size());
// std::vector<size_t> x_intersection, y_intersection;
// std::vector<size_t> face_intersection;
// // std::cout << "size of all cam params" << all_cam_params.size() << std::endl;
// unsigned int count = 0;
// for (auto iter = all_cam_params.begin(); iter < all_cam_params.end(); iter++) {
// x = iter->cam_x - center_x_;
// y = iter->cam_y - center_y_;
// // check if the building id already exists in the map
// auto x_y_iter = x_y_face_intersect_.find(std::make_pair(x, y));
// // not found
// if (x_y_iter == x_y_face_intersect_.end()) {
// // TO DO : from here on, should be in loop
// for (std::size_t index = 0; index < max_binding_rectangles.min_vertex_x.size();
// ++index) {
// if (max_binding_rectangles.min_vertex_x[index] <= x) {
// x_min_vertices.push_back(index);
// }
// if (max_binding_rectangles.max_vertex_x[index] >= x) {
// x_max_vertices.push_back(index);
// }
// if (max_binding_rectangles.min_vertex_y[index] <= y) {
// y_min_vertices.push_back(index);
// }
// if (max_binding_rectangles.max_vertex_y[index] >= y) {
// y_max_vertices.push_back(index);
// }
// }
// std::set_intersection(x_min_vertices.begin(), x_min_vertices.end(),
// x_max_vertices.begin(), x_max_vertices.end(),
// std::back_inserter(x_intersection));
// std::set_intersection(y_min_vertices.begin(), y_min_vertices.end(),
// y_max_vertices.begin(), y_max_vertices.end(),
// std::back_inserter(y_intersection));
// std::set_intersection(x_intersection.begin(), x_intersection.end(),
// y_intersection.begin(), y_intersection.end(),
// std::back_inserter(face_intersection));
// x_y_face_intersect_.insert(std::make_pair(std::make_pair(x, y), face_intersection));
// // clear temp min/max vectors
// x_min_vertices.clear();
// y_min_vertices.clear();
// x_max_vertices.clear();
// y_max_vertices.clear();
// x_intersection.clear();
// y_intersection.clear();
// }
// // found
// else {
// face_intersection = x_y_iter->second;
// }
// // std::cout << std::setprecision(5) << iter->cam_x << "\t" << iter->cam_y << std::endl;
// // std::cout << "face_intersection: " << face_intersection.size() << std::endl;
// count++;
// for (auto i : face_intersection) {
// height = point_in_triangle(i, x, y, vertices);
// if (height != NO_HIT) {
// // std::cout << x << "\t" << y << "\t" << "height: " << height<< " ,cam h: " <<
// // camera_height_ <<std::endl;
// temp_cam_param = *iter;
// temp_cam_param.cam_z = height + camera_height_;
// filtered_cam_params_.push_back(temp_cam_param);
// break;
// }
// }
// face_intersection.clear();
// }
// end the loop here
return filtered_cam_params_;
}
int RcLoader::readData() {
MbrRectangle temp_rectangle;
for (SturGBuildingData &building_iter : buildings_) {
#ifdef VERBOSE_LOADER
std::cout << building_iter.id << "," << building_iter.vertices.size() << ","
<< building_iter.faces.size() << std::endl;
#endif
temp_rectangle = getMbrRectangle(building_iter.vertices);
pid_MB_Rectangles_.id.push_back(building_iter.id);
pid_MB_Rectangles_.min_vertices.push_back(temp_rectangle.min_vertices);
pid_MB_Rectangles_.max_vertices.push_back(temp_rectangle.max_vertices);
unRavelFaces(building_iter);
}
#ifdef VERBOSE_LOADER
std::cout << "building size: " << buildings_.size() << std::endl;
std::cout << "mbr size: " << pid_MB_Rectangles_.id.size() << std::endl;
#endif
return 1;
}
int RcLoader::unRavelFaces(SturGBuildingData data) {
#ifdef VERBOSE_LOADER
std::cout << data.id << "\t";
std::cout << "vertices size: " << data.vertices.size() << "\t";
std::cout << "faces size: " << data.faces.size() << std::endl;
#endif
float3 temp;
MbrFaces pid_faces;
for (auto it = data.faces.begin(); it != data.faces.end(); ++it) {
// std::cout << it->face_vertex[0] << ",";
// std::cout << it->face_vertex[1] << ",";
// std::cout << it->face_vertex[2] << ",";
// auto index = std::distance(data.faces.begin(), it)
auto v1 = it->face_vertex[0];
auto v2 = it->face_vertex[1];
auto v3 = it->face_vertex[2];
pid_faces.id.push_back(data.id);
temp = make_float3(data.vertices[v1].vertex_param[0], data.vertices[v1].vertex_param[1],
data.vertices[v1].vertex_param[2]);
pid_faces.x_face.push_back(temp);
temp = make_float3(data.vertices[v2].vertex_param[0], data.vertices[v2].vertex_param[1],
data.vertices[v2].vertex_param[2]);
pid_faces.y_face.push_back(temp);
temp = make_float3(data.vertices[v3].vertex_param[0], data.vertices[v3].vertex_param[1],
data.vertices[v3].vertex_param[2]);
pid_faces.z_face.push_back(temp);
temp.x = std::min({data.vertices[v1].vertex_param[0], data.vertices[v2].vertex_param[0],
data.vertices[v3].vertex_param[0]});
temp.y = std::min({data.vertices[v1].vertex_param[1], data.vertices[v2].vertex_param[1],
data.vertices[v3].vertex_param[1]});
temp.z = std::min({data.vertices[v1].vertex_param[2], data.vertices[v2].vertex_param[2],
data.vertices[v3].vertex_param[2]});
pid_faces.mbrFace_sub.push_back(temp);
}
// check if the building id already exists in the map
auto map_iter = pid_and_faces_.find(data.id);
// if exists add the new triangles to the existing ones.
if (map_iter != pid_and_faces_.end()) {
map_iter->second.id.insert(map_iter->second.id.end(), pid_faces.id.begin(),
pid_faces.id.end());
map_iter->second.x_face.insert(map_iter->second.x_face.end(), pid_faces.x_face.begin(),
pid_faces.x_face.end());
map_iter->second.y_face.insert(map_iter->second.y_face.end(), pid_faces.y_face.begin(),
pid_faces.y_face.end());
map_iter->second.z_face.insert(map_iter->second.z_face.end(), pid_faces.z_face.begin(),
pid_faces.z_face.end());
}
// add new entry
else {
pid_and_faces_.insert(std::make_pair(data.id, pid_faces));
}
num_triangles_ += pid_faces.x_face.size();
return 1;
}
MbrRectangles RcLoader::getRectangles() { return pid_MB_Rectangles_; }
std::map<double, MbrFaces> RcLoader::getMappedFaces() { return pid_and_faces_; }
unsigned int RcLoader::getTrianglesCount() { return num_triangles_; }
MbrRectangle RcLoader::getMbrRectangle(const std::vector<SturGVertex> vertices) {
MbrRectangle temp_rectangle;
auto minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[0] < rhs.vertex_param[0];
});
temp_rectangle.min_vertices.x = minmax_vertices.first->vertex_param[0];
temp_rectangle.max_vertices.x = minmax_vertices.second->vertex_param[0];
// std::cout << "min: " << minmax_vertices.first->vertex_param[0] << " max: " <<
// minmax_vertices.second->vertex_param[0] << std::endl;
minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[1] < rhs.vertex_param[1];
});
temp_rectangle.min_vertices.y = minmax_vertices.first->vertex_param[1];
temp_rectangle.max_vertices.y = minmax_vertices.second->vertex_param[1];
// std::cout << "min: " << minmax_vertices.first->vertex_param[1] << " max: " <<
// minmax_vertices.second->vertex_param[1] << std::endl;
minmax_vertices = std::minmax_element(vertices.begin(), vertices.end(),
[](SturGVertex const &lhs, SturGVertex const &rhs) {
return lhs.vertex_param[2] < rhs.vertex_param[2];
});
temp_rectangle.min_vertices.z = minmax_vertices.first->vertex_param[2];
temp_rectangle.max_vertices.z = minmax_vertices.second->vertex_param[2];
// std::cout << "min: " << minmax_vertices.first->vertex_param[2] << " max: " <<
// minmax_vertices.second->vertex_param[2] << std::endl;
return temp_rectangle;
}
int RcLoader::getRandomColor(float rand_color_array[], uint64_t seed, unsigned int is_terrain) {
// generate a random color for buildings from model files
// the result is unchanged if it's for cnn
if (is_terrain) {
rand_color_array[0] = 0.0f;
rand_color_array[1] = 0.0f;
rand_color_array[2] = 0.0f;
} else {
// input unique seed
srand(static_cast<int>(seed * time(0)));
// generate random numbers between 0 and 255
// making sure the R band is never zero (the explicit check below is commented out)
// convert them to OpenGL float color format
rand_color_array[0] = (((rand() + 1) % 255) / 255.0f);
rand_color_array[1] = (((rand() + 2) % 255) / 255.0f);
rand_color_array[2] = (((rand() + 3) % 255) / 255.0f);
// we are using the red band, thus making sure it's a non-zero value
// if(rand_color_array[0] == 0){
// rand_color_array[0] = (rand_color_array[1] + rand_color_array[2])/2.0f;
// }
}
return 1;
}
int RcLoader::processDataforRendering() {
int j;
// std::cout << "\getting Tile Ids and reading data from them\n" << std::endl;
getTileIds();
getDataFromTiles();
// temp variable to store color params
float rand_color_array[COLOR_PARAM_SIZE] = {1.0f, 1.0f, 1.0f};
if (buildings_.empty()) {
std::cout << "Building data is empty" << std::endl;
return 0;
}
#ifdef VERBOSE
displayBuildingsData();
#endif
for (SturGBuildingData &building_iter : buildings_) {
std::vector<SturGVertex>::iterator vertice_iter = building_iter.vertices.begin();
getRandomColor(rand_color_array, building_iter.id, building_iter.is_terrain);
// for each face get vertices and corresponding colors
for (const SturGFace &face_iter : building_iter.faces) {
for (int i = 0; i < FACE_VERTICES_SIZE; i++) {
vertice_iter = building_iter.vertices.begin() + face_iter.face_vertex[i];
if (!building_iter.is_terrain) {
vertices_.push_back(*vertice_iter);
for (j = 0; j < VERTEX_VERTICES_SIZE; j++) {
colors_.push_back(rand_color_array[j]);
// TODO: needs a fix if VERTEX_VERTICES_SIZE != COLOR_PARAM_SIZE
}
// colors_.push_back(0.0);
}
else {
ter_vertices_.push_back(*vertice_iter);
for (j = 0; j < VERTEX_VERTICES_SIZE; j++) {
ter_colors_.push_back(rand_color_array[j]);
// TODO: needs a fix if VERTEX_VERTICES_SIZE != COLOR_PARAM_SIZE
}
// ter_colors_.push_back(0.0);
}
}
}
// std::cout << "vertices size now: " << vertices_.size() << std::endl;
}
#ifdef VERBOSE
auto iter = max_element(std::begin(indices_), std::end(indices_)); // c++11
std::cout << "max afer ind: " << *iter << std::endl;
std::cout << "vertices size :" << vertices_.size() << std::endl;
std::cout << "color size :" << colors_.size() << std::endl;
std::cout << "indices size :" << indices_.size() << std::endl;
std::cout << "index vertices size :" << indexed_vertices_.size() << std::endl;
std::cout << "index color size :" << indexed_colors_.size() << std::endl;
std::cout << "terrain vertices size :" << ter_vertices_.size() << std::endl;
std::cout << "terrain color size :" << ter_colors_.size() << std::endl;
std::cout << "terrain indices size :" << ter_indices_.size() << std::endl;
std::cout << "terrain index vertices size :" << ter_indexed_vertices_.size() << std::endl;
std::cout << "terrain index color size :" << ter_indexed_colors_.size() << std::endl;
#endif
return 1;
}
int RcLoader::displayVertices() {
std::ofstream outputFile;
outputFile.open("vertices.csv");
outputFile.precision(8);
std::cout << "vertices size :" << vertices_.size() << std::endl;
for (auto pp_iter = vertices_.begin(); pp_iter != vertices_.end(); pp_iter++) {
outputFile << pp_iter->vertex_param[0] << "," << pp_iter->vertex_param[1] << ","
<< pp_iter->vertex_param[2] << std::endl;
}
outputFile.close();
return 1;
}
// int RcLoader::displayMaxBindingRectangles() {
// std::ofstream outputFile;
// outputFile.open("mbrs.csv");
// outputFile.precision(8);
// std::cout << "mbr size :" << max_binding_rectangles_.size() << std::endl;
// for (auto pp_iter = max_binding_rectangles_.begin(); pp_iter !=
// max_binding_rectangles_.end(); pp_iter++) {
// outputFile << pp_iter->x << "," << pp_iter->y << ","<< pp_iter->z <<"," << pp_iter->w <<
// std::endl;
// }
// outputFile.close();
// return 1;
// }
// RcLoader class destructor: releases the vectors' heap storage
RcLoader::~RcLoader() {
// swap with empty vectors to force deallocation, at the expense of speed
std::vector<SturGBuildingData>().swap(buildings_);
std::vector<uint64_t>().swap(tile_ids_);
std::vector<SturGVertex>().swap(vertices_);
std::vector<SturGVertex>().swap(ter_vertices_);
}
|
ac2d4e978fdaf89d580b58a825c442564c9b58c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "struct_kernel.hu"
#include <stdlib.h>
struct s {
int c[10][10];
};
int main()
{
struct s a[10][10], b[10][10];
for (int i = 0; i < 10; ++i)
for (int j = 0; j < 10; ++j)
for (int k = 0; k < 10; ++k)
for (int l = 0; l < 10; ++l)
a[i][j].c[k][l] = i + j + k + l;
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
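/* Added illustration (editorial note, not part of the original file): ppcg_fdiv_q
   is floor division for possibly negative numerators, e.g. ppcg_fdiv_q(7, 3) == 2
   and ppcg_fdiv_q(-7, 3) == -3, whereas plain C integer division truncates toward
   zero and gives -7/3 == -2. */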
{
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
struct s *dev_b;
cudaCheckReturn(hipMalloc((void **) &dev_b, (10) * (10) * sizeof(struct s)));
{
dim3 k0_dimBlock(4, 4, 10);
dim3 k0_dimGrid(1, 1);
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_b);
cudaCheckKernel();
}
cudaCheckReturn(hipMemcpy(b, dev_b, (10) * (10) * sizeof(struct s), hipMemcpyDeviceToHost));
cudaCheckReturn(hipFree(dev_b));
}
for (int i = 0; i < 10; ++i)
for (int j = 0; j < 10; ++j)
for (int k = 0; k < 10; ++k)
for (int l = 0; l < 10; ++l)
if (b[i][j].c[k][l] != a[i][j].c[k][l])
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
| ac2d4e978fdaf89d580b58a825c442564c9b58c8.cu | #include <assert.h>
#include <stdio.h>
#include "struct_kernel.hu"
#include <stdlib.h>
struct s {
int c[10][10];
};
int main()
{
struct s a[10][10], b[10][10];
for (int i = 0; i < 10; ++i)
for (int j = 0; j < 10; ++j)
for (int k = 0; k < 10; ++k)
for (int l = 0; l < 10; ++l)
a[i][j].c[k][l] = i + j + k + l;
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
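/* Added illustration (editorial note, not part of the original file): ppcg_fdiv_q
   is floor division for possibly negative numerators, e.g. ppcg_fdiv_q(7, 3) == 2
   and ppcg_fdiv_q(-7, 3) == -3, whereas plain C integer division truncates toward
   zero and gives -7/3 == -2. */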
{
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
struct s *dev_b;
cudaCheckReturn(cudaMalloc((void **) &dev_b, (10) * (10) * sizeof(struct s)));
{
dim3 k0_dimBlock(4, 4, 10);
dim3 k0_dimGrid(1, 1);
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_b);
cudaCheckKernel();
}
cudaCheckReturn(cudaMemcpy(b, dev_b, (10) * (10) * sizeof(struct s), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFree(dev_b));
}
for (int i = 0; i < 10; ++i)
for (int j = 0; j < 10; ++j)
for (int k = 0; k < 10; ++k)
for (int l = 0; l < 10; ++l)
if (b[i][j].c[k][l] != a[i][j].c[k][l])
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
|
5931a929807dbe5fad05de315979ef8ba78c20d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2 = 0; // initialized: Value2 is read below before it is first assigned
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=31))
{
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(PowerKernal2, dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast to float: integer division would almost always yield 0
}
}
| 5931a929807dbe5fad05de315979ef8ba78c20d5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2 = 0; // initialized: Value2 is read below before it is first assigned
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=31))
{
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast to float: integer division would almost always yield 0
}
}
|
a354d995ea7fd38b68e813e13297507830be3ee7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from clag2z.cu mixed zc -> ds, Fri Jul 18 17:34:11 2014
@author Mark Gates
*/
#include "common_magma.h"
#define blksize 64
__global__ void
slag2d_array( int m, int n,
const float *SA, int ldsa,
double *A, int lda )
{
int i = blockIdx.x*blksize + threadIdx.x;
if ( i < m ) {
A += i;
SA += i;
const double *Aend = A + lda*n;
while( A < Aend ) {
*A = (double)( *SA );
A += lda;
SA += ldsa;
}
}
}
__global__ void
slag2d_vector( int m,
const float *SA,
double *A )
{
int i = blockIdx.x*blksize + threadIdx.x;
if ( i < m ) {
A += i;
SA += i;
*A = (double)( *SA );
}
}
/**
Purpose
-------
SLAG2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA REAL array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@ingroup magma_saux2
********************************************************************/
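/* Usage sketch (added for illustration; not part of the original MAGMA source).
   The device pointers dSA (single precision, ldsa-by-n) and dA (double
   precision, lda-by-n) and the magma_int_t sizes are assumed to be set up by
   the caller:
       magma_int_t info;
       magmablas_slag2d( m, n, dSA, ldsa, dA, lda, &info );
       // info == 0 on success; a negative value flags an illegal argument.
*/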
extern "C" void
magmablas_slag2d(
magma_int_t m, magma_int_t n,
const float *SA, magma_int_t ldsa,
double *A, magma_int_t lda,
magma_int_t *info)
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( blksize );
dim3 grid( (m+blksize-1)/blksize );
if( n > 1 ) {
hipLaunchKernelGGL(( slag2d_array), dim3(grid), dim3(threads), 0, magma_stream , m, n, SA, ldsa, A, lda );
}
else{
hipLaunchKernelGGL(( slag2d_vector), dim3(grid), dim3(threads), 0, magma_stream , m, SA, A );
}
}
| a354d995ea7fd38b68e813e13297507830be3ee7.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from clag2z.cu mixed zc -> ds, Fri Jul 18 17:34:11 2014
@author Mark Gates
*/
#include "common_magma.h"
#define blksize 64
__global__ void
slag2d_array( int m, int n,
const float *SA, int ldsa,
double *A, int lda )
{
int i = blockIdx.x*blksize + threadIdx.x;
if ( i < m ) {
A += i;
SA += i;
const double *Aend = A + lda*n;
while( A < Aend ) {
*A = (double)( *SA );
A += lda;
SA += ldsa;
}
}
}
__global__ void
slag2d_vector( int m,
const float *SA,
double *A )
{
int i = blockIdx.x*blksize + threadIdx.x;
if ( i < m ) {
A += i;
SA += i;
*A = (double)( *SA );
}
}
/**
Purpose
-------
SLAG2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA REAL array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@ingroup magma_saux2
********************************************************************/
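/* Usage sketch (added for illustration; not part of the original MAGMA source).
   The device pointers dSA (single precision, ldsa-by-n) and dA (double
   precision, lda-by-n) and the magma_int_t sizes are assumed to be set up by
   the caller:
       magma_int_t info;
       magmablas_slag2d( m, n, dSA, ldsa, dA, lda, &info );
       // info == 0 on success; a negative value flags an illegal argument.
*/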
extern "C" void
magmablas_slag2d(
magma_int_t m, magma_int_t n,
const float *SA, magma_int_t ldsa,
double *A, magma_int_t lda,
magma_int_t *info)
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( blksize );
dim3 grid( (m+blksize-1)/blksize );
if( n > 1 ) {
slag2d_array<<< grid, threads, 0, magma_stream >>> ( m, n, SA, ldsa, A, lda );
}
else{
slag2d_vector<<< grid, threads, 0, magma_stream >>> ( m, SA, A );
}
}
|
38b5fb8e074f8806bac52622bcc5b563519b5a53.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 2//64
__device__ int bar() __attribute__((always_inline));
__device__ int bar()
{
return 5;
}
__global__ void foo()
{
int x = bar();
__assert(x == 5);
// printf("%d ", x);
}
| 38b5fb8e074f8806bac52622bcc5b563519b5a53.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define N 2//64
__device__ int bar() __attribute__((always_inline));
__device__ int bar()
{
return 5;
}
__global__ void foo()
{
int x = bar();
__assert(x == 5);
// printf("%d ", x);
}
|
c5c3d610ed7cf5420873da2506b5496621863999.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
// compute each Vi
template <typename Dtype>
__global__ void ComputeSource(const int total, const int num, const int height, const int width,
const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) {
// total = num * height * width
CUDA_KERNEL_LOOP(index, total) {
int div = height * width;
int n = index / div;
int n_rem = index % div;
div /= height;
int h = n_rem / div;
int w = n_rem % div;
Dtype x_target = target_data[h * width + w];
Dtype y_target = target_data[h * width + w + width * height];
int offset_theta = 6 * n;
Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2];
Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5];
x = (x + (Dtype) 1.) / (Dtype) 2. * (width - 1);
y = (y + (Dtype) 1.) / (Dtype) 2. * (height - 1);
int offset_source = n * height * width * 2 + h * width + w;
source_data[offset_source] = x;
source_data[offset_source + height * width] = y;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width - 1) ? ceil(x) : (width - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height - 1) ? ceil(y) : (height - 1);
int offset_range = (n * height * width + h * width + w) * 4;
source_range_data[offset_range] = w_min;
source_range_data[offset_range + 1] = w_max;
source_range_data[offset_range + 2] = h_min;
source_range_data[offset_range + 3] = h_max;
}
}
template <typename Dtype>
__global__ void AffineForward(const int count, const int channels, const int height, const int width,
const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int offset_nc = n * channels * height * width + c * height*width;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset_nc + hh * width + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));
}
}
out[offset_nc + h * width + w] = tmp;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const Dtype* target_data = target_.gpu_data();
Dtype* source_data = source_.mutable_gpu_data();
int* range_data = source_range_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set<Dtype>(count, 0, top_data);
ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num_ * height_ * width_, num_, height_, width_,
target_data, theta_data, source_data, range_data);
AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channel_, height_, width_,
bottom_data, source_data, range_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
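// Added note (illustration of the gradient accumulated below): with bilinear sampling
// V_i = sum_{hh,ww} U[c,hh,ww] * max(0, 1-|x_i-ww|) * max(0, 1-|y_i-hh|),
// the derivative w.r.t. the source coordinate is
// dV_i/dx_i = sum_{hh,ww} U[c,hh,ww] * (1-|y_i-hh|) * sgn(ww-x_i);
// sign_x and tmp_hh below contribute these factors, multiplied by the incoming
// top_diff for the chain rule (and symmetrically for y_i via sign_y and tmp_ww).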
template <typename Dtype>
__global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width,
const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* source_grad_cache) {
// count = num * channel * height * width
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w;
int source_diff_y = source_diff_x + height * width;
Dtype tmp_source_x = 0;
Dtype tmp_source_y = 0;
Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w];
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype buffer2 = buffer * data[n * channels * height * width + c * height * width + hh * width + ww];
Dtype tmp_hh = 1 - fabs(y - hh);
Dtype tmp_ww = 1 - fabs(x - ww);
tmp_source_x += buffer2 * tmp_hh * sign_x;
tmp_source_y += buffer2 * tmp_ww * sign_y;
Dtype inc = buffer * tmp_hh * tmp_ww;
int offset = n * channels * height * width + c * height * width + hh * width + ww;
atomic_add(data_diff + offset, inc);
}
}
source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.;
source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = bottom[0]->mutable_gpu_diff();
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data();
const Dtype* target_data = target_.gpu_data();
const Dtype* source_data = source_.gpu_data();
Dtype* source_diff = source_.mutable_gpu_diff();
int* source_range_data = source_range_.mutable_gpu_data();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
int count = bottom[0]->count();
// compute gradient with respect to theta
AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, height_, width_,
bottom_data, source_data, source_range_data, top_diff,
data_diff, source_grad_cache);
// merge gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_,
Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff);
for (int index = 0; index < num_; ++index) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_,
Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
| c5c3d610ed7cf5420873da2506b5496621863999.cu | #include <algorithm>
#include <vector>
#include "cuda.h"
#include "caffe/layer.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
// compute each Vi
template <typename Dtype>
__global__ void ComputeSource(const int total, const int num, const int height, const int width,
const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) {
// total = num * height * width
CUDA_KERNEL_LOOP(index, total) {
int div = height * width;
int n = index / div;
int n_rem = index % div;
div /= height;
int h = n_rem / div;
int w = n_rem % div;
Dtype x_target = target_data[h * width + w];
Dtype y_target = target_data[h * width + w + width * height];
int offset_theta = 6 * n;
Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2];
Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5];
x = (x + (Dtype) 1.) / (Dtype) 2. * (width - 1);
y = (y + (Dtype) 1.) / (Dtype) 2. * (height - 1);
int offset_source = n * height * width * 2 + h * width + w;
source_data[offset_source] = x;
source_data[offset_source + height * width] = y;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width - 1) ? ceil(x) : (width - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height - 1) ? ceil(y) : (height - 1);
int offset_range = (n * height * width + h * width + w) * 4;
source_range_data[offset_range] = w_min;
source_range_data[offset_range + 1] = w_max;
source_range_data[offset_range + 2] = h_min;
source_range_data[offset_range + 3] = h_max;
}
}
template <typename Dtype>
__global__ void AffineForward(const int count, const int channels, const int height, const int width,
const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int offset_nc = n * channels * height * width + c * height*width;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset_nc + hh * width + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));
}
}
out[offset_nc + h * width + w] = tmp;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const Dtype* target_data = target_.gpu_data();
Dtype* source_data = source_.mutable_gpu_data();
int* range_data = source_range_.mutable_gpu_data();
int count = top[0]->count();
caffe_gpu_set<Dtype>(count, 0, top_data);
ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num_ * height_ * width_, num_, height_, width_,
target_data, theta_data, source_data, range_data);
AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channel_, height_, width_,
bottom_data, source_data, range_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
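// Added note (illustration of the gradient accumulated below): with bilinear sampling
// V_i = sum_{hh,ww} U[c,hh,ww] * max(0, 1-|x_i-ww|) * max(0, 1-|y_i-hh|),
// the derivative w.r.t. the source coordinate is
// dV_i/dx_i = sum_{hh,ww} U[c,hh,ww] * (1-|y_i-hh|) * sgn(ww-x_i);
// sign_x and tmp_hh below contribute these factors, multiplied by the incoming
// top_diff for the chain rule (and symmetrically for y_i via sign_y and tmp_ww).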
template <typename Dtype>
__global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width,
const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* source_grad_cache) {
// count = num * channel * height * width
CUDA_KERNEL_LOOP(index, count) {
int div = channels * height * width;
int n = index / div;
int n_rem = index % div;
div /= channels;
int c = n_rem / div;
int c_rem = n_rem % div;
div /= height;
int h = c_rem / div;
int w = c_rem % div;
int offset_source = n * 2 * height * width + h * width + w;
Dtype x = source_data[offset_source];
Dtype y = source_data[offset_source + height * width];
int offset_range = (n * height * width + h * width + w) * 4;
int w_min = source_range_data[offset_range];
int w_max = source_range_data[offset_range + 1];
int h_min = source_range_data[offset_range + 2];
int h_max = source_range_data[offset_range + 3];
int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w;
int source_diff_y = source_diff_x + height * width;
Dtype tmp_source_x = 0;
Dtype tmp_source_y = 0;
Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w];
for (int hh = h_min; hh <= h_max; ++hh) {
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype buffer2 = buffer * data[n * channels * height * width + c * height * width + hh * width + ww];
Dtype tmp_hh = 1 - fabs(y - hh);
Dtype tmp_ww = 1 - fabs(x - ww);
tmp_source_x += buffer2 * tmp_hh * sign_x;
tmp_source_y += buffer2 * tmp_ww * sign_y;
Dtype inc = buffer * tmp_hh * tmp_ww;
int offset = n * channels * height * width + c * height * width + hh * width + ww;
atomic_add(data_diff + offset, inc);
}
}
source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.;
source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.;
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = bottom[0]->mutable_gpu_diff();
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data();
const Dtype* target_data = target_.gpu_data();
const Dtype* source_data = source_.gpu_data();
Dtype* source_diff = source_.mutable_gpu_diff();
int* source_range_data = source_range_.mutable_gpu_data();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
int count = bottom[0]->count();
// compute gradient with respect to theta
AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, height_, width_,
bottom_data, source_data, source_range_data, top_diff,
data_diff, source_grad_cache);
// merge gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_,
Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff);
for (int index = 0; index < num_; ++index) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_,
Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6);
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
45ce5a0a5b296e01369bea210a10cd4b32225a9d.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 45ce5a0a5b296e01369bea210a10cd4b32225a9d.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1fb2f53267c2b262c740cdbce4b168ce7e611ff5.hip | // !!! This is a file automatically generated by hipify!!!
// MapReduce Inverted Index example using CUDA
// Syntax: invertedindex input_dir num_file
// (1) assume each host has four processors, each corresponding
// to a GPU, and each MPI rank reads its share of the part files in input_dir
// (2) extract <a href="..."> link targets from each input file on the GPU
// (3) aggregate the links across all files, keyed by link target
// (4) write an inverted index (link -> list of source files) to /mnt/mrmpi_output
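// Example launch (editorial illustration; the path, rank count and file count
// are assumptions, not taken from the original source):
//   mpirun -np 4 ./invertedindex /mnt/data/pages 16
// i.e. 4 MPI ranks index 16 part-XXXXX files found under /mnt/data/pages.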
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <sstream>
#include <cstring>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include "mapreduce.h"
#include "keyvalue.h"
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#define CEIL(n,m) ((n)/(m) + (int)((n)%(m) !=0))
#define THREAD_CONF(grid, block, gridBound, blockBound) do {\
block.x = blockBound;\
grid.x = gridBound; \
if (grid.x > 65535) {\
grid.x = (int)sqrt((double)grid.x);\
grid.y = CEIL(gridBound, grid.x); \
}\
}while (0)
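/* Worked example (editorial illustration): with blockBound = 256 and
   gridBound = 100000, grid.x = 100000 exceeds 65535, so the macro resets
   grid.x = (int)sqrt(100000) = 316 and grid.y = CEIL(100000, 316) = 317,
   giving 316*317 = 100172 >= 100000 blocks. */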
using namespace MAPREDUCE_NS;
using namespace std;
void mymap(int , KeyValue *, void *);
void myreduce(char *, int, char *, int, int *, KeyValue *, void *);
void mycombine(char *, int, char *, int, int *, KeyValue *, void *);
char inputdir[100];
int num_file = 1;
int me, nprocs;
//int ncompare(char *, int, char *, int);
//void output(uint64_t, char *, int, char *, int, KeyValue *, void *);
struct Info
{
int me;
int nproc;
};
#define START 0x00
#define IN_TAG 0x01
#define IN_ATAG 0x02
#define FOUND_HREF 0x03
#define START_LINK 0x04
struct is_start
{
__host__ __device__
bool operator()(const int x)
{
return x==1;
}
};
__global__ void mark(
char *text,
int *d_segmask,
int length)
{
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
const int tid = tid_y * (blockDim.x*gridDim.x) + tid_x;
if(tid < length)
d_segmask[tid] = 0;
if(tid >= length-9)
return;
if(text[tid] == '<' &&
text[tid+1] == 'a' &&
text[tid+2] == ' ' &&
text[tid+3] == 'h' &&
text[tid+4] == 'r' &&
text[tid+5] == 'e' &&
text[tid+6] == 'f' &&
text[tid+7] == '=' &&
text[tid+8] == '\"')
{
d_segmask[tid+9] = 1;
}
}
__global__ void compute_url_length(
char *d_text,
int *d_urloffset,
int *d_urllength,
int textlen,
int url_num)
{
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
const int tid = tid_y * (blockDim.x*gridDim.x) + tid_x;
if(tid >= url_num)
return;
int start = d_urloffset[tid];
for(int i=start; i < textlen; i++)
{
if(d_text[i] == '\"' || i == textlen-1)
{
d_urllength[tid] = i-start;
d_text[i] = '\0';
return;
}
}
}
/* ---------------------------------------------------------------------- */
//parameters:
int main(int argc, char **args)
{
MPI_Init(&argc,&args);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if (argc <= 2)
{
if (me == 0) printf("Syntax: invertedindex [input_dir num_file]...\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
strcpy(inputdir, args[1]);
num_file = atoi(args[2]);
MapReduce *mr = new MapReduce(MPI_COMM_WORLD);
mr->verbosity = 2;
mr->timer = 1;
if(NULL==opendir("/mnt/mrmpi/temp"))
{
system("mkdir /mnt/mrmpi");
system("mkdir /mnt/mrmpi/temp");
}
mr->set_fpath("/mnt/mrmpi/temp");
mr->memsize = 64;
//mr->outofcore = 1;
MPI_Barrier(MPI_COMM_WORLD);
double tstart = MPI_Wtime();
//printf("start map %d\n", me);
int mapitem = mr->map(nprocs, mymap, &me);
//int nfiles = mr->mapfilecount;
//mr->compress(mycombine, NULL);
//printf("start aggregate %d\n", me);
mr->aggregate(NULL);
//printf("end aggregate %d\n", me);
mr->convert();
//mr->collate(NULL);
//printf("end convert %d\n", me);
if(NULL==opendir("/mnt/mrmpi_output"))
{
system("mkdir /mnt/mrmpi_output");
}
system("rm /mnt/mrmpi_output/InvertedIndex*");
Info info;
info.me = me;
info.nproc = nprocs;
int reduceitem = mr->reduce(myreduce, &info);
MPI_Barrier(MPI_COMM_WORLD);
double tstop = MPI_Wtime();
/*
mr->sort_values(&ncompare);
Count count;
count.n = 0;
count.limit = 10;
count.flag = 0;
mr->map(mr,output,&count);
mr->gather(1);
mr->sort_values(ncompare);
count.n = 0;
count.limit = 10;
count.flag = 1;
mr->map(mr,output,&count);
*/
delete mr;
//printf("map and reduce item are %d, %d\n", mapitem, reduceitem);
if (me == 0)
{
//printf("%d total words, %d unique words\n",nwords,nunique);
printf("Time to process on %d procs = %g (secs), %d, %d\n", nprocs, tstop-tstart, mapitem, reduceitem);
}
MPI_Finalize();
}
int getfilename(char *fullpath, char *filename)
{
size_t found;
std::string path(fullpath);
found=path.find_last_of("/\\");
std::string name = path.substr(found+1);
memcpy(filename, name.c_str(), name.length()+1);
return (int)name.length();
}
/* ----------------------------------------------------------------------
read the part files assigned to this process
for each <a href="..."> URL found, emit key = URL, value = name of the file it appears in
------------------------------------------------------------------------- */
void mymap(int nmap, KeyValue *kv, void *ptr)
{
int me = *(int*)ptr;
hipSetDevice(0);
struct timeval start_map, end_map;
double time_map = 0.0;
hipDeviceSynchronize();
gettimeofday(&start_map, NULL);
int resultlen;
char hostname[20];
MPI_Get_processor_name(hostname, &resultlen);
int host_id = -1;
if(strcmp(hostname, "master\0")==0)
host_id = 0;
else
{
sscanf(hostname, "node%d", &host_id);
//host_id -= 1;
}
int file_each_proc = num_file/nprocs;
for(int fid=me*file_each_proc; fid<(me+1)*file_each_proc && fid < num_file; fid++)
{
char fullname[100];
sprintf(fullname, "%s/part-%05d\0", inputdir, fid);
printf("full file name and gpu id is %s, %d\n", fullname, me%4);
// filesize = # of bytes in file
struct stat stbuf;
int flag = stat(fullname,&stbuf);
if (flag < 0) {
printf("ERROR: Could not query file size %d, %s\n", me, fullname);
MPI_Abort(MPI_COMM_WORLD,1);
}
int filesize = stbuf.st_size;
FILE *fp = fopen(fullname,"r");
char *text = new char[filesize+1];
int nchar = fread(text,1,filesize,fp);
text[nchar] = '\0';
fclose(fp);
char filename[100];
int namelen = getfilename(fullname, filename);
//copy text data into gpu memory
char *d_text;
hipMalloc((void**)&d_text, (filesize+1)*sizeof(char));
hipMemcpy(d_text, text, (filesize+1)*sizeof(char), hipMemcpyHostToDevice);
//record the start position of each url
int *d_sequence;
int *d_segmask;
hipMalloc((void**)&d_sequence, (filesize+1)*sizeof(int));
hipMalloc((void**)&d_segmask, (filesize+1)*sizeof(int));
thrust::device_ptr<int> dev_sequence(d_sequence);
thrust::device_ptr<int> dev_segmask(d_segmask);
thrust::sequence(dev_sequence, dev_sequence+(filesize+1));
dim3 h_dimBlock(256,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(filesize+1, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
//hipEvent_t start, stop;
//float time1;
//hipEventCreate(&start);
//hipEventCreate(&stop);
//hipEventRecord(start, 0);
//record the position array (about 4ms for 64M)
mark<<<h_dimGrid, h_dimBlock>>>(d_text, d_segmask, (filesize+1));
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&time1, start, stop);
//printf("time is %f\n", time1);
//printf("zhao2 %d\n", me);
int urlcount = thrust::count(dev_segmask, dev_segmask+(filesize+1), 1);
if(urlcount == 0)
return;
int *d_urloffset;
int *d_urllength;
hipMalloc((void**)&d_urloffset, urlcount*sizeof(int));
hipMalloc((void**)&d_urllength, urlcount*sizeof(int));
thrust::device_ptr<int> dev_urloffset(d_urloffset);
//about 14ms
thrust::copy_if(dev_sequence, dev_sequence+(filesize+1),
dev_segmask, dev_urloffset, is_start());
dim3 h_dimGrid2(1,1,1);
dim3 h_dimBlock2(256,1,1);
numBlocks = CEIL(urlcount, h_dimBlock2.x);
THREAD_CONF(h_dimGrid2, h_dimBlock2, numBlocks, h_dimBlock2.x);
//about 8ms
compute_url_length<<<h_dimGrid2, h_dimBlock2>>>(
d_text,
d_urloffset,
d_urllength,
(filesize+1),
urlcount);
int *h_urloffset = new int[urlcount];
int *h_urllength = new int[urlcount];
hipMemcpy(text, d_text, (filesize+1)*sizeof(char), hipMemcpyDeviceToHost);
hipMemcpy(h_urloffset, d_urloffset, urlcount*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_urllength, d_urllength, urlcount*sizeof(int), hipMemcpyDeviceToHost);
//about 18ms for 64m ii data
for(int i=0; i<urlcount; i++)
{
kv->add(text+h_urloffset[i], h_urllength[i]+1, filename, namelen+1);
}
//free device memory
hipFree(d_text);
hipFree(d_sequence);
hipFree(d_segmask);
hipFree(d_urloffset);
hipFree(d_urllength);
delete [] text;
delete [] h_urloffset;
delete [] h_urllength;
}
//printf("end of map %d\n", me);
hipDeviceSynchronize();
gettimeofday(&end_map, NULL);
time_map += (1000*(end_map.tv_sec-start_map.tv_sec)
+(end_map.tv_usec-start_map.tv_usec + 0.0)/1000);
printf("time of %d is %f\n", me, time_map);
}
void mycombine(char *key, int keybytes, char *multivalue,
int nvalues, int *valuebytes, KeyValue *kv, void *ptr)
{
stringstream ss (stringstream::in | stringstream::out);
int t = 0;
if(nvalues)
{
char* curval = multivalue;
for(int i=0; i<nvalues; i++)
{
if(t!=0)
ss << " ";
ss << curval;
curval += valuebytes[i];
t++;
}
}
else
{
MapReduce *mr = (MapReduce *) valuebytes;
int nblocks;
uint64_t nvalues_total = mr->multivalue_blocks(nblocks);
for (int iblock = 0; iblock < nblocks; iblock++)
{
int nv = mr->multivalue_block(iblock,&multivalue,&valuebytes);
char* curval = multivalue;
for (int i = 0; i < nv; i++)
{
if(t!=0)
ss << " ";
ss << curval;
curval += valuebytes[i];
t++;
//process each value within the block of values
}
}
}
string s = ss.str();
kv->add(key, keybytes, (char*)s.c_str(), (int)(s.length()+1));
}
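/* Example (illustrative, not taken from the original): if the URL key "x.org"
   was emitted from part-00001 and part-00003, mycombine collapses the
   multivalue {"part-00001","part-00003"} into the single value
   "part-00001 part-00003", so the final reduce sees one space-separated
   posting list per key. */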
/* ----------------------------------------------------------------------
count word occurrence
emit key = word, value = # of multi-values
------------------------------------------------------------------------- */
void myreduce(char *key, int keybytes, char *multivalue,
int nvalues, int *valuebytes, KeyValue *kv, void *ptr)
{
Info *info = (Info*) ptr;
int me = info->me;
int nproc = info->nproc;
char filename[50];
sprintf(filename, "/mnt/mrmpi_output/InvertedIndex-%d-%d\0", nproc , me);
//printf("filename is %s, %d\n", filename, nvalues);
std::fstream filestr;
filestr.open (filename, fstream::out | fstream::app);
filestr << key << "\t";
if(nvalues)
{
char* curval = multivalue;
for(int i=0; i<nvalues; i++)
{
filestr << curval << " ";
curval += valuebytes[i];
}
filestr << endl;
}
else
{
MapReduce *mr = (MapReduce *) valuebytes;
int nblocks;
uint64_t nvalues_total = mr->multivalue_blocks(nblocks);
for (int iblock = 0; iblock < nblocks; iblock++)
{
int nv = mr->multivalue_block(iblock,&multivalue,&valuebytes);
char* curval = multivalue;
for (int i = 0; i < nv; i++)
{
filestr << curval << " ";
curval += valuebytes[i];
//process each value within the block of values
}
}
filestr << endl;
}
filestr.close();
}
/* ----------------------------------------------------------------------
compare two counts
order values by count, largest first
------------------------------------------------------------------------- */
int ncompare(char *p1, int len1, char *p2, int len2)
{
int i1 = *(int *) p1;
int i2 = *(int *) p2;
if (i1 > i2) return -1;
else if (i1 < i2) return 1;
else return 0;
}
/* ----------------------------------------------------------------------
process a word and its count
depending on flag, emit KV or print it, up to limit
------------------------------------------------------------------------- */
void output(uint64_t itask, char *key, int keybytes, char *value,
int valuebytes, KeyValue *kv, void *ptr)
{
/*
Count *count = (Count *) ptr;
count->n++;
if (count->n > count->limit) return;
int n = *(int *) value;
if (count->flag)
printf("%d %s\n",n,key);
else
kv->add(key,keybytes,(char *) &n,sizeof(int));
*/
}
| 1fb2f53267c2b262c740cdbce4b168ce7e611ff5.cu | // MapReduce Inverted Index example using CUDA
// Syntax: invertedindex path-of-data-dir
// (1) assume each host has four processors, each corresponds
// to a GPU, and read one parts of the files in the local dir
// (2) parse into words separated by whitespace
// (3) count occurrence of each word in all files
// (4) print top 10 words
#include <mpi.h>
#include <cuda.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <sstream>
#include <cstring>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include "mapreduce.h"
#include "keyvalue.h"
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#define CEIL(n,m) ((n)/(m) + (int)((n)%(m) !=0))
#define THREAD_CONF(grid, block, gridBound, blockBound) do {\
block.x = blockBound;\
grid.x = gridBound; \
if (grid.x > 65535) {\
grid.x = (int)sqrt((double)grid.x);\
grid.y = CEIL(gridBound, grid.x); \
}\
}while (0)
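// Worked example (editorial, not in the original): for a 64 MB input file
// filesize+1 is 67108865, so CEIL(67108865, 256) = 262145 blocks. That exceeds
// the 65535 one-dimensional grid limit, so THREAD_CONF reshapes the grid to
// grid.x = (int)sqrt(262145) = 512 and grid.y = CEIL(262145, 512) = 513.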
using namespace MAPREDUCE_NS;
using namespace std;
void mymap(int , KeyValue *, void *);
void myreduce(char *, int, char *, int, int *, KeyValue *, void *);
void mycombine(char *, int, char *, int, int *, KeyValue *, void *);
char inputdir[100];
int num_file = 1;
int me, nprocs;
//int ncompare(char *, int, char *, int);
//void output(uint64_t, char *, int, char *, int, KeyValue *, void *);
struct Info
{
int me;
int nproc;
};
#define START 0x00
#define IN_TAG 0x01
#define IN_ATAG 0x02
#define FOUND_HREF 0x03
#define START_LINK 0x04
struct is_start
{
__host__ __device__
bool operator()(const int x)
{
return x==1;
}
};
__global__ void mark(
char *text,
int *d_segmask,
int length)
{
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
const int tid = tid_y * (blockDim.x*gridDim.x) + tid_x;
if(tid < length)
d_segmask[tid] = 0;
if(tid >= length-9)
return;
if(text[tid] == '<' &&
text[tid+1] == 'a' &&
text[tid+2] == ' ' &&
text[tid+3] == 'h' &&
text[tid+4] == 'r' &&
text[tid+5] == 'e' &&
text[tid+6] == 'f' &&
text[tid+7] == '=' &&
text[tid+8] == '\"')
{
d_segmask[tid+9] = 1;
}
}
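// Illustration (editorial): for the text <a href="gnu.org"> the prefix
// <a href=" starts at offset 0, so the thread with tid == 0 sets
// d_segmask[9] = 1 - the mask is 1 exactly at the first character of each URL.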
__global__ void compute_url_length(
char *d_text,
int *d_urloffset,
int *d_urllength,
int textlen,
int url_num)
{
const int tid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int tid_y = blockDim.y * blockIdx.y + threadIdx.y;
const int tid = tid_y * (blockDim.x*gridDim.x) + tid_x;
if(tid >= url_num)
return;
int start = d_urloffset[tid];
for(int i=start; i < textlen; i++)
{
if(d_text[i] == '\"' || i == textlen-1)
{
d_urllength[tid] = i-start;
d_text[i] = '\0';
return;
}
}
}
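// Editorial sketch (not part of the original program): a host-side reference
// that computes the same length as compute_url_length for one offset, without
// mutating the text. The function name is ours and is only meant for
// spot-checking kernel results on the CPU.
static int reference_url_length(const char *text, int textlen, int start)
{
	for (int i = start; i < textlen; i++) {
		// the kernel stops at the closing quote or at the last character
		if (text[i] == '"' || i == textlen - 1) {
			return i - start;
		}
	}
	return textlen - start; // not reached when start < textlen; kept for safety
}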
/* ---------------------------------------------------------------------- */
//parameters:
int main(int argc, char **args)
{
MPI_Init(&argc,&args);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if (argc <= 2)
{
if (me == 0) printf("Syntax: invertedindex [input_dir num_file]...\n");
MPI_Abort(MPI_COMM_WORLD,1);
}
strcpy(inputdir, args[1]);
num_file = atoi(args[2]);
MapReduce *mr = new MapReduce(MPI_COMM_WORLD);
mr->verbosity = 2;
mr->timer = 1;
if(NULL==opendir("/mnt/mrmpi/temp"))
{
system("mkdir /mnt/mrmpi");
system("mkdir /mnt/mrmpi/temp");
}
mr->set_fpath("/mnt/mrmpi/temp");
mr->memsize = 64;
//mr->outofcore = 1;
MPI_Barrier(MPI_COMM_WORLD);
double tstart = MPI_Wtime();
//printf("start map %d\n", me);
int mapitem = mr->map(nprocs, mymap, &me);
//int nfiles = mr->mapfilecount;
//mr->compress(mycombine, NULL);
//printf("start aggregate %d\n", me);
mr->aggregate(NULL);
//printf("end aggregate %d\n", me);
mr->convert();
//mr->collate(NULL);
//printf("end convert %d\n", me);
if(NULL==opendir("/mnt/mrmpi_output"))
{
system("mkdir /mnt/mrmpi_output");
}
system("rm /mnt/mrmpi_output/InvertedIndex*");
Info info;
info.me = me;
info.nproc = nprocs;
int reduceitem = mr->reduce(myreduce, &info);
MPI_Barrier(MPI_COMM_WORLD);
double tstop = MPI_Wtime();
/*
mr->sort_values(&ncompare);
Count count;
count.n = 0;
count.limit = 10;
count.flag = 0;
mr->map(mr,output,&count);
mr->gather(1);
mr->sort_values(ncompare);
count.n = 0;
count.limit = 10;
count.flag = 1;
mr->map(mr,output,&count);
*/
delete mr;
//printf("map and reduce item are %d, %d\n", mapitem, reduceitem);
if (me == 0)
{
//printf("%d total words, %d unique words\n",nwords,nunique);
printf("Time to process on %d procs = %g (secs), %d, %d\n", nprocs, tstop-tstart, mapitem, reduceitem);
}
MPI_Finalize();
}
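// Example run (editorial; the data path and binary name are illustrative, not
// prescribed by the code):
//   mpirun -np 4 ./a.out /mnt/data/crawl 64
// gives each rank 64/4 = 16 part-NNNNN files and appends its postings to
// /mnt/mrmpi_output/InvertedIndex-<nprocs>-<rank>.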
int getfilename(char *fullpath, char *filename)
{
size_t found;
std::string path(fullpath);
found=path.find_last_of("/\\");
std::string name = path.substr(found+1); //keep the substring alive: c_str() of a temporary dangles
memcpy(filename, name.c_str(), name.length()+1);
return (int)name.length();
}
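// Usage sketch (editorial): char name[100]; int n = getfilename("/data/part-00001", name);
// leaves name == "part-00001" and n == 10.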
/* ----------------------------------------------------------------------
read a file
for each word in file, emit key = word, value = NULL
------------------------------------------------------------------------- */
void mymap(int nmap, KeyValue *kv, void *ptr)
{
int me = *(int*)ptr;
cudaSetDevice(0);
struct timeval start_map, end_map;
double time_map = 0.0;
cudaDeviceSynchronize();
gettimeofday(&start_map, NULL);
int resultlen;
char hostname[20];
MPI_Get_processor_name(hostname, &resultlen);
int host_id = -1;
if(strcmp(hostname, "master\0")==0)
host_id = 0;
else
{
sscanf(hostname, "node%d", &host_id);
//host_id -= 1;
}
int file_each_proc = num_file/nprocs;
for(int fid=me*file_each_proc; fid<(me+1)*file_each_proc && fid < num_file; fid++)
{
char fullname[100];
sprintf(fullname, "%s/part-%05d\0", inputdir, fid);
printf("full file name and gpu id is %s, %d\n", fullname, me%4);
// filesize = # of bytes in file
struct stat stbuf;
int flag = stat(fullname,&stbuf);
if (flag < 0) {
printf("ERROR: Could not query file size %d, %s\n", me, fullname);
MPI_Abort(MPI_COMM_WORLD,1);
}
int filesize = stbuf.st_size;
FILE *fp = fopen(fullname,"r");
char *text = new char[filesize+1];
int nchar = fread(text,1,filesize,fp);
text[nchar] = '\0';
fclose(fp);
char filename[100];
int namelen = getfilename(fullname, filename);
//copy text data into gpu memory
char *d_text;
cudaMalloc((void**)&d_text, (filesize+1)*sizeof(char));
cudaMemcpy(d_text, text, (filesize+1)*sizeof(char), cudaMemcpyHostToDevice);
//record the start position of each url
int *d_sequence;
int *d_segmask;
cudaMalloc((void**)&d_sequence, (filesize+1)*sizeof(int));
cudaMalloc((void**)&d_segmask, (filesize+1)*sizeof(int));
thrust::device_ptr<int> dev_sequence(d_sequence);
thrust::device_ptr<int> dev_segmask(d_segmask);
thrust::sequence(dev_sequence, dev_sequence+(filesize+1));
dim3 h_dimBlock(256,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(filesize+1, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
//cudaEvent_t start, stop;
//float time1;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start, 0);
//record the position array (about 4ms for 64M)
mark<<<h_dimGrid, h_dimBlock>>>(d_text, d_segmask, (filesize+1));
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&time1, start, stop);
//printf("time is %f\n", time1);
//printf("zhao2 %d\n", me);
int urlcount = thrust::count(dev_segmask, dev_segmask+(filesize+1), 1);
if(urlcount == 0)
{
//no anchors in this file: free what was allocated above and move on to the
//next file (a bare return here would leak d_text/d_sequence/d_segmask and
//skip the remaining files assigned to this rank)
cudaFree(d_text);
cudaFree(d_sequence);
cudaFree(d_segmask);
delete [] text;
continue;
}
int *d_urloffset;
int *d_urllength;
cudaMalloc((void**)&d_urloffset, urlcount*sizeof(int));
cudaMalloc((void**)&d_urllength, urlcount*sizeof(int));
thrust::device_ptr<int> dev_urloffset(d_urloffset);
//about 14ms
thrust::copy_if(dev_sequence, dev_sequence+(filesize+1),
dev_segmask, dev_urloffset, is_start());
dim3 h_dimGrid2(1,1,1);
dim3 h_dimBlock2(256,1,1);
numBlocks = CEIL(urlcount, h_dimBlock2.x);
THREAD_CONF(h_dimGrid2, h_dimBlock2, numBlocks, h_dimBlock2.x);
//about 8ms
compute_url_length<<<h_dimGrid2, h_dimBlock2>>>(
d_text,
d_urloffset,
d_urllength,
(filesize+1),
urlcount);
int *h_urloffset = new int[urlcount];
int *h_urllength = new int[urlcount];
cudaMemcpy(text, d_text, (filesize+1)*sizeof(char), cudaMemcpyDeviceToHost);
cudaMemcpy(h_urloffset, d_urloffset, urlcount*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_urllength, d_urllength, urlcount*sizeof(int), cudaMemcpyDeviceToHost);
//about 18ms for 64m ii data
for(int i=0; i<urlcount; i++)
{
kv->add(text+h_urloffset[i], h_urllength[i]+1, filename, namelen+1);
}
//free device memory
cudaFree(d_text);
cudaFree(d_sequence);
cudaFree(d_segmask);
cudaFree(d_urloffset);
cudaFree(d_urllength);
delete [] text;
delete [] h_urloffset;
delete [] h_urllength;
}
//printf("end of map %d\n", me);
cudaDeviceSynchronize();
gettimeofday(&end_map, NULL);
time_map += (1000*(end_map.tv_sec-start_map.tv_sec)
+(end_map.tv_usec-start_map.tv_usec + 0.0)/1000);
printf("time of %d is %f\n", me, time_map);
}
void mycombine(char *key, int keybytes, char *multivalue,
int nvalues, int *valuebytes, KeyValue *kv, void *ptr)
{
stringstream ss (stringstream::in | stringstream::out);
int t = 0;
if(nvalues)
{
char* curval = multivalue;
for(int i=0; i<nvalues; i++)
{
if(t!=0)
ss << " ";
ss << curval;
curval += valuebytes[i];
t++;
}
}
else
{
MapReduce *mr = (MapReduce *) valuebytes;
int nblocks;
uint64_t nvalues_total = mr->multivalue_blocks(nblocks);
for (int iblock = 0; iblock < nblocks; iblock++)
{
int nv = mr->multivalue_block(iblock,&multivalue,&valuebytes);
char* curval = multivalue;
for (int i = 0; i < nv; i++)
{
if(t!=0)
ss << " ";
ss << curval;
curval += valuebytes[i];
t++;
//process each value within the block of values
}
}
}
string s = ss.str();
kv->add(key, keybytes, (char*)s.c_str(), (int)(s.length()+1));
}
/* ----------------------------------------------------------------------
count word occurrence
emit key = word, value = # of multi-values
------------------------------------------------------------------------- */
void myreduce(char *key, int keybytes, char *multivalue,
int nvalues, int *valuebytes, KeyValue *kv, void *ptr)
{
Info *info = (Info*) ptr;
int me = info->me;
int nproc = info->nproc;
char filename[50];
sprintf(filename, "/mnt/mrmpi_output/InvertedIndex-%d-%d\0", nproc , me);
//printf("filename is %s, %d\n", filename, nvalues);
std::fstream filestr;
filestr.open (filename, fstream::out | fstream::app);
filestr << key << "\t";
if(nvalues)
{
char* curval = multivalue;
for(int i=0; i<nvalues; i++)
{
filestr << curval << " ";
curval += valuebytes[i];
}
filestr << endl;
}
else
{
MapReduce *mr = (MapReduce *) valuebytes;
int nblocks;
uint64_t nvalues_total = mr->multivalue_blocks(nblocks);
for (int iblock = 0; iblock < nblocks; iblock++)
{
int nv = mr->multivalue_block(iblock,&multivalue,&valuebytes);
char* curval = multivalue;
for (int i = 0; i < nv; i++)
{
filestr << curval << " ";
curval += valuebytes[i];
//process each value within the block of values
}
}
filestr << endl;
}
filestr.close();
}
/* ----------------------------------------------------------------------
compare two counts
order values by count, largest first
------------------------------------------------------------------------- */
int ncompare(char *p1, int len1, char *p2, int len2)
{
int i1 = *(int *) p1;
int i2 = *(int *) p2;
if (i1 > i2) return -1;
else if (i1 < i2) return 1;
else return 0;
}
/* ----------------------------------------------------------------------
process a word and its count
depending on flag, emit KV or print it, up to limit
------------------------------------------------------------------------- */
void output(uint64_t itask, char *key, int keybytes, char *value,
int valuebytes, KeyValue *kv, void *ptr)
{
/*
Count *count = (Count *) ptr;
count->n++;
if (count->n > count->limit) return;
int n = *(int *) value;
if (count->flag)
printf("%d %s\n",n,key);
else
kv->add(key,keybytes,(char *) &n,sizeof(int));
*/
}
|
f403e9c005f03deb96675df3e552bfcb059200b7.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
 * GPU/CUDA port of the C version of the bash N-Queens symmetry-removal solver
 *
 A detailed write-up (in Japanese) is available at
 https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題
 Compile and run the non-recursive CPU version
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c
 Compile and run the recursive CPU version
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r
 Run on the GPU without the bitboard parallelization
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -g
 Run on the GPU with the bitboard parallelization
 $ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define MAX 27
#define THREAD_NUM 96
// Uncomment the following macro if your system needs it.
//#define UINT64_C(c) c ## ULL
//
// Global variables
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//Local struct used on the GPU side
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
unsigned int STEPS;
}local;
// CPU, shared by the recursive and non-recursive paths: symmetry classification of a finished board
void symmetryOps(unsigned int size,struct local* l)
{
/**
When the queen is not in the top-right corner:
(1) if rotating the board 90 degrees reproduces the original, then rotating
90 degrees further (180 from the original) and once more (270) also
reproduces it. The group such a unique solution belongs to has only 2
members, counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//If the 90-degree rotation matches, the 180- and 270-degree rotations match as well
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
(2) If the 90-degree rotation differs from the original, the 270-degree
rotation necessarily differs as well; the 180-degree rotation, however, may
still reproduce the original. When it does, the group has 4 members
(left-right mirror x rotation).
*/
//180-degree rotation
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//Even when the 90-degree rotation differs, the 180-degree rotation can still match
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
(3) If even the 180-degree rotation differs from the original, the group has
8 members (left-right mirror x rotation x top-bottom mirror).
*/
//270-degree rotation
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
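/* Worked example (editorial, well-known result): for N=8 the classification
   yields COUNT2=0, COUNT4=1 (the single 180-degree-symmetric solution) and
   COUNT8=11, so UNIQUE = 0+1+11 = 12 and TOTAL = 0*2 + 1*4 + 11*8 = 92. */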
/**
CPU -c
*/
// Non-recursive backtracking when the queen is not in the corner
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //prune along the upper side
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //prune along the lower side
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //place the queen
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //classify by symmetry
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// Non-recursive backtracking when the queen is in the corner
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //place the queen
//positions still open for a queen
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// Non-recursive symmetry-removal driver
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //place a queen on the second row
//backtracking with the corner queen fixed
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //place the first-row queen
//backtracking with no corner queen
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
CPU -r
*/
// Recursive backtracking when the queen is not in the corner
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //place the queen
symmetryOps(size,l); //classify by symmetry
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Recursive backtracking when the queen is in the corner
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //pruning
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Recursive symmetry-removal driver
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //place a queen on the second row
//backtracking with the corner queen fixed
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //place the first-row queen
//backtracking with no corner queen
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
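/* Editorial note: the two while-loops above split the search by where the
   first queens sit. The first pass fixes board[0]=1 (the corner) and sweeps
   the second-row queen over columns BOUND1 = 2 .. N-2; the second pass puts
   the first-row queen in column BOUND1 = 1, 2, ... while BOUND2 shrinks from
   N-2, so only the left half of the first row is enumerated and the mirrored
   and rotated boards are accounted for by the COUNT2/COUNT4/COUNT8 weights. */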
/**
GPU -g
*/
__device__
struct dlocal
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}dlocal;
__device__ struct dlocal gdl[9999];
// GPU symmetry classification of a finished board
__host__ __device__
long GPU_symmetryOps(unsigned int size,struct dlocal* l)
{
/**
When the queen is not in the top-right corner:
(1) if rotating the board 90 degrees reproduces the original, then rotating
90 degrees further (180 from the original) and once more (270) also
reproduces it. The group such a unique solution belongs to has only 2
members, counting the left-right mirrored pattern.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT2++;
return 2;
}//end if
}//end if
/**
(2) If the 90-degree rotation differs from the original, the 270-degree
rotation necessarily differs as well; the 180-degree rotation, however, may
still reproduce the original. When it does, the group has 4 members
(left-right mirror x rotation).
*/
//
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//
if(own>size-1){
l->COUNT4++;
return 4;
}
}//end if
/**
(3) If even the 180-degree rotation differs from the original, the group has
8 members (left-right mirror x rotation x top-bottom mirror).
*/
//270-degree rotation
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
return 8;
}
// GPU backtracking when the queen is not in the corner
__host__ __device__
long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //place the queen
counter+=GPU_symmetryOps(size,l); //classify by symmetry
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return 0;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU backtracking when the queen is in the corner
__host__ __device__
long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
counter+=8;
}
}else{
if(row<l->BOUND1){ //
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Q
counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU symmetry-removal driver, used only by the -g path
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* hostLocal)
{
// dlocal struct used inside this GPU routine
struct dlocal l;
l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0;
unsigned int bit=0;
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.LASTMASK=l.SIDEMASK=0;
l.BOUND1=2;
l.BOUND2=0;
l.board[0]=1;
while(l.BOUND1>1 && l.BOUND1<size-1){
if(l.BOUND1<size-1){
bit=1<<l.BOUND1;
l.board[1]=bit; //Q
//Q
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l);
}
l.BOUND1++;
}//end while
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.TOPBIT>>1;
l.SIDEMASK=l.TOPBIT|1;
l.LASTMASK=l.TOPBIT|1;
l.BOUND1=1;
l.BOUND2=size-2;
while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){
if(l.BOUND1<l.BOUND2){
bit=1<<l.BOUND1;
l.board[0]=bit; //Q
//Q
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l);
}
l.BOUND1++;
l.BOUND2--;
l.ENDBIT=l.ENDBIT>>1;
l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1;
}//ene while
// copy the tallies back into hostLocal
hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8;
hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8;
}
/**
CUDA13
*/
// GPU -n: symmetry classification on the device for the bitboard kernels
__device__
int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l)
{
unsigned int own,ptn,you,bit;
//90-degree rotation
if(board[l->BOUND2]==1){ own=1; ptn=2;
while(own<=size-1){ bit=1; you=size-1;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90180/270 */
if(own>size-1){ return 2; }
}
//180-degree rotation
if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1;
while(own<=size-1){ bit=1; ptn=l->TOPBIT;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; you--;
}
/** 90180 */
if(own>size-1){ return 4; }
}
//270-degree rotation
if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=size-1){ bit=1; you=0;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
// GPU -n: kernel for the corner-queen branch (launched from BitBoard_backTrack1G)
__global__
void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//
//
//thread ID within the block
const unsigned int tid=threadIdx.x;
//block ID within the grid
const unsigned int bid=blockIdx.x;
//global thread ID
const unsigned int idx=bid*blockDim.x+tid;
//
//
//
//shared
//10mask
//GPU10
//THREAD_NUM
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
__shared__ unsigned int usum[THREAD_NUM];
//
//GPUSTEPS_cond
if(idx<_cond){
//_down,_left,_right
//down,left,right
//CPU t_STEPS
//
// idx
//
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 **********/
if(row+_row<l->BOUND1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2)
}
//
//
bitmap[tid][row]
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//?
//
if(row+1==mark){
//TOTAL
//
unique++;
total+=8; //
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//sum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//_condtotal
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();
if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();
if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();
if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();
if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();
if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();
if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();
if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();
if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
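/* Editorial note on the reduction above: with THREAD_NUM = 96 the first step
   (tid < 64 && tid+64 < THREAD_NUM) folds threads 64..95 onto 0..31, leaving
   64 partial sums; the following warp-synchronous steps halve
   64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1, and thread 0 finally writes the
   per-block results to _total[bid] and _unique[bid]. */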
// GPU -n: kernel for the non-corner branch (launched from BitBoard_backTrack2G)
__global__
void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//
//
//thread ID within the block
unsigned const int tid=threadIdx.x;
//block ID within the grid
unsigned const int bid=blockIdx.x;
//global thread ID
unsigned const int idx=bid*blockDim.x+tid;
//
//
//
//shared
//10mask
//GPU10
//THREAD_NUM
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightbitmap
bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//
//GPUSTEPS_cond
if(idx<_cond){
//_down,_left,_right
//down,left,right
//CPU t_STEPS
//
// idx
//
for(int i=0;i<_row;i++){
c_aBoard[i]=board[idx*_row+i]; //1
}
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//
//bitmap[tid][row]=00000000
//1
if(bitmap_tid_row==0){
row--;
}else{
/**11 **********/
//
if(row+_row<l->BOUND1){
bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK;
//
}else if(row+_row==l->BOUND2) {
if((down_tid_row&l->SIDEMASK)==0){
row--;
continue;
}
if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){
bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK;
}
}
int save_bitmap=bitmap[tid][row];
//
//
bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//?
//
if(row+1==mark){
/***11 l->LASTMASK*********************/
if((save_bitmap&l->LASTMASK)==0){
/***12 symmetryOps l->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBIT*****/
int s=BitBoard_symmetryOps(size,c_aBoard,l);
if(s!=0){
//print(size); //print()TOTAL++
//TOTAL
//
unique++;
total+=s; //
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//
row--;
}
}
}
//sum[tid]
sum[tid]=total;
usum[tid]=unique;
}else{
//_condtotal
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()
//__syncthreads()
__syncthreads();if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n: host driver for the non-corner branch; batches partial boards and launches BitBoard_cuda_kernel_b2
void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//GPUGPU
/***11 size<8mark2*********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
unsigned long totalCond=0;
unsigned int mask=(1<<size)-1;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmap
//stack1
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
hipHostMalloc((void**) &hostDown,sizeof(int)*l->STEPS);
unsigned int* hostLeft;
hipHostMalloc((void**) &hostLeft,sizeof(int)*l->STEPS);
unsigned int* hostRight;
hipHostMalloc((void**) &hostRight,sizeof(int)*l->STEPS);
unsigned int* hostTotal;
unsigned int* deviceDown;
hipMalloc((void**) &deviceDown,sizeof(int)*l->STEPS);
unsigned int* deviceLeft;
hipMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS);
unsigned int* deviceRight;
hipMalloc((void**) &deviceRight,sizeof(int)*l->STEPS);
hipHostMalloc((void**) &hostTotal,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* hostUnique;
hipHostMalloc((void**) &hostUnique,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* deviceTotal;
hipMalloc((void**) &deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* deviceUnique;
hipMalloc((void**) &deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM);
//
unsigned int* hostBoard;
hipHostMalloc((void**) &hostBoard,sizeof(int)*l->STEPS*mark);
unsigned int* deviceBoard;
hipMalloc((void**) &deviceBoard,sizeof(int)*l->STEPS*mark);
//
struct local* hostLocal;
hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
struct local* deviceLocal;
hipMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS/THREAD_NUM); //device buffer, released below with hipFree
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].STEPS=l->STEPS;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//123CPU->row==mark 3
//down,left,righthostDown ,hostLeft,hostRight
//
//->3GPU
//13CPU
//n15row=5CPU
//GPU(GPU10
//)
unsigned int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000
//1
//06GPU
if(bitmap[row]==0){ row--; }
else{//
/***11 *********************/
//
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
//06SGPU
bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3(mark)
//down,left,right
//
//GPU
//totalCond threadId down,left,right
//row=2(13n15row=5)
//hostDown,hostLeft,hostRight
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
hostBoard[totalCond*mark+i]=l->board[i];
}
//
totalCond++;
//GPUGPUSTEPSGPU
//
//ntotalCondSTEPSn
//
//totalCond==STEPS
if(totalCond==l->STEPS){
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
//
hipMemcpy(hostTotal,deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
//
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyHostToDevice);
// CUDA
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2), dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceLocal);
//STEPS
//totalCond
//GPUGPUmatched=true
matched=true;
//totalCond==STEPSGPU0
//(STEPSGPU)
totalCond=0;
}
//hostDown,hostLeft,hostRight1
// row=2
//hostDown,hostLeft,hostRight
row--;
}
}else{
//row==markCPU
//nqueen
row--;
}
}
}
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
//
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
//
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyHostToDevice); //copy the search constants to the device
//size-mark GPU totalCond
//STEPS
//totalCond
// CUDA
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b2), dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceLocal);
//
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//
hipFree(deviceDown);
hipFree(deviceLeft);
hipFree(deviceRight);
hipFree(deviceTotal);
hipFree(deviceUnique);
hipFree(deviceBoard);
hipFree(deviceLocal);
hipHostFree(hostDown);
hipHostFree(hostLeft);
hipHostFree(hostRight);
hipHostFree(hostTotal);
hipHostFree(hostUnique);
hipHostFree(hostBoard);
hipHostFree(hostLocal);
}
// GPU -n: host driver for the corner branch; batches partial states and launches BitBoard_cuda_kernel_b1
void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//GPUGPU
/***08 mark3*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
const unsigned int mask=(1<<size)-1;
unsigned long totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmap
//stack1
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
hipHostMalloc((void**) &hostDown,sizeof(int)*l->STEPS);
unsigned int* hostLeft;
hipHostMalloc((void**) &hostLeft,sizeof(int)*l->STEPS);
unsigned int* hostRight;
hipHostMalloc((void**) &hostRight,sizeof(int)*l->STEPS);
unsigned int* hostTotal;
hipHostMalloc((void**) &hostTotal,sizeof(int)*l->STEPS);
unsigned int* hostUnique;
hipHostMalloc((void**) &hostUnique,sizeof(int)*l->STEPS);
unsigned int* deviceDown;
hipMalloc((void**) &deviceDown,sizeof(int)*l->STEPS);
unsigned int* deviceLeft;
hipMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS);
unsigned int* deviceRight;
hipMalloc((void**) &deviceRight,sizeof(int)*l->STEPS);
unsigned int* deviceTotal;
hipMalloc((void**) &deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM);
unsigned int* deviceUnique;
hipMalloc((void**) &deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM);
struct local* hostLocal;
hipHostMalloc((void**) &hostLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
struct local* deviceLocal;
hipMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS/THREAD_NUM); //device buffer, released below with hipFree
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].STEPS=l->STEPS;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//123CPU->row==mark 3
//down,left,right hostDown,hostLeft,hostRight
//
//->3GPU
//13CPU
//n15row=5CPU
//GPU(GPU10
//)
//while(row>=0) {
int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000
//1
//06GPU
if(bitmap[row]==0){ row--; }
else{//
if(row<l->BOUND1) { /***11 *********************/
bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2)
}
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3(mark)
//down,left,right
//
//GPU
//totalCond threadId down,left,right
//row=2(13n15row=5)
//hostDown,hostLeft,hostRight
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
//
totalCond++;
//GPUGPUSTEPSGPU
//
//ntotalCondSTEPSn
//
//totalCond==STEPS
if(totalCond==l->STEPS){
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
//
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
//
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyHostToDevice); //copy the search constants to the device
// CUDA
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1), dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,row,deviceLocal);
//STEPS
//totalCond
//GPUGPUmatched=true
matched=true;
//totalCond==STEPSGPU0
//(STEPSGPU)
totalCond=0;
}
//hostDown,hostLeft,hostRight1
// row=2
//hostDown,hostLeft,hostRight
row--;
}
}else{
//row==markCPU
//nqueen
row--;
}
}
}
//matched=trueCOUNT //GPUGPU
//matched=true
if(matched){
//
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
//
hipMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,hipMemcpyHostToDevice);
hipMemcpy(deviceLocal,hostLocal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyHostToDevice); //copy the search constants to the device
// CUDA
hipLaunchKernelGGL(( BitBoard_cuda_kernel_b1), dim3(l->STEPS/THREAD_NUM),dim3(THREAD_NUM) , 0, 0, size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceLocal);
//
hipMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
hipMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,hipMemcpyDeviceToHost);
//
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//
hipFree(deviceDown);
hipFree(deviceLeft);
hipFree(deviceRight);
hipFree(deviceTotal);
hipFree(deviceUnique);
hipFree(deviceLocal);
hipHostFree(hostDown);
hipHostFree(hostLeft);
hipHostFree(hostRight);
hipHostFree(hostTotal);
hipHostFree(hostUnique);
hipHostFree(hostLocal);
}
// GPU -n: sets BOUND1/BOUND2 and dispatches the corner and non-corner branches
void BitBoard_build(const unsigned int size,int STEPS)
{
if(size<=0||size>32){return;}
/**
int unsigned
total: TOTAL
*/
struct local l; //GPU
l.STEPS=STEPS;
unsigned int bit=1;
l.board[0]=1;
unsigned int left=bit<<1,down=bit,right=bit>>1;
/**
232
*/
for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){
l.board[1]=bit=(1<<l.BOUND1);
BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l);
}
l.TOPBIT=1<<(size-1);
l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1);
l.ENDBIT=(l.TOPBIT>>1);
/**
12
1/2 n=8 1,2,3 1/2+1 n=9 1,2,3,4
*/
for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){
l.board[0]=bit=(1<<l.BOUND1);
BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l);
l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1;
l.ENDBIT>>=1;
}
}
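/* Usage sketch (editorial): BitBoard_build(13, 24576) enumerates the N=13
   board with the same STEPS value main() passes; the results are accumulated
   into the global TOTAL and UNIQUE counters by the two backtrack drivers. */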
// CUDA device detection and selection
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
unsigned int i;
for(i=0;i<count;++i){
struct hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
hipSetDevice(i);
return true;
}
//
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false;
unsigned int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;}
else{ gpuBitBoard=true; } //gpu
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n STEPS\n",argv[0]);
printf(" -r: CPU \n");
printf(" -c: CPU \n");
printf(" -g: GPU \n");
printf(" -n: GPU \n");
}
if(cpur){ printf("\n\nSymmetry removal, CPU recursive\n"); }
else if(cpu){ printf("\n\nSymmetry removal, CPU non-recursive\n"); }
else if(gpu){ printf("\n\nSymmetry removal, GPU\n"); }
else if(gpuBitBoard){ printf("\n\nSymmetry removal, GPU bitboard\n"); }
if(cpu||cpur)
{
unsigned int min=4;
unsigned int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//
if(cpur){ //
symmetry_R(size,&l);
}
if(cpu){ //
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//
unsigned int ss;
unsigned int ms;
unsigned int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuBitBoard)
{
int STEPS=24576;
if(!InitCUDA()){return 0;}
unsigned int min=4;
unsigned int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL);
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuBitBoard){
TOTAL=UNIQUE=0;
BitBoard_build(size,STEPS);
}
gettimeofday(&t1,NULL);
unsigned int ss;
unsigned int ms;
unsigned int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
| f403e9c005f03deb96675df3e552bfcb059200b7.cu | /**
*
* bash版対称解除法のC言語版のGPU/CUDA移植版
*
詳しい説明はこちらをどうぞ
https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題
非再帰でのコンパイルと実行
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -c
再帰でのコンパイルと実行
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -r
GPU で並列処理せずに実行
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
GPU で並列処理で実行(ビットボード)
$ nvcc -O3 -arch=sm_61 03CUDA_Symmetry_BitBoard.cu && ./a.out -n
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define MAX 27
#define THREAD_NUM 96
// Uncomment the following macro if your system needs it.
//#define UINT64_C(c) c ## ULL
//
// Global variables
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
//Local struct used on the GPU side
typedef struct local
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
unsigned int STEPS;
}local;
// CPU, shared by the recursive and non-recursive paths: symmetry classification of a finished board
void symmetryOps(unsigned int size,struct local* l)
{
/**
2.クイーンが右上角以外にある場合、
(1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか
ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ
ジナルと同型になる。
こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター
ンを加えて2個しかありません。
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
2.クイーンが右上角以外にある場合、
(2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル
とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得
る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同
型になる場合は4個(左右反転×縦横回転)
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
2.クイーンが右上角以外にある場合、
(3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転)
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
/**
CPU -c
*/
// 非再帰 角にQがないときのバックトラック
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ //上部サイド枝刈り
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ //下部サイド枝刈り
if((down[row]&l->SIDEMASK)==0){
row--;
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
l->board[row]=bit; //Qを配置
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); //対称解除法
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// 非再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2;
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row];
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++;
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; //Qを配置
//クイーンが配置可能な位置を表す
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// 非再帰 対称解除法
void symmetry_NR(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
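/* Editorial sanity-check values (well known):
   N=4: TOTAL=2, UNIQUE=1   N=5: 10/2   N=6: 4/1   N=7: 40/6   N=8: 92/12 */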
/**
CPU -r
*/
// 再帰 角にQがないときのバックトラック
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// 再帰 角にQがあるときのバックトラック
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// 再帰 対称解除法
void symmetry_R(unsigned int size,struct local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; //Qを配置
//角にQがないときのバックトラック
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//ene while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
/**
GPU -g
*/
__device__
struct dlocal
{
unsigned int BOUND1,BOUND2;
unsigned int TOPBIT,ENDBIT,SIDEMASK,LASTMASK;
unsigned long board[MAX];
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE;
}dlocal;
__device__ struct dlocal gdl[9999];
// GPU 対称解除法
__host__ __device__
long GPU_symmetryOps(unsigned int size,struct dlocal* l)
{
/**
2.クイーンが右上角以外にある場合、
(1) 90度回転させてオリジナルと同型になる場合、さらに90度回転(オリジナルか
ら180度回転)させても、さらに90度回転(オリジナルから270度回転)させてもオリ
ジナルと同型になる。
こちらに該当するユニーク解が属するグループの要素数は、左右反転させたパター
ンを加えて2個しかありません。
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
// 90度回転して同型なら180度回転しても270度回転しても同型である
if(own>size-1){
l->COUNT2++;
return 2;
}//end if
}//end if
/**
2.クイーンが右上角以外にある場合、
(2) 90度回転させてオリジナルと異なる場合は、270度回転させても必ずオリジナル
とは異なる。ただし、180度回転させた場合はオリジナルと同型になることも有り得
る。こちらに該当するユニーク解が属するグループの要素数は、180度回転させて同
型になる場合は4個(左右反転×縦横回転)
*/
//180度回転
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
//90度回転が同型でなくても180度回転が同型であることもある
if(own>size-1){
l->COUNT4++;
return 4;
}
}//end if
/**
2.クイーンが右上角以外にある場合、
(3)180度回転させてもオリジナルと異なる場合は、8個(左右反転×縦横回転×上下反転)
*/
//270度回転
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return 0;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
return 8;
}
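/**
  [Editor's note] Illustrative consistency check (not part of the solver): for N=5 the
  known results are TOTAL=10 and UNIQUE=2, and with only the classes counted above the
  split is forced to COUNT2=1, COUNT4=0, COUNT8=1, since 2*1+4*0+8*1=10 and 1+0+1=2.
*/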
// GPU 角にQがないときのバックトラック
__host__ __device__
long GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; //Qを配置
counter+=GPU_symmetryOps(size,l); //対称解除
}
}
}else{
if(row<l->BOUND1){
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return 0;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit;
counter+=GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU 角にQがあるときのバックトラック
__host__ __device__
long GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,struct dlocal* l)
{
unsigned long counter=0;
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++;
counter+=8;
}
}else{
if(row<l->BOUND1){ //枝刈り
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap;
bitmap=bitmap^bit;
l->board[row]=bit; //Qを配置
counter+=GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
return counter;
}
// GPU 対称解除法 -g の実行時のみ呼び出されます
__host__ __device__
void GPU_symmetry_R(unsigned int size,struct local* hostLocal)
{
// GPU内部で使うための dlocal構造体
struct dlocal l;
l.TOTAL=l.UNIQUE=l.COUNT2=l.COUNT4=l.COUNT8=0;
unsigned int bit=0;
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.LASTMASK=l.SIDEMASK=0;
l.BOUND1=2;
l.BOUND2=0;
l.board[0]=1;
while(l.BOUND1>1 && l.BOUND1<size-1){
if(l.BOUND1<size-1){
bit=1<<l.BOUND1;
l.board[1]=bit; //2行目にQを配置
//角にQがあるときのバックトラック
GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,&l);
}
l.BOUND1++;
}//end while
l.TOPBIT=1<<(size-1);
l.ENDBIT=l.TOPBIT>>1;
l.SIDEMASK=l.TOPBIT|1;
l.LASTMASK=l.TOPBIT|1;
l.BOUND1=1;
l.BOUND2=size-2;
while(l.BOUND1>0 && l.BOUND2<size-1 && l.BOUND1<l.BOUND2){
if(l.BOUND1<l.BOUND2){
bit=1<<l.BOUND1;
l.board[0]=bit; //Qを配置
//角にQがないときのバックトラック
GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,&l);
}
l.BOUND1++;
l.BOUND2--;
l.ENDBIT=l.ENDBIT>>1;
l.LASTMASK=l.LASTMASK<<1|l.LASTMASK|l.LASTMASK>>1;
}//end while
// 集計値は hostLocalへ代入
hostLocal->UNIQUE=l.COUNT2+l.COUNT4+l.COUNT8;
hostLocal->TOTAL=l.COUNT2*2+l.COUNT4*4+l.COUNT8*8;
}
/**
CUDA13
*/
// GPU -n 対称解除法
__device__
int BitBoard_symmetryOps(const unsigned int size,const unsigned int* board,struct local* l)
{
unsigned int own,ptn,you,bit;
//90度回転
if(board[l->BOUND2]==1){ own=1; ptn=2;
while(own<=size-1){ bit=1; you=size-1;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you--; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn<<=1;
}
/** 90度回転して同型なら180度/270度回転も同型である */
if(own>size-1){ return 2; }
}
//180度回転
if(board[size-1]==l->ENDBIT){ own=1; you=size-1-1;
while(own<=size-1){ bit=1; ptn=l->TOPBIT;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; ptn>>=1; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; you--;
}
/** 90度回転が同型でなくても180度回転が同型である事もある */
if(own>size-1){ return 4; }
}
//270度回転
if(board[l->BOUND1]==l->TOPBIT){ own=1; ptn=l->TOPBIT>>1;
while(own<=size-1){ bit=1; you=0;
while((board[you]!=ptn)&&(board[own]>=bit)){ bit<<=1; you++; }
if(board[own]>bit){ return 0; } else if(board[own]<bit){ break; }
own++; ptn>>=1;
}
}
return 8;
}
// GPU -n Qが角にある場合のバックトラック内の再帰処理をカーネルで行う
__global__
void BitBoard_cuda_kernel_b1(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
const unsigned int tid=threadIdx.x;
//グリッド内のブロックID
const unsigned int bid=blockIdx.x;
//全体通してのID
const unsigned int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
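//[Editor's note] Example of the depth-10 assumption: with mark=size-10 for size>12 the
//kernel advances at most size-mark=10 rows, so the fixed per-thread stacks of 10 entries
//(down/left/right/bitmap below) are sufficient.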
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
bitmap[tid][row] =mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはSTEPS数起動するが_cond以上は空回しする
if(idx<_cond){
//_down,_left,_rightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はSTEPS個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り**********/
if(row+_row<l->BOUND1) {
bitmap_tid_row=bitmap[tid][row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]
^=bit
=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=8; //対称解除で得られた解数を加算
//}
row--;
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]=mask&~(down[tid][rowP]|left[tid][rowP]|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//_cond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();
if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();
if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();
if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();
if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();
if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();
if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();
if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();
if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
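/**
  [Editor's note] Illustrative sketch only (not called by the kernels): the unrolled
  reduction above can equivalently be written as the loop below. It assumes THREAD_NUM
  is a power of two and that every thread of the block reaches the call.
*/
__device__ void block_reduce_sketch(unsigned int* sum,unsigned int* usum,unsigned int tid)
{
  for(unsigned int stride=THREAD_NUM/2;stride>0;stride>>=1){
    if(tid<stride){
      sum[tid]+=sum[tid+stride];
      usum[tid]+=usum[tid+stride];
    }
    // synchronize the whole block after every step (the kernels above switch to __syncwarp() once a single warp remains)
    __syncthreads();
  }
}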
// GPU -n Qが角にない場合のバックトラック内の再帰処理をカーネルで行う
__global__
void BitBoard_cuda_kernel_b2(const unsigned int size,unsigned int mark,unsigned int* _down,unsigned int* _left,unsigned int* _right,unsigned int* _total,unsigned int* _unique,unsigned long _cond,unsigned int* board,unsigned int _row,struct local* l)
{
const unsigned int mask=(1<<size)-1;
unsigned long total=0;
unsigned int unique=0;
int row=0;
unsigned int bit;
//
//スレッド
//
//ブロック内のスレッドID
unsigned const int tid=threadIdx.x;
//グリッド内のブロックID
unsigned const int bid=blockIdx.x;
//全体通してのID
unsigned const int idx=bid*blockDim.x+tid;
//
//シェアードメモリ
//
//sharedメモリを使う ブロック内スレッドで共有
//10固定なのは現在のmask設定で
//GPUで実行するのは最大10だから
//THREAD_NUMはブロックあたりのスレッド数
__shared__ unsigned int down[THREAD_NUM][10];
down[tid][row]=_down[idx];
__shared__ unsigned int left[THREAD_NUM][10];
left[tid][row]=_left[idx];
__shared__ unsigned int right[THREAD_NUM][10];
right[tid][row]=_right[idx];
__shared__ unsigned int bitmap[THREAD_NUM][10];
//down,left,rightからbitmapを出す
bitmap[tid][row]=mask&~(down[tid][row]|left[tid][row]|right[tid][row]);
__shared__ unsigned int sum[THREAD_NUM];
unsigned int c_aBoard[MAX];
__shared__ unsigned int usum[THREAD_NUM];
//余分なスレッドは動かさない
//GPUはSTEPS数起動するが_cond以上は空回しする
if(idx<_cond){
//_down,_left,_rightの情報を
//down,left,rightに詰め直す
//CPU で詰め込んだ t_はSTEPS個あるが
//ブロック内ではブロックあたりのスレッド数に限定
//されるので idxでよい
//
for(int i=0;i<_row;i++){
c_aBoard[i]=board[idx*_row+i]; //2次元配列だが1次元的に利用
}
unsigned int bitmap_tid_row;
unsigned int down_tid_row;
unsigned int left_tid_row;
unsigned int right_tid_row;
while(row>=0){
bitmap_tid_row=bitmap[tid][row];
down_tid_row=down[tid][row];
left_tid_row=left[tid][row];
right_tid_row=right[tid][row];
//
//bitmap[tid][row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
if(bitmap_tid_row==0){
row--;
}else{
/**11 枝刈り追加**********/
//【枝刈り】上部サイド枝刈り
if(row+_row<l->BOUND1){
bitmap_tid_row=bitmap[tid][row]&=~l->SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row+_row==l->BOUND2) {
if((down_tid_row&l->SIDEMASK)==0){
row--;
continue;
}
if((down_tid_row&l->SIDEMASK)!=l->SIDEMASK){
bitmap_tid_row=bitmap[tid][row]&=l->SIDEMASK;
}
}
int save_bitmap=bitmap[tid][row];
//クイーンを置く
//置く場所があるかどうか
bitmap[tid][row]^=c_aBoard[row+_row]=bit=(-bitmap_tid_row&bitmap_tid_row);
if((bit&mask)!=0){
//最終行?最終行から1個前の行まで
//無事到達したら 加算する
if(row+1==mark){
/***11 l->LASTMASK枝刈り*********************/
if((save_bitmap&l->LASTMASK)==0){
/***12 symmetryOps 省力化のためl->BOUND1,l->BOUND2,l->TOPBIT,l->ENDBITを渡す*****/
int s=BitBoard_symmetryOps(size,c_aBoard,l);
if(s!=0){
//print(size); //print()でTOTALを++しない
//ホストに戻す配列にTOTALを入れる
//スレッドが1つの場合は配列は1個
unique++;
total+=s; //対称解除で得られた解数を加算
}
row--;
}
}else{
int rowP=row+1;
down[tid][rowP]=down_tid_row|bit;
left[tid][rowP]=(left_tid_row|bit)<<1;
right[tid][rowP]=(right_tid_row|bit)>>1;
bitmap[tid][rowP]
=mask&~(
down[tid][rowP]
|left[tid][rowP]
|right[tid][rowP]);
row++;
}
}else{
//置く場所がなければ1個上に
row--;
}
}
}
//最後sum[tid]に加算する
sum[tid]=total;
usum[tid]=unique;
}else{
//_cond未満は空回しするのでtotalは加算しない
sum[tid]=0;
usum[tid]=0;
}
//__syncthreads()でブロック内のスレッド間の同期
//全てのスレッドが__syncthreads()に辿り着くのを待つ
__syncthreads();
if(tid<64&&tid+64<THREAD_NUM){
sum[tid]+=sum[tid+64];
usum[tid]+=usum[tid+64];
}
__syncwarp();
if(tid<32){
sum[tid]+=sum[tid+32];
usum[tid]+=usum[tid+32];
}
__syncwarp();
if(tid<16){
sum[tid]+=sum[tid+16];
usum[tid]+=usum[tid+16];
}
__syncwarp();
if(tid<8){
sum[tid]+=sum[tid+8];
usum[tid]+=usum[tid+8];
}
__syncwarp();
if(tid<4){
sum[tid]+=sum[tid+4];
usum[tid]+=usum[tid+4];
}
__syncwarp();
if(tid<2){
sum[tid]+=sum[tid+2];
usum[tid]+=usum[tid+2];
}
__syncwarp();
if(tid<1){
sum[tid]+=sum[tid+1];
usum[tid]+=usum[tid+1];
}
__syncwarp();
if(tid==0){
_total[bid]=sum[0];
_unique[bid]=usum[0];
}
}
// GPU -n Qが角にない
void BitBoard_backTrack2G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***11 size<8の時はmarkが2*********************/
unsigned int mark=size>12?size-10:3;
//unsigned int mark=size>11?size-9:3;
if(size<8){ mark=2; }
const unsigned int h_mark=row;
unsigned long totalCond=0;
unsigned int mask=(1<<size)-1;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
cudaMallocHost((void**) &hostDown,sizeof(int)*l->STEPS);
unsigned int* hostLeft;
cudaMallocHost((void**) &hostLeft,sizeof(int)*l->STEPS);
unsigned int* hostRight;
cudaMallocHost((void**) &hostRight,sizeof(int)*l->STEPS);
unsigned int* hostTotal;
unsigned int* deviceDown;
cudaMalloc((void**) &deviceDown,sizeof(int)*l->STEPS);
unsigned int* deviceLeft;
cudaMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS);
unsigned int* deviceRight;
cudaMalloc((void**) &deviceRight,sizeof(int)*l->STEPS);
cudaMallocHost((void**) &hostTotal,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* hostUnique;
cudaMallocHost((void**) &hostUnique,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* deviceTotal;
cudaMalloc((void**) &deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM);
unsigned int* deviceUnique;
cudaMalloc((void**) &deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM);
//
unsigned int* hostBoard;
cudaMallocHost((void**) &hostBoard,sizeof(int)*l->STEPS*mark);
unsigned int* deviceBoard;
cudaMalloc((void**) &deviceBoard,sizeof(int)*l->STEPS*mark);
//
struct local* hostLocal;
cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
struct local* deviceLocal;
cudaMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].STEPS=l->STEPS;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報をhostDown ,hostLeft,hostRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
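//[Editor's note] Outline of the batching pattern implemented below (comments only):
//each partial board reached at row==mark stores its down/left/right state at index
//totalCond of the host buffers; once totalCond==STEPS the buffers are copied to the
//device and one launch of STEPS/THREAD_NUM blocks processes the whole batch, after
//which totalCond is reset to 0; a final launch after the loop flushes the remainder.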
unsigned int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
/***11 枝刈り追加*********************/
//【枝刈り】上部サイド枝刈り
if(row<l->BOUND1){
bitmap[row]&=~l->SIDEMASK;
//【枝刈り】下部サイド枝刈り
}else if(row==l->BOUND2) {
if((down[row]&l->SIDEMASK)==0){ row--; continue; }
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){ bitmap[row]&=l->SIDEMASK; }
}
//06SGPU
bitmap[row]^=l->board[row]=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//hostDown,hostLeft,hostRightに格納する
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
for(int i=0;i<mark;i++){
hostBoard[totalCond*mark+i]=l->board[i];
}
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。STEPSはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがSTEPSを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==STEPSの場合だけこの中へ
if(totalCond==l->STEPS){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(long)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
// ホストからデバイスへ転送
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
// CUDA起動
BitBoard_cuda_kernel_b2<<<l->STEPS/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,row,deviceLocal);
//STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==STEPSルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもSTEPS数分だけGPUを起動できる)
totalCond=0;
}
//hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//hostDown,hostLeft,hostRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
// ホストからデバイスへ転送
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceBoard,hostBoard,sizeof(int)*totalCond*mark,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
//size-mark は何行GPUを実行するか totalCondはスレッド数
//STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われるのは
//totalCondの数だけでそれ以外は空回しになる
// CUDA起動
BitBoard_cuda_kernel_b2<<<l->STEPS/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,deviceBoard,mark,deviceLocal);
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//
cudaFree(deviceDown);
cudaFree(deviceLeft);
cudaFree(deviceRight);
cudaFree(deviceTotal);
cudaFree(deviceUnique);
cudaFree(deviceBoard);
cudaFree(deviceLocal);
cudaFreeHost(hostDown);
cudaFreeHost(hostLeft);
cudaFreeHost(hostRight);
cudaFreeHost(hostTotal);
cudaFreeHost(hostUnique);
cudaFreeHost(hostBoard);
cudaFreeHost(hostLocal);
}
// GPU -n Qが角にある
void BitBoard_backTrack1G(const unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,struct local* l)
{
//何行目からGPUで行くか。ここの設定は変更可能、設定値を多くするほどGPUで並行して動く
/***08 クイーンを2行目まで固定で置くためmarkが3以上必要*********************/
const unsigned int mark=size>12?size-10:3;
const unsigned int h_mark=row;
const unsigned int mask=(1<<size)-1;
unsigned long totalCond=0;
bool matched=false;
//host
unsigned int down[32]; down[row]=_down;
unsigned int right[32]; right[row]=_right;
unsigned int left[32]; left[row]=_left;
//bitmapを配列で持つことにより
//stackを使わないで1行前に戻れる
unsigned int bitmap[32];
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
unsigned int bit;
unsigned int* hostDown;
cudaMallocHost((void**) &hostDown,sizeof(int)*l->STEPS);
unsigned int* hostLeft;
cudaMallocHost((void**) &hostLeft,sizeof(int)*l->STEPS);
unsigned int* hostRight;
cudaMallocHost((void**) &hostRight,sizeof(int)*l->STEPS);
unsigned int* hostTotal;
cudaMallocHost((void**) &hostTotal,sizeof(int)*l->STEPS);
unsigned int* hostUnique;
cudaMallocHost((void**) &hostUnique,sizeof(int)*l->STEPS);
unsigned int* deviceDown;
cudaMalloc((void**) &deviceDown,sizeof(int)*l->STEPS);
unsigned int* deviceLeft;
cudaMalloc((void**) &deviceLeft,sizeof(int)*l->STEPS);
unsigned int* deviceRight;
cudaMalloc((void**) &deviceRight,sizeof(int)*l->STEPS);
unsigned int* deviceTotal;
cudaMalloc((void**) &deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM);
unsigned int* deviceUnique;
cudaMalloc((void**) &deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM);
struct local* hostLocal;
cudaMallocHost((void**) &hostLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
struct local* deviceLocal;
cudaMalloc((void**) &deviceLocal,sizeof(struct local)*l->STEPS/THREAD_NUM);
hostLocal[0].BOUND1=l->BOUND1;
hostLocal[0].BOUND2=l->BOUND2;
hostLocal[0].TOPBIT=l->TOPBIT;
hostLocal[0].ENDBIT=l->ENDBIT;
hostLocal[0].SIDEMASK=l->SIDEMASK;
hostLocal[0].LASTMASK=l->LASTMASK;
hostLocal[0].STEPS=l->STEPS;
for(int i=0;i<MAX;i++){
hostLocal[0].board[i]=l->board[i];
}
//12行目までは3行目までCPU->row==mark以下で 3行目までの
//down,left,right情報を hostDown,hostLeft,hostRight
//に格納
//する->3行目以降をGPUマルチスレッドで実行し結果を取得
//13行目以降はCPUで実行する行数が1個ずつ増えて行く
//例えばn15だとrow=5までCPUで実行し、
//それ以降はGPU(現在の設定だとGPUでは最大10行実行する
//ようになっている)
//while(row>=0) {
int rowP=0;
unsigned long total=0;
unsigned long unique=0;
while(row>=h_mark) {
//bitmap[row]=00000000 クイーンを
//どこにも置けないので1行上に戻る
//06GPU こっちのほうが優秀
if(bitmap[row]==0){ row--; }
else{//おける場所があれば進む
if(row<l->BOUND1) { /***11 枝刈り*********************/
bitmap[row]&=~2; // bm|=2; bm^=2; (bm&=~2と同等)
}
bitmap[row]^=bit=(-bitmap[row]&bitmap[row]);
if((bit&mask)!=0){//置く場所があれば先に進む
rowP=row+1;
down[rowP]=down[row]|bit;
left[rowP]=(left[row]|bit)<<1;
right[rowP]=(right[row]|bit)>>1;
bitmap[rowP]=mask&~(down[rowP]|left[rowP]|right[rowP]);
row++;
if(row==mark){
//3行目(mark)にクイーンを1個ずつ置いていって、
//down,left,right情報を格納、
//その次の行へは進まない。その行で可能な場所にクイー
//ン置き終わったらGPU並列実行
//totalCond がthreadIdになる 各スレッドに down,left,right情報を渡す
//row=2(13行目以降は増えていく。例えばn15だとrow=5)の情報を
//hostDown,hostLeft,hostRightに格納する
hostDown[totalCond]=down[row];
hostLeft[totalCond]=left[row];
hostRight[totalCond]=right[row];
//スレッド数をインクリメントする
totalCond++;
//最大GPU数に達してしまったら一旦ここでGPUを実行する。STEPSはGPUの同
//時並行稼働数を制御
//nの数が少ないうちはtotalCondがSTEPSを超えることはないがnの数が増え
//て行くと超えるようになる。
//ここではtotalCond==STEPSの場合だけこの中へ
if(totalCond==l->STEPS){
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUか
//ら出たらmatched=trueになってる
if(matched){
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
// ホストからデバイスへ転送
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
// CUDA起動
BitBoard_cuda_kernel_b1<<<l->STEPS/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,row,deviceLocal);
//STEPS数の数だけマルチスレッドで起動するのだが、実際に計算が行われ
//るのはtotalCondの数だけでそれ以外は空回しになる
//GPU内でカウントしているので、GPUから出たらmatched=trueになってる
matched=true;
//totalCond==STEPSルートでGPUを実行したらスレッドをまた0から開始す
//る(これによりなんどもSTEPS数分だけGPUを起動できる)
totalCond=0;
}
//hostDown,hostLeft,hostRightに情報を格納したら1行上に上がる
//これを繰り返すことにより row=2で可能な場所全てにクイーンを置いて
//hostDown,hostLeft,hostRightに情報を格納する
row--;
}
}else{
//置く場所がなければ上に上がる。row==mark行に達するまではCPU側で普通に
//nqueenをやる
row--;
}
}
}
//matched=trueの時にCOUNT追加 //GPU内でカウントしているので、GPUから出たら
//matched=trueになってる
if(matched){
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
matched=false;
}
// ホストからデバイスへ転送
cudaMemcpy(deviceDown,hostDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLeft,hostLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceRight,hostRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
cudaMemcpy(deviceLocal,hostLocal,sizeof(struct local),cudaMemcpyHostToDevice);
// CUDA起動
BitBoard_cuda_kernel_b1<<<l->STEPS/THREAD_NUM,THREAD_NUM >>>(size,size-mark,deviceDown,deviceLeft,deviceRight,deviceTotal,deviceUnique,totalCond,mark,deviceLocal);
// デバイスからホストへ転送
cudaMemcpy(hostTotal,deviceTotal,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
cudaMemcpy(hostUnique,deviceUnique,sizeof(int)*l->STEPS/THREAD_NUM,cudaMemcpyDeviceToHost);
// 集計
for(int col=0;col<l->STEPS/THREAD_NUM;col++){
total+=hostTotal[col];
unique+=hostUnique[col];
}
TOTAL+=total;
UNIQUE+=unique;
//開放
cudaFree(deviceDown);
cudaFree(deviceLeft);
cudaFree(deviceRight);
cudaFree(deviceTotal);
cudaFree(deviceUnique);
cudaFree(deviceLocal);
cudaFreeHost(hostDown);
cudaFreeHost(hostLeft);
cudaFreeHost(hostRight);
cudaFreeHost(hostTotal);
cudaFreeHost(hostUnique);
cudaFreeHost(hostLocal);
}
// GPU -n ビットボードの実行 角にQがある・ないの分岐を行う
void BitBoard_build(const unsigned int size,int STEPS)
{
if(size<=0||size>32){return;}
/**
int型は unsigned とする
total: グローバル変数TOTALへのアクセスを極小化する
*/
struct local l; //GPU で扱う構造体
l.STEPS=STEPS;
unsigned int bit=1;
l.board[0]=1;
unsigned int left=bit<<1,down=bit,right=bit>>1;
/**
2行目は右から3列目から左端から2列目まで
*/
for(l.BOUND1=2;l.BOUND1<size-1;l.BOUND1++){
l.board[1]=bit=(1<<l.BOUND1);
BitBoard_backTrack1G(size,2,(left|bit)<<1,(down|bit),(right|bit)>>1,&l);
}
l.TOPBIT=1<<(size-1);
l.SIDEMASK=l.LASTMASK=(l.TOPBIT|1);
l.ENDBIT=(l.TOPBIT>>1);
/**
1行目右から2列目から
偶数個は1/2 n=8 なら 1,2,3 奇数個は1/2+1 n=9 なら 1,2,3,4
*/
for(l.BOUND1=1,l.BOUND2=size-1-1;l.BOUND1<l.BOUND2;l.BOUND1++,l.BOUND2--){
l.board[0]=bit=(1<<l.BOUND1);
BitBoard_backTrack2G(size,1,bit<<1,bit,bit>>1,&l);
l.LASTMASK|=l.LASTMASK>>1|l.LASTMASK<<1;
l.ENDBIT>>=1;
}
}
// CUDA 初期化
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count==0){fprintf(stderr,"There is no device.\n");return false;}
unsigned int i;
for(i=0;i<count;++i){
struct cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
}
if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
cudaSetDevice(i);
return true;
}
//メイン
int main(int argc,char** argv)
{
bool cpu=false,cpur=false,gpu=false,gpuBitBoard=false;
unsigned int argstart=2;
if(argc>=2&&argv[1][0]=='-'){
if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuBitBoard=true;}
else{ gpuBitBoard=true; } //デフォルトをgpuとする
argstart=2;
}
if(argc<argstart){
printf("Usage: %s [-c|-g|-r|-s] n STEPS\n",argv[0]);
printf(" -r: CPU 再帰\n");
printf(" -c: CPU 非再帰\n");
printf(" -g: GPU 再帰\n");
printf(" -n: GPU ビットボード\n");
}
if(cpur){ printf("\n\n対称解除法 再帰 \n"); }
else if(cpu){ printf("\n\n対称解除法 非再帰 \n"); }
else if(gpu){ printf("\n\n対称解除法 GPU\n"); }
else if(gpuBitBoard){ printf("\n\n対称解除法 GPUビットボード \n"); }
if(cpu||cpur)
{
unsigned int min=4;
unsigned int targetN=17;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
local l;
gettimeofday(&t0,NULL);//計測開始
if(cpur){ //再帰
symmetry_R(size,&l);
}
if(cpu){ //非再帰
symmetry_NR(size,&l);
}
//
gettimeofday(&t1,NULL);//計測終了
unsigned int ss;
unsigned int ms;
unsigned int dd;
if(t1.tv_usec<t0.tv_usec) {
dd=(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
}else {
dd=(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
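//[Editor's note] Example of this conversion: an elapsed time of 3723.45 seconds prints
//as 00:01:02:03.45 (dd:hh:mm:ss plus hundredths of a second).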
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
} //end for
}//end if
if(gpu||gpuBitBoard)
{
int STEPS=24576;
if(!InitCUDA()){return 0;}
unsigned int min=4;
unsigned int targetN=21;
struct timeval t0;
struct timeval t1;
printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
for(unsigned int size=min;size<=targetN;size++){
gettimeofday(&t0,NULL);
if(gpu){
TOTAL=UNIQUE=0;
local l[MAX];
GPU_symmetry_R(size,&l[0]);
TOTAL=l->TOTAL;
UNIQUE=l->UNIQUE;
}else if(gpuBitBoard){
TOTAL=UNIQUE=0;
BitBoard_build(size,STEPS);
}
gettimeofday(&t1,NULL);
unsigned int ss;
unsigned int ms;
unsigned int dd;
if (t1.tv_usec<t0.tv_usec) {
dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
ss=(t1.tv_sec-t0.tv_sec-1)%86400;
ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
} else {
dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
ss=(t1.tv_sec-t0.tv_sec)%86400;
ms=(t1.tv_usec-t0.tv_usec+500)/10000;
}//end if
unsigned int hh=ss/3600;
unsigned int mm=(ss-hh*3600)/60;
ss%=60;
printf("%2d:%13ld%12ld%8.2d:%02d:%02d:%02d.%02d\n",size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
}
}
return 0;
}
|
1b3040e8e545b5379a68752cb000388f1b2e7f27.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static glm::vec3 *dev_framebuffer_2 = NULL;
static float * dev_depth = NULL; // you might need this buffer when doing depth test
static int *dev_mutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
__forceinline__
__device__
glm::vec3 fetchColor(glm::vec3 *textureData, int x, int y, int w, int h) {
int pix = y * w + x;
return textureData[pix];
}
__device__
glm::vec3 colorAt(TextureData* texture, int textureWidth, float u, float v) {
int flatIndex = u + v * textureWidth;
float r = (float) texture[flatIndex * 3] / 255.0f;
float g = (float) texture[flatIndex * 3 + 1] / 255.0f;
float b = (float) texture[flatIndex * 3 + 2] / 255.0f;
return glm::vec3(r, g, b);
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 lightPos = glm::vec3(1, 1, 1);
Fragment frag = fragmentBuffer[index];
int u = frag.texcoord0.x * frag.texWidth;
int v = frag.texcoord0.y * frag.texHeight;
glm::vec3 col;
if (frag.dev_diffuseTex != NULL) {
col = colorAt(frag.dev_diffuseTex, frag.texWidth, u, v);
} else {
col = frag.color;
}
glm::vec3 lightDir = glm::normalize(lightPos - frag.eyePos);
framebuffer[index] = col * glm::max(0.f, glm::dot(frag.eyeNor, lightDir));
// TODO: add your fragment shader code here
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
hipError_t stat = hipDeviceSetLimit(hipLimitStackSize, 8192);
checkCUDAError("set stack limit");
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_framebuffer_2);
hipMalloc(&dev_framebuffer_2, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer_2, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(float));
hipFree(dev_mutex);
hipMalloc(&dev_mutex, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, float * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = FLT_MAX;
}
}
/**
* kern function with support for stride to sometimes replace hipMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
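/**
* [Editor's note] Worked example of the indexing above (comments only): for a tightly
* packed vec3 POSITION attribute, n = 3, componentTypeByteSize = 4 and byteStride = 0,
* so thread i copies the 4 bytes of component (i % 3) of vertex (i / 3) from
* dev_src + byteOffset into the densely packed dev_dst.
*/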
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF stores rotation as (x, y, z, w)
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, materail, and each attributes
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materails
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
glm::vec4 mPos(primitive.dev_position[vid], 1.f);
// clip space
glm::vec4 camPos = MVP * mPos;
// NDC
glm::vec4 ndcPos = camPos / camPos.w;
// viewport
float x = (ndcPos.x + 1.f) * ((float) width) * 0.5f;
float y = (1.f - ndcPos.y) * ((float) height) * 0.5f;
float z = -ndcPos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arraies into the primitive array
primitive.dev_verticesOut[vid].pos = { x, y, z, 1.f };
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * mPos);
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
}
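/**
 * [Editor's note] Minimal sketch of the clip -> NDC -> viewport mapping performed above,
 * using the same conventions (y flipped, z negated). Illustrative only; the pipeline
 * keeps this logic inline in _vertexTransformAndAssembly.
 */
__host__ __device__ inline glm::vec3 clipToViewportSketch(glm::vec4 clipPos, int width, int height) {
	glm::vec4 ndc = clipPos / clipPos.w;               // perspective divide
	float x = (ndc.x + 1.f) * 0.5f * (float)width;     // [-1,1] -> [0,width]
	float y = (1.f - ndc.y) * 0.5f * (float)height;    // [-1,1] -> [0,height], y grows downward
	return glm::vec3(x, y, -ndc.z);
}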
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__global__
void _rasterize(int numPrimitives, Primitive *dev_primitives, Fragment *dev_fragmentBuffer, int width, int height, float *dev_depthBuffer, int *mutexes) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numPrimitives) {
Primitive prim = dev_primitives[idx];
VertexOut vs[3];
vs[0] = prim.v[0];
vs[1] = prim.v[1];
vs[2] = prim.v[2];
glm::vec3 pos[3];
pos[0] = glm::vec3(vs[0].pos);
pos[1] = glm::vec3(vs[1].pos);
pos[2] = glm::vec3(vs[2].pos);
// Get bounds of this primitive
glm::vec2 min, max;
AABB bounds = getAABBForTriangle(pos);
min.x = glm::clamp(bounds.min.x, 0.f, (float) (width - 1));
min.y = glm::clamp(bounds.min.y, 0.f, (float) (height - 1));
max.x = glm::clamp(bounds.max.x, 0.f, (float) (width - 1));
max.y = glm::clamp(bounds.max.y, 0.f, (float) (height - 1));
// Generate fragments for each pixel this primitive overlaps
for (int x = min.x; x <= max.x; ++x) {
for (int y = min.y; y <= max.y; ++y) {
glm::vec3 bary = calculateBarycentricCoordinate(pos, { x, y });
if (isBarycentricCoordInBounds(bary)) {
int pixIdx = x + width * y;
float depth = getZAtCoordinate(bary, pos);
glm::vec3 nor = bary.x * vs[0].eyeNor +
bary.y * vs[1].eyeNor +
bary.z * vs[2].eyeNor;
Fragment frag;
frag.color = glm::vec3(.95f, .95, .15);
frag.eyeNor = nor;
glm::vec2 cord = bary.x * vs[0].texcoord0 / vs[0].eyePos.z +
bary.y * vs[1].texcoord0 / vs[1].eyePos.z +
bary.z * vs[2].texcoord0 / vs[2].eyePos.z;
float z = bary.x * (1.f / vs[0].eyePos.z) +
bary.y * (1.f / vs[1].eyePos.z) +
bary.z * (1.f / vs[2].eyePos.z);
frag.texcoord0 = cord / z;
frag.texHeight = vs[0].texHeight;
frag.texWidth = vs[0].texWidth;
frag.dev_diffuseTex = vs[0].dev_diffuseTex;
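// [Editor's note] Per-pixel spinlock: atomicCAS flips the mutex from 0 to 1 for exactly
// one contending thread at a time; that thread performs the depth compare and fragment
// write below, then releases the lock by storing 0 back.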
int *mutex = &mutexes[pixIdx];
bool isSet;
do {
isSet = (atomicCAS(mutex, 0, 1) == 0);
if (isSet) {
if (depth < dev_depthBuffer[pixIdx]) {
dev_depthBuffer[pixIdx] = depth;
dev_fragmentBuffer[pixIdx] = frag;
}
}
if (isSet) {
mutexes[pixIdx] = 0;
}
} while (!isSet);
}
}
}
}
}
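/**
 * [Editor's note] Minimal sketch of the perspective-correct interpolation used in
 * _rasterize above: attributes are pre-divided by eye-space z, blended with the
 * barycentric weights, then divided by the interpolated 1/z. Illustrative only.
 */
__device__ inline glm::vec2 perspectiveCorrectUVSketch(glm::vec3 bary, const glm::vec2 uv[3], const float eyeZ[3]) {
	glm::vec2 numerator = bary.x * uv[0] / eyeZ[0]
	                    + bary.y * uv[1] / eyeZ[1]
	                    + bary.z * uv[2] / eyeZ[2];
	float invZ = bary.x / eyeZ[0] + bary.y / eyeZ[1] + bary.z / eyeZ[2];
	return numerator / invZ;
}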
__forceinline__
__device__
float rgb2luma(glm::vec3 rgb) {
return glm::dot(rgb, glm::vec3(0.299, 0.587, 0.114));
}
__forceinline__
__device__
int flatIdx(int w, int h, glm::vec2 pos) {
pos.x = w - pos.x;
pos = glm::clamp(pos, glm::vec2(0, 0), glm::vec2(w - 1, h - 1));
return pos.x + (pos.y * w);
}
// bilinear filtering
__forceinline__
__device__
float getAlpha(float y, float py, float qy) {
return (y - py) / (qy - py);
}
__forceinline__
__device__
glm::vec3 slerp(float alpha, glm::vec3 az, glm::vec3 bz) {
return glm::vec3((1 - alpha) * az.r + alpha * bz.r,
(1 - alpha) * az.g + alpha * bz.g,
(1 - alpha) * az.b + alpha * bz.b);
}
__forceinline__
__device__
float fract(float t) {
return t - glm::floor(t);
}
__forceinline__
__device__
glm::vec3 textureFetch(glm::vec3 *t, glm::vec2 pix, int w, int h) {
pix.x = w - pix.x;
pix = glm::clamp(pix, glm::vec2(0.f, 0.f), glm::vec2(w - 1, h - 1));
glm::vec3 f = slerp(getAlpha(pix.x, glm::ceil(pix.x), glm::floor(pix.x)),
fetchColor(t, glm::ceil(pix.x), glm::ceil(pix.y), w, h),
fetchColor(t, glm::floor(pix.x), glm::ceil(pix.y), w, h));
glm::vec3 s = slerp(getAlpha(pix.x, glm::ceil(pix.x), glm::floor(pix.x)),
fetchColor(t, glm::ceil(pix.x), glm::floor(pix.y), w, h),
fetchColor(t, glm::floor(pix.x), glm::floor(pix.y), w, h));
return slerp(getAlpha(pix.y, glm::ceil(pix.y), glm::floor(pix.y)), f, s);
}
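/**
 * [Editor's note] Usage note: textureFetch(buf, glm::vec2(12.3f, 45.7f), w, h) blends the
 * four texels surrounding the sample point, weighting each neighbor by the fractional
 * part of the coordinate; "slerp" above is a plain linear interpolation of the channels.
 */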
__forceinline__
__device__ int pow2(int e) {
int r = 1;
for (int i = 0; i < e; ++i) {
r *= 2;
}
return r;
}
__forceinline__
__device__
float fxaaQualityStep(int i) {
return i < 5 ? 2.f : pow2(i - 3);
}
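// [Editor's note] Step sizes produced by fxaaQualityStep: 2 for i = 0..4, then 4, 8, 16, ...
// (pow2(i - 3)), so the edge-end search accelerates as it moves away from the starting pixel.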
#define EDGE_THRESHOLD_MIN 0.0312
#define EDGE_THRESHOLD_MAX 0.125
#define FXAA_ITERATIONS 12
#define SUBPIXEL_QUALITY 0.75
#define FXAA_REDUCE_MIN 1.0 / 128.0
#define FXAA_REDUCE_MUL 1.0 / 8.0
#define FXAA_SPAN_MAX 8.0
__global__
void _fxaa_post(int w, int h, glm::vec3 *i_framebuffer, glm::vec3 *o_framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = (w - x) + (y * w);
if (x < w && y < h) {
glm::vec3 rgbN = i_framebuffer[flatIdx(w, h, { x, y - 1 })];
glm::vec3 rgbW = i_framebuffer[flatIdx(w, h, { x - 1, y })];
glm::vec3 rgbE = i_framebuffer[flatIdx(w, h, { x + 1, y })];
glm::vec3 rgbS = i_framebuffer[flatIdx(w, h, { x, y + 1 })];
glm::vec3 rgbM = i_framebuffer[flatIdx(w, h, { x, y })];
float lumaN = rgb2luma(rgbN);
float lumaW = rgb2luma(rgbW);
float lumaE = rgb2luma(rgbE);
float lumaS = rgb2luma(rgbS);
float lumaM = rgb2luma(rgbM);
float rangeMin = glm::min(lumaM, glm::min(glm::min(lumaN, lumaW), glm::min(lumaS, lumaE)));
float rangeMax = glm::max(lumaM, glm::max(glm::max(lumaN, lumaW), glm::max(lumaS, lumaE)));
// Check local contrast to avoid processing non edges
float range = rangeMax - rangeMin;
if (range < glm::max(FXAA_EDGE_THRESHOLD_MIN, rangeMax * FXAA_EDGE_THRESHOLD)) {
o_framebuffer[idx] = i_framebuffer[idx];
return;
}
#if FXAA_DEBUG_PASSTHROUGH
// Set edges to red
o_framebuffer[idx] = COLOR_RED;
return;
#endif
float lumaL = (lumaN + lumaW + lumaE + lumaS) * 0.25f;
float rangeL = glm::abs(lumaL - lumaM);
float blendL = glm::max(0.f, (rangeL / range) - FXAA_SUBPIX_TRIM) * FXAA_SUBPIX_TRIM_SCALE;
blendL = glm::min(FXAA_SUBPIX_CAP, blendL);
glm::vec3 rgbL = rgbN + rgbW + rgbM + rgbE + rgbS;
glm::vec3 rgbNW = i_framebuffer[flatIdx(w, h, { x - 1, y - 1 })];
glm::vec3 rgbNE = i_framebuffer[flatIdx(w, h, { x + 1, y - 1 })];
glm::vec3 rgbSW = i_framebuffer[flatIdx(w, h, { x - 1, y + 1 })];
glm::vec3 rgbSE = i_framebuffer[flatIdx(w, h, { x + 1, y + 1 })];
float lumaNW = rgb2luma(rgbNW);
float lumaNE = rgb2luma(rgbNE);
float lumaSW = rgb2luma(rgbSW);
float lumaSE = rgb2luma(rgbSE);
rgbL += (rgbNW + rgbNE + rgbSW + rgbSE);
rgbL *= (1.f / 9.f);
float edgeVert =
glm::abs((0.25f * lumaNW) + (-0.5f * lumaN) + (0.25f * lumaNE)) +
glm::abs((0.50f * lumaW ) + (-1.0f * lumaM) + (0.50f * lumaE )) +
glm::abs((0.25f * lumaSW) + (-0.5f * lumaS) + (0.25f * lumaSE));
float edgeHorz =
glm::abs((0.25f * lumaNW) + (-0.5f * lumaW) + (0.25f * lumaSW)) +
glm::abs((0.50f * lumaN ) + (-1.0f * lumaM) + (0.50f * lumaS )) +
glm::abs((0.25f * lumaNE) + (-0.5f * lumaE) + (0.25f * lumaSE));
bool isHor = edgeHorz >= edgeVert;
#if FXAA_DEBUG_HORZVERT
// Set horizontal edges to yellow, vertical edges to blue
o_framebuffer[idx] = isHor ? COLOR_YELLOW : COLOR_BLUE;
return;
#endif
// Select highest contrast pixel pair orthogonal to the edge
// If horizontal edge, check pair of M with S and N
// If vertical edge, check pair of M with W and E
float luma1 = isHor ? lumaS : lumaE;
float luma2 = isHor ? lumaN : lumaW;
float grad1 = luma1 - lumaM;
float grad2 = luma2 - lumaM;
bool is1Steepest = glm::abs(grad1) >= glm::abs(grad2);
float gradScaled = 0.25f * glm::max(glm::abs(grad1), glm::abs(grad2));
float stepLen = 1.f;
float lumaLocalAvg = 0.f;
if (is1Steepest) {
lumaLocalAvg = 0.5f * (luma1 + lumaM);
} else {
stepLen = -stepLen;
lumaLocalAvg = 0.5f * (luma2 + lumaM);
}
glm::vec2 currUV = { x, y };
if (isHor) {
currUV.y += stepLen * 0.5f;
} else {
currUV.x += stepLen * 0.5f;
}
#if FXAA_DEBUG_PAIR
// Set pixel up or left to BLUE
// Set pixel down or right to GREEN
glm::vec2 secondCoord = { x + (isHor ? stepLen : 0), y + (isHor ? 0 : stepLen) };
int secondIdx = flatIdx(w, h, secondCoord);
if (secondCoord.x < x || secondCoord.y < y) {
o_framebuffer[idx] = COLOR_GREEN;
} else {
o_framebuffer[idx] = COLOR_BLUE;
}
return;
#endif
// Search for end of edge in both - and + directions
glm::vec2 offset = isHor ? glm::vec2(1.f, 0.f) : glm::vec2(0.f, 1.f);
glm::vec2 uv1 = currUV;
glm::vec2 uv2 = currUV;
float lumaEnd1, lumaEnd2;
bool reached1 = false;
bool reached2 = false;
bool reachedBoth = reached1 && reached2;
for (int i = 0; i < FXAA_SEARCH_STEPS; ++i) {
if (!reached1) {
uv1 -= offset * fxaaQualityStep(i);
lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, uv1, w, h));
//lumaEnd1 -= lumaLocalAvg;
}
if (!reached2) {
uv2 += offset * fxaaQualityStep(i);
lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, uv2, w, h));
//lumaEnd2 -= lumaLocalAvg;
}
reached1 = (glm::abs(lumaEnd1 - lumaN) >= gradScaled);
reached2 = (glm::abs(lumaEnd2 - lumaN) >= gradScaled);
reachedBoth = (reached1 && reached2);
if (reachedBoth) { break; }
}
// Compute subpixel offset based on distance to end of edge
float dist1 = glm::abs(isHor ? (x - uv1.x) : (y - uv1.y));
float dist2 = glm::abs(isHor ? (uv2.x - x) : (uv2.y - y));
bool isDir1 = dist1 < dist2;
float distFinal = glm::min(dist1, dist2);
float edgeLength = dist1 + dist2;
#if FXAA_DEBUG_EDGEPOS
float alpha = distFinal / 12.f;
o_framebuffer[idx] = alpha * COLOR_YELLOW + (1 - alpha) * COLOR_GREEN;
return;
#endif
float pixelOffset = -distFinal / edgeLength + 0.5;
//printf("pixelOffset: %f\n", pixelOffset);
bool isLumaCenterSmaller = lumaM < lumaLocalAvg;
bool correctVariation = ((isDir1 ? lumaEnd1 : lumaEnd2) < 0.0) != isLumaCenterSmaller;
pixelOffset = correctVariation ? pixelOffset : 0.f;
glm::vec2 finalUV = isHor ? glm::vec2(x, y + pixelOffset) : glm::vec2(x + pixelOffset, y);
o_framebuffer[idx] = textureFetch(i_framebuffer, finalUV, w, h);
/*
float lumaC = rgb2luma(i_framebuffer[idx]);
float lumaD = rgb2luma(i_framebuffer[flatIdx(w, h, { x, y + 1 })]);
float lumaU = rgb2luma(i_framebuffer[flatIdx(w, h, { x, y - 1 })]);
float lumaL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y })]);
float lumaR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y })]);
float lumaMin = glm::min(lumaC, glm::min(glm::min(lumaD, lumaU), glm::min(lumaL, lumaR)));
float lumaMax = glm::max(lumaC, glm::max(glm::max(lumaD, lumaU), glm::max(lumaL, lumaR)));
float lumaDelta = lumaMax - lumaMin;
if (glm::isnan(lumaDelta)) {
lumaDelta = 0.f;
}
if (lumaDelta < glm::max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) {
o_framebuffer[idx] = i_framebuffer[idx];
return;
}
float lumaDL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y + 1 })]);
float lumaUL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y - 1 })]);
float lumaDR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y + 1 })]);
float lumaUR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y - 1 })]);
float lumaDU = lumaD + lumaU;
float lumaLR = lumaL + lumaR;
float lumaLCorn = lumaDL + lumaUL;
float lumaDCorn = lumaDL + lumaDR;
float lumaRCorn = lumaDR + lumaUR;
float lumaUCorn = lumaUL + lumaUR;
float edgeHor = glm::abs(-2.f * lumaL + lumaLCorn) +
glm::abs(-2.f * lumaC + lumaDU) * 2.f +
glm::abs(-2.f * lumaR + lumaRCorn);
float edgeVer = glm::abs(-2.f * lumaU + lumaUCorn) +
glm::abs(-2.f * lumaC + lumaLR) * 2.f +
glm::abs(-2.f * lumaD + lumaDCorn);
bool isHor = (edgeHor >= edgeVer);
float luma1 = isHor ? lumaD : lumaL;
float luma2 = isHor ? lumaU : lumaR;
float grad1 = luma1 - lumaC;
float grad2 = luma2 - lumaC;
bool is1Steepest = glm::abs(grad1) >= glm::abs(grad2);
float gradScale = 0.25f * glm::max(glm::abs(grad1), glm::abs(grad2));
float stepLen = 1.f;
float lumaLocalAvg = 0.f;
if (is1Steepest) {
stepLen = -stepLen;
lumaLocalAvg = 0.5f * (luma1 + lumaC);
} else {
lumaLocalAvg = 0.5f * (luma2 + lumaC);
}
glm::vec2 currPos(x, y);
if (isHor) {
currPos.y += stepLen * 0.5f;
} else {
currPos.x += stepLen * 0.5f;
}
glm::vec2 offset = isHor ? glm::vec2(1.f, 0.f) : glm::vec2(0.f, 1.f);
glm::vec2 p1 = currPos - offset;
glm::vec2 p2 = currPos + offset;
float lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, p1, w, h));
float lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, p2, w, h));
lumaEnd1 -= lumaLocalAvg;
lumaEnd2 -= lumaLocalAvg;
bool reached1 = glm::abs(lumaEnd1) >= gradScale;
bool reached2 = glm::abs(lumaEnd2) >= gradScale;
bool reachedBoth = reached1 && reached2;
if (!reached1) {
p1 -= offset;
}
if (!reached2) {
p2 += offset;
}
if (!reachedBoth) {
for (int i = 2; i < FXAA_ITERATIONS; ++i) {
if (!reached1) {
lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, p1, w, h));
lumaEnd1 -= lumaLocalAvg;
}
if (!reached2) {
lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, p2, w, h));
lumaEnd2 -= lumaLocalAvg;
}
reached1 = glm::abs(lumaEnd1) >= gradScale;
reached2 = glm::abs(lumaEnd2) >= gradScale;
reachedBoth = reached1 && reached2;
if (!reached1) {
p1 -= offset * fxaaQualityStep(i);
}
if (!reached2) {
p2 += offset * fxaaQualityStep(i);
}
if (reachedBoth) { break; }
}
}
float dist1 = isHor ? ((float) x - p1.x) : ((float) y - p1.y);
float dist2 = isHor ? (p2.x - (float) x) : (p2.y - (float) y);
bool isDir1 = dist1 < dist2;
float distFinal = glm::min(dist1, dist2);
float edgeThickness = (dist1 + dist2);
float pixOffset = -distFinal / edgeThickness + 0.5f;
bool isLumaCSmaller = lumaC < lumaLocalAvg;
bool correctVar = ((isDir1 ? lumaEnd1 : lumaEnd2) < 0.f) != isLumaCSmaller;
float finalOffset = correctVar ? pixOffset : 0.f;
float lumaAvg = (1.f / 12.f) * (2.f * (lumaDU + lumaLR) + lumaLCorn + lumaRCorn);
float subPixOffset1 = glm::clamp(glm::abs(lumaAvg - lumaC) / lumaDelta, 0.f, 1.f);
float subPixOffset2 = (-2.f * subPixOffset1 + 3.f) * subPixOffset1 * subPixOffset1;
float subPixOffsetFinal = subPixOffset2 * subPixOffset2 * SUBPIXEL_QUALITY;
finalOffset = glm::max(finalOffset, subPixOffsetFinal);
glm::vec2 finalPixPos = glm::vec2(x, y);
if (isHor) {
finalPixPos.y += finalOffset * stepLen;
} else {
finalPixPos.x += finalOffset * stepLen;
}
o_framebuffer[idx] = textureFetch(i_framebuffer, finalPixPos, w, h);
o_framebuffer[idx] = isHor ? glm::vec3(1, 0, 0) : glm::vec3(0, 1, 0);
*/
}
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
hipMemset(dev_mutex, 0, width * height * sizeof(int)); // clear every per-pixel mutex, not just the first int
dim3 numThreadsPerBlock(128);
dim3 numBlocksPerPrimitive = (totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x;
_rasterize << <numBlocksPerPrimitive, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, width, height, dev_depth, dev_mutex);
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Do post process effects here:
// FXAA, SSAO
#if FXAA
{
_fxaa_post << < blockCount2d , blockSize2d >> > (width, height, dev_framebuffer, dev_framebuffer_2);
checkCUDAError("FXAA postprocess");
std::swap(dev_framebuffer, dev_framebuffer_2);
}
#endif
#if SSAO
_ssao_post << <blockCount2d, blockSize2d >> > (width, height, dev_framebuffer, dev_framebuffer_2);
#endif
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_framebuffer_2);
dev_framebuffer_2 = NULL;
hipFree(dev_depth);
dev_depth = NULL;
hipFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
| 1b3040e8e545b5379a68752cb000388f1b2e7f27.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because normals are not preserved correctly by the perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static glm::vec3 *dev_framebuffer_2 = NULL;
static float * dev_depth = NULL; // you might need this buffer when doing depth test
static int *dev_mutex = NULL;
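// dev_framebuffer_2 is the ping-pong target of the FXAA/SSAO post-process passes, and
// dev_mutex holds one int per pixel, used as a spin lock during the depth test in _rasterize.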
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
__forceinline__
__device__
glm::vec3 fetchColor(glm::vec3 *textureData, int x, int y, int w, int h) {
int pix = y * w + x;
return textureData[pix];
}
__device__
glm::vec3 colorAt(TextureData* texture, int textureWidth, float u, float v) {
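// Assumes a tightly packed 3-byte-per-texel (RGB) texture; u and v are texel indices
// (not normalized UVs) and no bounds checking or wrapping is applied.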
int flatIndex = u + v * textureWidth;
float r = (float) texture[flatIndex * 3] / 255.0f;
float g = (float) texture[flatIndex * 3 + 1] / 255.0f;
float b = (float) texture[flatIndex * 3 + 2] / 255.0f;
return glm::vec3(r, g, b);
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 lightPos = glm::vec3(1, 1, 1);
Fragment frag = fragmentBuffer[index];
int u = frag.texcoord0.x * frag.texWidth;
int v = frag.texcoord0.y * frag.texHeight;
glm::vec3 col;
if (frag.dev_diffuseTex != NULL) {
col = colorAt(frag.dev_diffuseTex, frag.texWidth, u, v);
} else {
col = frag.color;
}
glm::vec3 lightDir = glm::normalize(lightPos - frag.eyePos); // normalize so the diffuse term is a proper Lambert factor
framebuffer[index] = col * glm::max(0.f, glm::dot(frag.eyeNor, lightDir));
// TODO: add your fragment shader code here
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
cudaError_t stat = cudaDeviceSetLimit(cudaLimitStackSize, 8192);
checkCUDAError("set stack limit");
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_framebuffer_2);
cudaMalloc(&dev_framebuffer_2, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer_2, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(float));
cudaFree(dev_mutex);
cudaMalloc(&dev_mutex, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, float * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = FLT_MAX;
}
}
/**
* Kernel that copies buffer data with byte-stride support, used in place of cudaMemcpy for interleaved attributes
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Example: a vec3 position attribute has n = 3 float components,
// each component being componentTypeByteSize = 4 bytes; every thread
// copies the bytes of exactly one component, honouring byteStride/byteOffset.
// id of the component handled by this thread
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF rotations are (x, y, z, w); without this the w component is never set
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
glm::vec4 mPos(primitive.dev_position[vid], 1.f);
// clip space
glm::vec4 camPos = MVP * mPos;
// NDC
glm::vec4 ndcPos = camPos / camPos.w;
// viewport
float x = (ndcPos.x + 1.f) * ((float) width) * 0.5f;
float y = (1.f - ndcPos.y) * ((float) height) * 0.5f;
float z = -ndcPos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
primitive.dev_verticesOut[vid].pos = { x, y, z, 1.f };
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * mPos);
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__global__
void _rasterize(int numPrimitives, Primitive *dev_primitives, Fragment *dev_fragmentBuffer, int width, int height, float *dev_depthBuffer, int *mutexes) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numPrimitives) {
Primitive prim = dev_primitives[idx];
VertexOut vs[3];
vs[0] = prim.v[0];
vs[1] = prim.v[1];
vs[2] = prim.v[2];
glm::vec3 pos[3];
pos[0] = glm::vec3(vs[0].pos);
pos[1] = glm::vec3(vs[1].pos);
pos[2] = glm::vec3(vs[2].pos);
// Get bounds of this primitive
glm::vec2 min, max;
AABB bounds = getAABBForTriangle(pos);
min.x = glm::clamp(bounds.min.x, 0.f, (float) (width - 1));
min.y = glm::clamp(bounds.min.y, 0.f, (float) (height - 1));
max.x = glm::clamp(bounds.max.x, 0.f, (float) (width - 1));
max.y = glm::clamp(bounds.max.y, 0.f, (float) (height - 1));
// Generate fragments for each pixel this primitive overlaps
for (int x = min.x; x <= max.x; ++x) {
for (int y = min.y; y <= max.y; ++y) {
glm::vec3 bary = calculateBarycentricCoordinate(pos, { x, y });
if (isBarycentricCoordInBounds(bary)) {
int pixIdx = x + width * y;
float depth = getZAtCoordinate(bary, pos);
glm::vec3 nor = bary.x * vs[0].eyeNor +
bary.y * vs[1].eyeNor +
bary.z * vs[2].eyeNor;
Fragment frag;
frag.color = glm::vec3(.95f, .95, .15);
frag.eyeNor = nor;
glm::vec2 cord = bary.x * vs[0].texcoord0 / vs[0].eyePos.z +
bary.y * vs[1].texcoord0 / vs[1].eyePos.z +
bary.z * vs[2].texcoord0 / vs[2].eyePos.z;
float z = bary.x * (1.f / vs[0].eyePos.z) +
bary.y * (1.f / vs[1].eyePos.z) +
bary.z * (1.f / vs[2].eyePos.z);
frag.texcoord0 = cord / z;
frag.texHeight = vs[0].texHeight;
frag.texWidth = vs[0].texWidth;
frag.dev_diffuseTex = vs[0].dev_diffuseTex;
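// Per-pixel spin lock: atomicCAS acquires the mutex for this pixel so that the depth
// compare and the fragment write below happen atomically; the lock is released by
// writing 0 back. A lock-free alternative would pack the depth into an integer and use
// atomicMin, but that would also require encoding the fragment payload separately.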
int *mutex = &mutexes[pixIdx];
bool isSet;
do {
isSet = (atomicCAS(mutex, 0, 1) == 0);
if (isSet) {
if (depth < dev_depthBuffer[pixIdx]) {
dev_depthBuffer[pixIdx] = depth;
dev_fragmentBuffer[pixIdx] = frag;
}
}
if (isSet) {
mutexes[pixIdx] = 0;
}
} while (!isSet);
}
}
}
}
}
__forceinline__
__device__
float rgb2luma(glm::vec3 rgb) {
return glm::dot(rgb, glm::vec3(0.299, 0.587, 0.114));
}
__forceinline__
__device__
int flatIdx(int w, int h, glm::vec2 pos) {
pos.x = (w - 1) - pos.x; // mirror x; (w - 1 - x) keeps the index inside [0, w - 1]
pos = glm::clamp(pos, glm::vec2(0, 0), glm::vec2(w - 1, h - 1));
return pos.x + (pos.y * w);
}
// bilinear filtering
__forceinline__
__device__
float getAlpha(float y, float py, float qy) {
return (y - py) / (qy - py);
}
__forceinline__
__device__
glm::vec3 slerp(float alpha, glm::vec3 az, glm::vec3 bz) {
return glm::vec3((1 - alpha) * az.r + alpha * bz.r,
(1 - alpha) * az.g + alpha * bz.g,
(1 - alpha) * az.b + alpha * bz.b);
}
__forceinline__
__device__
float fract(float t) {
return t - glm::floor(t);
}
__forceinline__
__device__
glm::vec3 textureFetch(glm::vec3 *t, glm::vec2 pix, int w, int h) {
pix.x = (w - 1) - pix.x; // mirror x, consistent with flatIdx
pix = glm::clamp(pix, glm::vec2(0.f, 0.f), glm::vec2(w - 1, h - 1));
// Bilinear filter: blend the four neighbouring texels using the fractional part of the
// coordinate as weights (the original weights mixed pix.x and pix.y and broke down at integer coordinates).
int x0 = (int)glm::floor(pix.x);
int y0 = (int)glm::floor(pix.y);
int x1 = min(x0 + 1, w - 1);
int y1 = min(y0 + 1, h - 1);
float ax = fract(pix.x);
float ay = fract(pix.y);
glm::vec3 bottom = slerp(ax, fetchColor(t, x0, y0, w, h), fetchColor(t, x1, y0, w, h));
glm::vec3 top = slerp(ax, fetchColor(t, x0, y1, w, h), fetchColor(t, x1, y1, w, h));
return slerp(ay, bottom, top);
}
__forceinline__
__device__ int pow2(int e) {
int r = 1;
for (int i = 0; i < e; ++i) {
r *= 2;
}
return r;
}
__forceinline__
__device__
float fxaaQualityStep(int i) {
return i < 5 ? 2.f : pow2(i - 3);
}
#define EDGE_THRESHOLD_MIN 0.0312
#define EDGE_THRESHOLD_MAX 0.125
#define FXAA_ITERATIONS 12
#define SUBPIXEL_QUALITY 0.75
#define FXAA_REDUCE_MIN 1.0 / 128.0
#define FXAA_REDUCE_MUL 1.0 / 8.0
#define FXAA_SPAN_MAX 8.0
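// Note: the kernel below also relies on FXAA_EDGE_THRESHOLD(_MIN), FXAA_SUBPIX_TRIM/CAP,
// FXAA_SEARCH_STEPS, the FXAA_DEBUG_* switches and the COLOR_* debug colors, as well as the
// FXAA/SSAO toggles used in rasterize(); these are presumably defined elsewhere (e.g. in rasterize.h).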
__global__
void _fxaa_post(int w, int h, glm::vec3 *i_framebuffer, glm::vec3 *o_framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = ((w - 1) - x) + (y * w); // mirrored x; (w - 1 - x) avoids indexing one element past the framebuffer when x == 0
if (x < w && y < h) {
glm::vec3 rgbN = i_framebuffer[flatIdx(w, h, { x, y - 1 })];
glm::vec3 rgbW = i_framebuffer[flatIdx(w, h, { x - 1, y })];
glm::vec3 rgbE = i_framebuffer[flatIdx(w, h, { x + 1, y })];
glm::vec3 rgbS = i_framebuffer[flatIdx(w, h, { x, y + 1 })];
glm::vec3 rgbM = i_framebuffer[flatIdx(w, h, { x, y })];
float lumaN = rgb2luma(rgbN);
float lumaW = rgb2luma(rgbW);
float lumaE = rgb2luma(rgbE);
float lumaS = rgb2luma(rgbS);
float lumaM = rgb2luma(rgbM);
float rangeMin = glm::min(lumaM, glm::min(glm::min(lumaN, lumaW), glm::min(lumaS, lumaE)));
float rangeMax = glm::max(lumaM, glm::max(glm::max(lumaN, lumaW), glm::max(lumaS, lumaE)));
// Check local contrast to avoid processing non edges
float range = rangeMax - rangeMin;
if (range < glm::max(FXAA_EDGE_THRESHOLD_MIN, rangeMax * FXAA_EDGE_THRESHOLD)) {
o_framebuffer[idx] = i_framebuffer[idx];
return;
}
#if FXAA_DEBUG_PASSTHROUGH
// Set edges to red
o_framebuffer[idx] = COLOR_RED;
return;
#endif
float lumaL = (lumaN + lumaW + lumaE + lumaS) * 0.25f;
float rangeL = glm::abs(lumaL - lumaM);
float blendL = glm::max(0.f, (rangeL / range) - FXAA_SUBPIX_TRIM) * FXAA_SUBPIX_TRIM_SCALE;
blendL = glm::min(FXAA_SUBPIX_CAP, blendL);
glm::vec3 rgbL = rgbN + rgbW + rgbM + rgbE + rgbS;
glm::vec3 rgbNW = i_framebuffer[flatIdx(w, h, { x - 1, y - 1 })];
glm::vec3 rgbNE = i_framebuffer[flatIdx(w, h, { x + 1, y - 1 })];
glm::vec3 rgbSW = i_framebuffer[flatIdx(w, h, { x - 1, y + 1 })];
glm::vec3 rgbSE = i_framebuffer[flatIdx(w, h, { x + 1, y + 1 })];
float lumaNW = rgb2luma(rgbNW);
float lumaNE = rgb2luma(rgbNE);
float lumaSW = rgb2luma(rgbSW);
float lumaSE = rgb2luma(rgbSE);
rgbL += (rgbNW + rgbNE + rgbSW + rgbSE);
rgbL *= (1.f / 9.f);
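// edgeVert and edgeHorz below are 3x3 high-pass filters over the luma neighbourhood;
// whichever response is larger decides whether the local edge runs horizontally or vertically.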
float edgeVert =
glm::abs((0.25f * lumaNW) + (-0.5f * lumaN) + (0.25f * lumaNE)) +
glm::abs((0.50f * lumaW ) + (-1.0f * lumaM) + (0.50f * lumaE )) +
glm::abs((0.25f * lumaSW) + (-0.5f * lumaS) + (0.25f * lumaSE));
float edgeHorz =
glm::abs((0.25f * lumaNW) + (-0.5f * lumaW) + (0.25f * lumaSW)) +
glm::abs((0.50f * lumaN ) + (-1.0f * lumaM) + (0.50f * lumaS )) +
glm::abs((0.25f * lumaNE) + (-0.5f * lumaE) + (0.25f * lumaSE));
bool isHor = edgeHorz >= edgeVert;
#if FXAA_DEBUG_HORZVERT
// Set horizontal edges to yellow, vertical edges to blue
o_framebuffer[idx] = isHor ? COLOR_YELLOW : COLOR_BLUE;
return;
#endif
// Select highest contrast pixel pair orthogonal to the edge
// If horizontal edge, check pair of M with S and N
// If vertical edge, check pair of M with W and E
float luma1 = isHor ? lumaS : lumaE;
float luma2 = isHor ? lumaN : lumaW;
float grad1 = luma1 - lumaM;
float grad2 = luma2 - lumaM;
bool is1Steepest = glm::abs(grad1) >= glm::abs(grad2);
float gradScaled = 0.25f * glm::max(glm::abs(grad1), glm::abs(grad2));
float stepLen = 1.f;
float lumaLocalAvg = 0.f;
if (is1Steepest) {
lumaLocalAvg = 0.5f * (luma1 + lumaM);
} else {
stepLen = -stepLen;
lumaLocalAvg = 0.5f * (luma2 + lumaM);
}
glm::vec2 currUV = { x, y };
if (isHor) {
currUV.y += stepLen * 0.5f;
} else {
currUV.x += stepLen * 0.5f;
}
#if FXAA_DEBUG_PAIR
// Set pixel up or left to BLUE
// Set pixel down or right to GREEN
glm::vec2 secondCoord = { x + (isHor ? stepLen : 0), y + (isHor ? 0 : stepLen) };
int secondIdx = flatIdx(w, h, secondCoord);
if (secondCoord.x < x || secondCoord.y < y) {
o_framebuffer[idx] = COLOR_GREEN;
} else {
o_framebuffer[idx] = COLOR_BLUE;
}
return;
#endif
// Search for end of edge in both - and + directions
glm::vec2 offset = isHor ? glm::vec2(1.f, 0.f) : glm::vec2(0.f, 1.f);
glm::vec2 uv1 = currUV;
glm::vec2 uv2 = currUV;
float lumaEnd1, lumaEnd2;
bool reached1 = false;
bool reached2 = false;
bool reachedBoth = reached1 && reached2;
for (int i = 0; i < FXAA_SEARCH_STEPS; ++i) {
if (!reached1) {
uv1 -= offset * fxaaQualityStep(i);
lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, uv1, w, h));
//lumaEnd1 -= lumaLocalAvg;
}
if (!reached2) {
uv2 += offset * fxaaQualityStep(i);
lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, uv2, w, h));
//lumaEnd2 -= lumaLocalAvg;
}
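// Note: reference FXAA compares |lumaEnd - lumaLocalAvg| against gradScaled here; this
// implementation uses lumaN instead (the lumaLocalAvg subtraction above is commented out),
// which may be a deliberate tweak rather than the standard edge-end test.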
reached1 = (glm::abs(lumaEnd1 - lumaN) >= gradScaled);
reached2 = (glm::abs(lumaEnd2 - lumaN) >= gradScaled);
reachedBoth = (reached1 && reached2);
if (reachedBoth) { break; }
}
// Compute subpixel offset based on distance to end of edge
float dist1 = glm::abs(isHor ? (x - uv1.x) : (y - uv1.y));
float dist2 = glm::abs(isHor ? (uv2.x - x) : (uv2.y - y));
bool isDir1 = dist1 < dist2;
float distFinal = glm::min(dist1, dist2);
float edgeLength = dist1 + dist2;
#if FXAA_DEBUG_EDGEPOS
float alpha = distFinal / 12.f;
o_framebuffer[idx] = alpha * COLOR_YELLOW + (1 - alpha) * COLOR_GREEN;
return;
#endif
float pixelOffset = -distFinal / edgeLength + 0.5;
//printf("pixelOffset: %f\n", pixelOffset);
bool isLumaCenterSmaller = lumaM < lumaLocalAvg;
bool correctVariation = ((isDir1 ? lumaEnd1 : lumaEnd2) < 0.0) != isLumaCenterSmaller;
pixelOffset = correctVariation ? pixelOffset : 0.f;
glm::vec2 finalUV = isHor ? glm::vec2(x, y + pixelOffset) : glm::vec2(x + pixelOffset, y);
o_framebuffer[idx] = textureFetch(i_framebuffer, finalUV, w, h);
/*
float lumaC = rgb2luma(i_framebuffer[idx]);
float lumaD = rgb2luma(i_framebuffer[flatIdx(w, h, { x, y + 1 })]);
float lumaU = rgb2luma(i_framebuffer[flatIdx(w, h, { x, y - 1 })]);
float lumaL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y })]);
float lumaR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y })]);
float lumaMin = glm::min(lumaC, glm::min(glm::min(lumaD, lumaU), glm::min(lumaL, lumaR)));
float lumaMax = glm::max(lumaC, glm::max(glm::max(lumaD, lumaU), glm::max(lumaL, lumaR)));
float lumaDelta = lumaMax - lumaMin;
if (glm::isnan(lumaDelta)) {
lumaDelta = 0.f;
}
if (lumaDelta < glm::max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) {
o_framebuffer[idx] = i_framebuffer[idx];
return;
}
float lumaDL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y + 1 })]);
float lumaUL = rgb2luma(i_framebuffer[flatIdx(w, h, { x - 1, y - 1 })]);
float lumaDR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y + 1 })]);
float lumaUR = rgb2luma(i_framebuffer[flatIdx(w, h, { x + 1, y - 1 })]);
float lumaDU = lumaD + lumaU;
float lumaLR = lumaL + lumaR;
float lumaLCorn = lumaDL + lumaUL;
float lumaDCorn = lumaDL + lumaDR;
float lumaRCorn = lumaDR + lumaUR;
float lumaUCorn = lumaUL + lumaUR;
float edgeHor = glm::abs(-2.f * lumaL + lumaLCorn) +
glm::abs(-2.f * lumaC + lumaDU) * 2.f +
glm::abs(-2.f * lumaR + lumaRCorn);
float edgeVer = glm::abs(-2.f * lumaU + lumaUCorn) +
glm::abs(-2.f * lumaC + lumaLR) * 2.f +
glm::abs(-2.f * lumaD + lumaDCorn);
bool isHor = (edgeHor >= edgeVer);
float luma1 = isHor ? lumaD : lumaL;
float luma2 = isHor ? lumaU : lumaR;
float grad1 = luma1 - lumaC;
float grad2 = luma2 - lumaC;
bool is1Steepest = glm::abs(grad1) >= glm::abs(grad2);
float gradScale = 0.25f * glm::max(glm::abs(grad1), glm::abs(grad2));
float stepLen = 1.f;
float lumaLocalAvg = 0.f;
if (is1Steepest) {
stepLen = -stepLen;
lumaLocalAvg = 0.5f * (luma1 + lumaC);
} else {
lumaLocalAvg = 0.5f * (luma2 + lumaC);
}
glm::vec2 currPos(x, y);
if (isHor) {
currPos.y += stepLen * 0.5f;
} else {
currPos.x += stepLen * 0.5f;
}
glm::vec2 offset = isHor ? glm::vec2(1.f, 0.f) : glm::vec2(0.f, 1.f);
glm::vec2 p1 = currPos - offset;
glm::vec2 p2 = currPos + offset;
float lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, p1, w, h));
float lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, p2, w, h));
lumaEnd1 -= lumaLocalAvg;
lumaEnd2 -= lumaLocalAvg;
bool reached1 = glm::abs(lumaEnd1) >= gradScale;
bool reached2 = glm::abs(lumaEnd2) >= gradScale;
bool reachedBoth = reached1 && reached2;
if (!reached1) {
p1 -= offset;
}
if (!reached2) {
p2 += offset;
}
if (!reachedBoth) {
for (int i = 2; i < FXAA_ITERATIONS; ++i) {
if (!reached1) {
lumaEnd1 = rgb2luma(textureFetch(i_framebuffer, p1, w, h));
lumaEnd1 -= lumaLocalAvg;
}
if (!reached2) {
lumaEnd2 = rgb2luma(textureFetch(i_framebuffer, p2, w, h));
lumaEnd2 -= lumaLocalAvg;
}
reached1 = glm::abs(lumaEnd1) >= gradScale;
reached2 = glm::abs(lumaEnd2) >= gradScale;
reachedBoth = reached1 && reached2;
if (!reached1) {
p1 -= offset * fxaaQualityStep(i);
}
if (!reached2) {
p2 += offset * fxaaQualityStep(i);
}
if (reachedBoth) { break; }
}
}
float dist1 = isHor ? ((float) x - p1.x) : ((float) y - p1.y);
float dist2 = isHor ? (p2.x - (float) x) : (p2.y - (float) y);
bool isDir1 = dist1 < dist2;
float distFinal = glm::min(dist1, dist2);
float edgeThickness = (dist1 + dist2);
float pixOffset = -distFinal / edgeThickness + 0.5f;
bool isLumaCSmaller = lumaC < lumaLocalAvg;
bool correctVar = ((isDir1 ? lumaEnd1 : lumaEnd2) < 0.f) != isLumaCSmaller;
float finalOffset = correctVar ? pixOffset : 0.f;
float lumaAvg = (1.f / 12.f) * (2.f * (lumaDU + lumaLR) + lumaLCorn + lumaRCorn);
float subPixOffset1 = glm::clamp(glm::abs(lumaAvg - lumaC) / lumaDelta, 0.f, 1.f);
float subPixOffset2 = (-2.f * subPixOffset1 + 3.f) * subPixOffset1 * subPixOffset1;
float subPixOffsetFinal = subPixOffset2 * subPixOffset2 * SUBPIXEL_QUALITY;
finalOffset = glm::max(finalOffset, subPixOffsetFinal);
glm::vec2 finalPixPos = glm::vec2(x, y);
if (isHor) {
finalPixPos.y += finalOffset * stepLen;
} else {
finalPixPos.x += finalOffset * stepLen;
}
o_framebuffer[idx] = textureFetch(i_framebuffer, finalPixPos, w, h);
o_framebuffer[idx] = isHor ? glm::vec3(1, 0, 0) : glm::vec3(0, 1, 0);
*/
}
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
cudaMemset(dev_mutex, 0, width * height * sizeof(int)); // clear every per-pixel mutex, not just the first int
dim3 numThreadsPerBlock(128);
dim3 numBlocksPerPrimitive = (totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x;
_rasterize << <numBlocksPerPrimitive, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, width, height, dev_depth, dev_mutex);
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Do post process effects here:
// FXAA, SSAO
#if FXAA
{
_fxaa_post << < blockCount2d , blockSize2d >> > (width, height, dev_framebuffer, dev_framebuffer_2);
checkCUDAError("FXAA postprocess");
std::swap(dev_framebuffer, dev_framebuffer_2);
}
#endif
#if SSAO
_ssao_post << <blockCount2d, blockSize2d >> > (width, height, dev_framebuffer, dev_framebuffer_2);
#endif
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_framebuffer_2);
dev_framebuffer_2 = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
cudaFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
|
bc385a1040b7fef9720d07cd8aa56e223a63fceb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include "utils/utils.h"
#define BUF_2M (2 * 1024 * 1024)
#define BUF_32M (32 * 1024 * 1024)
int main(void) {
hipSetDevice(0);
int *host_array_a = 0;
int *host_array_b = 0;
int *device_array_a = 0;
int *device_array_b = 0;
int *device_array_c = 0;
// TODO 1: Allocate the host's arrays with the specified number of elements:
// host_array_a => 32M
// host_array_b => 32M
host_array_a = (int *) malloc(BUF_32M * sizeof(int));
host_array_b = (int *) malloc(BUF_32M * sizeof(int));
// TODO 2: Allocate the device's arrays with the specified number of elements:
// device_array_a => 32M
// device_array_b => 32M
// device_array_c => 2M
hipMalloc(&device_array_a, BUF_32M * sizeof(int));
hipMalloc(&device_array_b, BUF_32M * sizeof(int));
hipMalloc(&device_array_c, BUF_2M * sizeof(int));
// Check for allocation errors
if (host_array_a == 0 || host_array_b == 0 ||
device_array_a == 0 || device_array_b == 0 ||
device_array_c == 0) {
printf("[*] Error!\n");
return 1;
}
for (int i = 0; i < BUF_32M; ++i) {
host_array_a[i] = i % 32;
host_array_b[i] = i % 2;
}
printf("Before swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 3: Copy from host to device
hipMemcpy(device_array_a, host_array_a, BUF_32M * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(device_array_b, host_array_b, BUF_32M * sizeof(int),
hipMemcpyHostToDevice);
// TODO 4: Swap the buffers (BUF_2M values each iteration)
// Hint 1: device_array_c should be used as a temporary buffer
// Hint 2: hipMemcpy
for (int i = 0; i != BUF_32M; i += BUF_2M) {
hipMemcpy(device_array_c, device_array_b + i, BUF_2M * sizeof(int),
hipMemcpyDeviceToDevice);
hipMemcpy(device_array_b + i, device_array_a + i, BUF_2M * sizeof(int),
hipMemcpyDeviceToDevice);
hipMemcpy(device_array_a + i, device_array_c, BUF_2M * sizeof(int),
hipMemcpyDeviceToDevice);
}
// TODO 5: Copy from device to host
hipMemcpy(host_array_a, device_array_a, BUF_32M * sizeof(int),
hipMemcpyDeviceToHost);
hipMemcpy(host_array_b, device_array_b, BUF_32M * sizeof(int),
hipMemcpyDeviceToHost);
printf("\nAfter swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 6: Free the memory
free(host_array_a);
free(host_array_b);
hipFree(device_array_a);
hipFree(device_array_b);
hipFree(device_array_c);
return 0;
}
| bc385a1040b7fef9720d07cd8aa56e223a63fceb.cu | #include <stdio.h>
#include <math.h>
#include "utils/utils.h"
#define BUF_2M (2 * 1024 * 1024)
#define BUF_32M (32 * 1024 * 1024)
int main(void) {
cudaSetDevice(0);
int *host_array_a = 0;
int *host_array_b = 0;
int *device_array_a = 0;
int *device_array_b = 0;
int *device_array_c = 0;
// TODO 1: Allocate the host's arrays with the specified number of elements:
// host_array_a => 32M
// host_array_b => 32M
host_array_a = (int *) malloc(BUF_32M * sizeof(int));
host_array_b = (int *) malloc(BUF_32M * sizeof(int));
// TODO 2: Allocate the device's arrays with the specified number of elements:
// device_array_a => 32M
// device_array_b => 32M
// device_array_c => 2M
cudaMalloc(&device_array_a, BUF_32M * sizeof(int));
cudaMalloc(&device_array_b, BUF_32M * sizeof(int));
cudaMalloc(&device_array_c, BUF_2M * sizeof(int));
// Check for allocation errors
if (host_array_a == 0 || host_array_b == 0 ||
device_array_a == 0 || device_array_b == 0 ||
device_array_c == 0) {
printf("[*] Error!\n");
return 1;
}
for (int i = 0; i < BUF_32M; ++i) {
host_array_a[i] = i % 32;
host_array_b[i] = i % 2;
}
printf("Before swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 3: Copy from host to device
cudaMemcpy(device_array_a, host_array_a, BUF_32M * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, BUF_32M * sizeof(int),
cudaMemcpyHostToDevice);
// TODO 4: Swap the buffers (BUF_2M values each iteration)
// Hint 1: device_array_c should be used as a temporary buffer
// Hint 2: cudaMemcpy
for (int i = 0; i != BUF_32M; i += BUF_2M) {
cudaMemcpy(device_array_c, device_array_b + i, BUF_2M * sizeof(int),
cudaMemcpyDeviceToDevice);
cudaMemcpy(device_array_b + i, device_array_a + i, BUF_2M * sizeof(int),
cudaMemcpyDeviceToDevice);
cudaMemcpy(device_array_a + i, device_array_c, BUF_2M * sizeof(int),
cudaMemcpyDeviceToDevice);
}
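// Design note: the three device-to-device copies per 2M-element chunk follow the exercise's
// hint of using device_array_c as a temporary buffer; a small swap kernel (or exchanging the
// pointers on the host) could achieve the same result with less memory traffic.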
// TODO 5: Copy from device to host
cudaMemcpy(host_array_a, device_array_a, BUF_32M * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(host_array_b, device_array_b, BUF_32M * sizeof(int),
cudaMemcpyDeviceToHost);
printf("\nAfter swap:\n");
printf("a[i]\tb[i]\n");
for (int i = 0; i < 10; ++i) {
printf("%d\t%d\n", host_array_a[i], host_array_b[i]);
}
// TODO 6: Free the memory
free(host_array_a);
free(host_array_b);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
return 0;
}
|
86506a7ffec1a609ee710038a5dcbc94e7b4074d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
}
} | 86506a7ffec1a609ee710038a5dcbc94e7b4074d.cu | extern "C"
{
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i] = exp(a[i]);
}
}
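// Example host-side launch (illustrative sketch only; the device buffers d_a and d_b are assumed):
// int threads = 256;
// int blocks = (lengthA + threads - 1) / threads;
// expkernel<<<blocks, threads>>>(lengthA, d_a, d_b);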
} |
fb76f9496eafd472666a72d1939a088c3977d1b5.hip | // !!! This is a file automatically generated by hipify!!!
#define PI 3.14159265358979323846264338327950288419716939937510582
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_sf.h>
//#include <cmath>
#include <iostream>
#include <fstream>
//#include <complex>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <chrono>
#include <iomanip>
#include <random>
#include "functions.h"
//#include <hip/hip_complex.h>
#include <thrust/complex.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "constants.h"
#include <cusolverDn.h>
#include <time.h>
#include "math.h"
#include <thrust/device_vector.h>
using namespace std;
__host__ __device__ double mysinc(double x) {
double out = 0.0;
if (abs(x)> 1e-3) {
out = sin(x) / x;
}
else out = 1.0;
return out;
};
__host__ __device__ double myP(double x) {
double out = 0.0;
if (abs(x) > 1e-3) {
out = (1.0-cos(x)) / x;
}
else out = x/2.0;
return out;
};
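// Both helpers above switch to the leading-order Taylor term near x = 0
// (sin(x)/x -> 1 and (1 - cos(x))/x -> x/2) to avoid loss of precision for small arguments.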
__device__ thrust::complex<double> Gfunc(double a, double b, double L) {
thrust::complex<double> out(0.0,0.0);
thrust::complex<double> iunit(0.0, 1.0);
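// Closed form of the iterated integral int_0^L exp(i*b*t) * ( int_0^t exp(i*a*s) ds ) dt;
// the branches handle the a -> 0 and/or b -> 0 limits so that nothing divides by zero.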
if (a == 0.0 && b!=0.0) {
out = (exp(iunit*b*L)*(1.0 - iunit * b*L) - 1.0) / (b*b);
}
else if (b == 0.0 && a != 0.0) {
out = 1.0 / (iunit*a)*( (exp(iunit*a*L)-1.0)/(iunit*a)-L);
}
else if (a == 0.0 && b == 0.0) {
out = L * L / 2.0;
}
else {
out = 1.0 / (iunit*a)*( (exp(iunit*(a+b)*L)-1.0)/(iunit*(a+b))-(exp(iunit*b*L)-1.0)/(iunit*b) );
}
return out;
};
//__device__ double mysinc_dev(double x) {
// double out = 0.0;
// if (x != 0.0) {
// out = sin(x) / x;
// }
//
// else out = 1.0;
// return out;
//};
double lnfac(double x) {
double out=0.0;
if (x!=0.0) {
out=x*log(x)-x+0.5*log(2.0*pi*x)+1.0/(12.0*x)-1.0/(360.0*pow(x,3.0));
}
return out;
};
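// Stirling's series for ln(x!) including the 1/(12x) and 1/(360x^3) correction terms;
// returning 0 at x = 0 is consistent with ln(0!) = 0.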
__device__ void mycross_dev(double *a, double *b, double *c) {
c[1] = a[2] * b[3] - a[3] * b[2];
c[2] = a[3] * b[1] - a[1] * b[3];
c[3] = a[1] * b[2] - a[2] * b[1];
};
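// Note: 3-vectors in this file use components 1..3 for the spatial part; index 0 is unused by the cross product.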
__device__ thrust::complex<double> M_dev(double *const_dev ,double* pveci, double* kvec, int ni, int nf, double kbi, double kbf, int n, double* MATi, double* MATf, double* VECi, double* VECf, int si, int sf, int sfot) {
//arma::mat eigveci(MATi, 2 * N + 1, 2 * N + 1, false, true);
double m = const_dev[0];
double k0 = const_dev[1];
double wmin = const_dev[2];
double wmax = const_dev[3];
double phimin = const_dev[4];
double phimax = const_dev[5];
double pxi = const_dev[6];
double kbmin = const_dev[7];
double kbmax = const_dev[8];
double thetamax = const_dev[9];
double e = const_dev[10];
double pi = const_dev[11];
int N = lround(const_dev[14]);
double pxf = pveci[1] - kvec[1];
double pzf = pveci[3] - kvec[3];
double Ei = pow(2.0*pveci[1] *VECi[ni] + pveci[1] * pveci[1] + pveci[3]* pveci[3] + m * m, 0.5); // there seems to be a mistake here? pxi should be pveci[1]
double Ef = pow(2.0*pxf*VECf[nf] + pxf * pxf + pzf* pzf + m * m, 0.5);
/*double *pol1 = new double[4]();
double *pol2 = new double[4]();*/
double pol1[4];
double pol2[4];
/*mycross(kvec, pveci, pol1);
normalize(pol1);
mycross(kvec, pol1, pol2);
normalize(pol2);*/
double thetasq = (kvec[2]* kvec[2] + kvec[3]* kvec[3]) / (kvec[0]* kvec[0]);
double theta = sqrt(thetasq);
double cphi = kvec[2] / (kvec[0] * theta);
double sphi = kvec[3] / (kvec[0] * theta);
pol1[2] = sphi;
pol1[3] = -cphi;
pol2[1] = -theta;
pol2[2] = cphi;
pol2[3] = sphi;
double *pol;
/*if (sfot == 1) {
pol = pol1;
}
else {
pol = pol2;
}*/
double *pols[2];
pols[0] = pol1;
pols[1] = pol2;
pol = pols[sfot];
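// pol1 and pol2 appear to be the two transverse photon polarization vectors, built from the
// emission angles theta and phi in the small-angle approximation; sfot (0 or 1) selects which
// one enters the matrix element.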
/*double *Pi = new double[4]();
double *Pf = new double[4]();*/
double Pi[4];
double Pf[4];
Pi[0] = 0.0;
Pi[1] = 0.0;
Pi[2] = 0.0;
Pi[3] = 0.0;
Pf[0] = 0.0;
Pf[1] = 0.0;
Pf[2] = 0.0;
Pf[3] = 0.0;
double prodsum = 0.0;
int mimin, mimax;
if (n >= 0) {
mimin = -N;
mimax = N - n;
}
else {
mimin = -N - n;
mimax = N;
}
for (int mi = mimin; mi <= mimax; mi++) {
int mf = mi + n;
//double cmi = eigveci(mi + N, ni);
//double cmf = eigvecf(mf + N, nf);
double cmi = MATi[mi + N + (2 * N + 1)*ni];
double cmf = MATf[mf + N + (2 * N + 1)*nf];
double py_i = k0 * (double)mi + kbi;
double py_f = k0 * (double)mf + kbf;
double px_itemp = pveci[1] + VECi[ni] - py_i * py_i / (2.0*Ei);
double px_ftemp = pxf + VECf[nf] - py_f * py_f / (2.0*Ef);
Pi[1] += cmi * cmf*px_itemp;
Pi[2] += cmi * cmf*py_i;
prodsum += cmi * cmf;
Pf[1] += cmi * cmf* px_ftemp;
Pf[2] += cmi * cmf* py_f;
} // end ns sum
Pi[1] = Pi[1] / (Ei + m);
Pi[2] = Pi[2] / (Ei + m);
Pi[3] = prodsum * pveci[3] / (Ei + m);
Pf[1] = Pf[1] / (Ef + m);
Pf[2] = Pf[2] / (Ef + m);
Pf[3] = prodsum * pzf / (Ef + m);
/*double *A = new double[4]();
double *B = new double[4]();
double *B2 = new double[4]();*/
double A[4];
double B[4];
double B2[4];
A[1] = Pi[1] + Pf[1];
A[2] = Pi[2] + Pf[2];
A[3] = Pi[3] + Pf[3];
B2[1] = Pi[1] - Pf[1];
B2[2] = Pi[2] - Pf[2];
B2[3] = Pi[3] - Pf[3];
mycross_dev(pol, B2, B);
double realcomp = pol[1] * A[1] + pol[2] * A[2] + pol[3] * A[3];
//delete[] pol1;
//delete[] pol2;
thrust::complex<double> out(0.0,0.0);
if (si == 1 && sf == 1) out = thrust::complex<double>(realcomp, B[3]); // up-up case
else if (si == 1 && sf == 0) out = thrust::complex<double>(-B[2], B[1]); // up-down
else if (si == 0 && sf == 0) out = thrust::complex<double>(realcomp, -B[3]); // down-down
else if (si == 0 && sf == 1) out = thrust::complex<double>(B[2], B[1]); // down-up
/*if (si == 1 && sf == 1) {
result[0] = realcomp;
result[1] = B[3];
}
else if (si == 1 && sf == 0) {
result[0]= -B[2];
result[1] = B[1];
}
else if (si == 0 && sf == 0) {
result[0] = realcomp;
result[1] = -B[3];
}
else if (si == 0 && sf == 1) {
result[0] = B[2];
result[1] = B[1];
}
*/
/*delete[] B2;
delete[] B;
delete[] A;*/
/*delete[] Pi;
delete[] Pf;*/
//out = thrust::complex<double>(realcomp, B[1] + B[2] + B[3]);
return out;
}
__device__ double mymod_dev(double a, double b) {
double x;
if (a >= 0) x = fmod(a, b);
else x = fmod(a, b) + b;
return x;
};
__global__ void myradKernel(double *const_dev, double *trajec_dev, double *result_dev) {
double m = const_dev[0];
double wmin = const_dev[1];
double wmax = const_dev[2];
double thetaxmin = const_dev[3];
double thetaxmax = const_dev[4];
double thetaymin = const_dev[5];
double thetaymax = const_dev[6];
double gam0 = const_dev[7];
int points = round(const_dev[8]);
thrust::complex<double> resultx(0.0, 0.0);
thrust::complex<double> resulty(0.0, 0.0);
thrust::complex<double> resultJ(0.0, 0.0);
int id = threadIdx.x + blockIdx.x*blockDim.x;
int a, b, c;
a = id % thetaxfine;
b = (id % (thetaxfine*thetayfine) - a)/ thetaxfine;
c = (id - a - b * thetaxfine) / (thetaxfine*thetayfine);
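// Decompose the flat thread id into (a, b, c) = (theta_x index, theta_y index, frequency index);
// thetaxfine, thetayfine and nfreq are presumably compile-time constants from constants.h.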
double w = (double)c/(double)nfreq*(wmax - wmin) + wmin;
double thetay = (double)b/(double)thetayfine*(thetaymax - thetaymin) + thetaymin;
double thetax = (double)a/(double)thetaxfine*(thetaxmax - thetaxmin) + thetaxmin;
double nx = thetax;
double ny = thetay;
for (int i = 0; i < points - 1; i++) {
double vx = trajec_dev[1 * points + i];
double vy = trajec_dev[2 * points + i];
double vz = trajec_dev[3 * points + i];
double dt = trajec_dev[i + 1] - trajec_dev[i];
double betaxdot = (trajec_dev[1 * points + i + 1] - trajec_dev[1 * points + i]) / dt;
double betaydot = (trajec_dev[2 * points + i + 1] - trajec_dev[2 * points + i]) / dt;
double betazdot = (trajec_dev[3 * points + i + 1] - trajec_dev[3 * points + i]) / dt;
double t = trajec_dev[i];
double x = trajec_dev[4 * points + i];
double y = trajec_dev[5 * points + i];
double z = trajec_dev[6 * points + i];
double deltagam = trajec_dev[7 * points + i];
double betaortho = sqrt(vx*vx + vy * vy);
// Classical
//(*exponent) = { 0.0,*omega*(-*nx**x - *ny**y - *z + 0.5*(pow(*northo,2) + pow(gam,-2))**t) };
double Ecur = (gam0 + deltagam)*m;
double Ep = Ecur - w;
// Strong field
thrust::complex<double> exponent(0.0, w*Ecur / (Ecur - w)*(-nx * x - ny * y - z + 0.5*(nx*nx+ny*ny + 1.0 / (gam0*gam0))*t));
thrust::complex<double> faktor(1.0 / ((0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)*(0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)), 0.0);
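// The phase uses the recoil-corrected frequency w*E/(E - w); together with the
// sqrt((E^2 + E'^2)/(2*E^2)) scaling applied below, this appears to implement the
// strong-field (quantum) correction to the classical radiation integral mentioned
// in the comments above.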
if (w < Ecur) {
thrust::complex<double> v1(ny*(betaydot*(nx - vx) - betaxdot * (ny - vy)) - betaxdot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) + (nx - vx)*betazdot, 0.0);
thrust::complex<double> v2((ny - vy)*betazdot - betaydot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) - nx * ((nx - vx)*betaydot - (ny - vy)*betaxdot), 0.0);
double scale = sqrt((Ecur*Ecur + Ep * Ep) / (2.0*Ecur*Ecur)); // Strong field
//scale = { 1.0,0.0 }; // Classical
v1 = v1 * faktor*exp(exponent)*scale;
v2 = v2 * faktor*exp(exponent)*scale;
thrust::complex<double> vJ(nx*(betaxdot)+(ny)*(betaydot)+betazdot, 0.0);
double scaleJ = w * m / (Ecur*Ecur)*0.70710678118;
vJ = vJ * faktor*exp(exponent)*scaleJ;
resultx += v1 * dt;
resulty += v2 * dt;
resultJ += vJ * dt;
}
}
result_dev[id] = (thrust::norm(resultx)+ thrust::norm(resulty)+ thrust::norm(resultJ));
//result_dev[id] = thrust::norm(resultx);
}
__global__ void myradKernelspin(double *const_dev, double *trajec_dev, double *result_dev) {
double m = const_dev[0];
double wmin = const_dev[1];
double wmax = const_dev[2];
double thetaxmin = const_dev[3];
double thetaxmax = const_dev[4];
double thetaymin = const_dev[5];
double thetaymax = const_dev[6];
double gam0 = const_dev[7];
int points = round(const_dev[8]);
double vorthosqavg = const_dev[9];
thrust::complex<double> resultIx(0.0, 0.0);
thrust::complex<double> resultIy(0.0, 0.0);
thrust::complex<double> resultJ(0.0, 0.0);
int id = threadIdx.x + blockIdx.x*blockDim.x;
int a, b, c;
a = id % thetaxfine;
b = (id % (thetaxfine*thetayfine) - a) / thetaxfine;
c = (id - a - b * thetaxfine) / (thetayfine*thetaxfine);
double w = (double)c / (double)nfreq*(wmax - wmin) + wmin;
double thetay = (double)b / ((double)thetayfine-1.0)*(thetaymax - thetaymin) + thetaymin;
double thetax = (double)a / ((double)thetaxfine-1.0)*(thetaxmax - thetaxmin) + thetaxmin;
double w0 = 2.0*PI / (trajec_dev[points - 1] - trajec_dev[0]);
double wp = w * gam0*m / (gam0*m - w);
double Ecur = gam0 * m;
double Ep = Ecur - w;
double thetasq = thetax*thetax+thetay*thetay;
double theta = sqrt(thetasq);
double nx = thetax;
double ny = thetay;
for (int i = 0; i < points - 1; i++) {
double vx = trajec_dev[1 * points + i];
double vy = trajec_dev[2 * points + i];
double vz = trajec_dev[3 * points + i];
double dt = trajec_dev[i + 1] - trajec_dev[i];
double betaxdot = (trajec_dev[1 * points + i + 1] - trajec_dev[1 * points + i]) / dt;
double betaydot = (trajec_dev[2 * points + i + 1] - trajec_dev[2 * points + i]) / dt;
double betazdot = (trajec_dev[3 * points + i + 1] - trajec_dev[3 * points + i]) / dt;
double t = trajec_dev[i];
double x = trajec_dev[4 * points + i];
double y = trajec_dev[5 * points + i];
double z = trajec_dev[6 * points + i];
double deltagam = trajec_dev[7 * points + i];
double betaortho = sqrt(vx*vx + vy * vy);
// Classical
//(*exponent) = { 0.0,*omega*(-*nx**x - *ny**y - *z + 0.5*(pow(*northo,2) + pow(gam,-2))**t) };
double Ecur = (gam0 + deltagam)*m;
double Ep = Ecur - w;
// Strong field
thrust::complex<double> exponent(0.0, w*Ecur / (Ecur - w)*(-nx * x - ny * y - z + 0.5*(nx*nx + ny * ny + 1.0 / (gam0*gam0))*t));
thrust::complex<double> faktor(1.0 / ((0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)*(0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)), 0.0);
if (w < Ecur) {
thrust::complex<double> v1(ny*(betaydot*(nx - vx) - betaxdot * (ny - vy)) - betaxdot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) + (nx - vx)*betazdot, 0.0);
thrust::complex<double> v2((ny - vy)*betazdot - betaydot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) - nx * ((nx - vx)*betaydot - (ny - vy)*betaxdot), 0.0);
//double scale = sqrt((Ecur*Ecur + Ep * Ep) / (2.0*Ecur*Ecur)); // Strong field
//scale = { 1.0,0.0 }; // Classical
v1 = v1 * faktor*exp(exponent);
v2 = v2 * faktor*exp(exponent);
thrust::complex<double> vJ(nx*(betaxdot)+(ny)*(betaydot)+betazdot, 0.0);
//double scaleJ = w * m / (Ecur*Ecur)*0.70710678118;
vJ = vJ * faktor*exp(exponent);
resultIx += v1 * dt;
resultIy += v2 * dt;
resultJ += vJ * dt;
}
}
thrust::complex<double> si_u;
thrust::complex<double> si_d;
thrust::complex<double> sf_u;
thrust::complex<double> sf_d;
thrust::complex<double> polupx;
thrust::complex<double> polupy;
thrust::complex<double> polupz;
thrust::complex<double> poldownx;
thrust::complex<double> poldowny;
thrust::complex<double> poldownz;
thrust::complex<double> polx;
thrust::complex<double> poly;
thrust::complex<double> polz;
//circular
polupx = pow(2.0,-0.5)*thrust::complex<double>(1.0,0.0); // Left handed i.e. positive helicity
polupy = pow(2.0, -0.5)*thrust::complex<double>(0.0, 1.0);
polupz = pow(2.0, -0.5)*thrust::complex<double>(-nx, -ny);
poldownx = pow(2.0, -0.5)*thrust::complex<double>(1.0, 0.0);
poldowny = pow(2.0, -0.5)*thrust::complex<double>(0.0, -1.0);
poldownz = pow(2.0, -0.5)*thrust::complex<double>(-nx, ny);
//linear
/*polupx = thrust::complex<double>(1.0, 0.0);
polupy = thrust::complex<double>(0.0, 0.0);
polupz = thrust::complex<double>(-nx, 0.0);
poldownx = thrust::complex<double>(0.0, 0.0);
poldowny = thrust::complex<double>(1.0, 0.0);
poldownz = thrust::complex<double>(-ny, 0.0);*/
double C1 = Ecur / (2.0*pow(Ecur*Ep, 0.5))*(pow((Ep+m)/(Ecur+m),0.5) + pow((Ecur + m) / (Ep + m), 0.5));
double D1 = Ecur / (2.0*pow(Ecur*Ep, 0.5))*(pow((Ep + m) / (Ecur + m), 0.5) - pow((Ecur + m) / (Ep + m), 0.5));
double D2 = w / (2.0*pow(Ecur*Ep, 0.5))*pow((Ecur + m) / (Ep + m), 0.5);
result_dev[id] = 0.0;
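	// The loop below runs over the 8 combinations of photon polarization (d < 4: polup,
	// d >= 4: poldown) and initial/final electron spin along z (up-up, down-down,
	// up-down, down-up); each combination is written to its own slice of result_dev.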
for (int d = 0; d < 8; d++) {
if (d == 0) { //up-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0 , 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 1) { //down-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 2) { //up-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0, 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 3) { //down-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 4) { //up-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0 , 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 5) { //down-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 6) { //up-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0, 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 7) { //down-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
thrust::complex<double> iunit(0.0, 1.0);
thrust::complex<double> Bx = (D1*resultIx-(D1+D2)*nx*resultJ);
thrust::complex<double> By = (D1*resultIy - (D1 + D2)*ny*resultJ);
thrust::complex<double> Bz = 1.0*(0.0 - (D1 + D2)*resultJ);
thrust::complex<double> Dx = conj(poly) * Bz - conj(polz) * By;
thrust::complex<double> Dy = conj(polz) * Bx - conj(polx) * Bz;
thrust::complex<double> Dz = 1.0*(conj(polx) * By - conj(poly) * Bx);
thrust::complex<double> Ccomp = C1 * (conj(polx)*resultIx + conj(poly) * resultIy);
thrust::complex<double> imagu(0.0, 1.0);
result_dev[id+d* thetayfine*thetaxfine*nfreq] += thrust::norm((conj(sf_u)*si_u + conj(sf_d)*si_d)*Ccomp + conj(sf_u)*(imagu*Dz*si_u + (imagu*Dx + Dy)*si_d) + conj(sf_d)*((imagu*Dx - Dy)*si_u - imagu * Dz*si_d));
result_dev[id + d * thetayfine*thetaxfine*nfreq] = result_dev[id+d* thetayfine*thetaxfine*nfreq] * w*w/(wp*wp);
}
}
hipError_t calculator2(double *consts, double *trajec, double *out, double thetaxmin, double thetaxmax, double thetaymin, double thetaymax) {
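	// Host driver: copy the constants and trajectory to the device, launch myradKernel
	// over the frequency/angle grid, then sum the per-angle spectra over the angular
	// grid into out[] (one entry per frequency).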
hipError_t cudaStatus;
//double *consts = new double[9];
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
cudaStatus = hipSetDevice(0);
//hipDeviceReset();
//int devID = 0;
//int argc;
//char argv[1000];
//devID =findCudaDevice(argc, (const char **)argv);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
double *const_dev;
double *result_dev;
double *trajec_dev;
cudaStatus = hipMalloc((void**)&const_dev, 9 * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&result_dev, thetaxfine*thetayfine*nfreq * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&trajec_dev, 8*pointsprim * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(const_dev, consts, 9 * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 1 failed!");
goto Error;
}
cudaStatus = hipMemcpy(trajec_dev, trajec, 8*pointsprim * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 2 failed!");
goto Error;
}
myradKernel << <BLOCKS, THREADS >> > (const_dev, trajec_dev, result_dev);
cudaStatus = hipPeekAtLastError();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "myradKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching myradKernel!\n", cudaStatus);
goto Error;
}
double *result = new double[thetaxfine*thetayfine*nfreq]();
cudaStatus = hipMemcpy(result, result_dev, thetaxfine*thetayfine*nfreq * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy 3 failed!");
goto Error;
}
for (int i = 0; i < nfreq; i++) {
for (int j = 0; j < thetayfine; j++) {
for (int l = 0; l < thetaxfine; l++) {
int id = i * thetaxfine*thetayfine + j * thetaxfine + l;
out[i] += result[id]*e*e/(4.0*pi*pi)*(thetaxmax-thetaxmin)/(double)thetaxfine*(thetaymax-thetaymin)/(double)thetayfine/tprim;
}
}
}
hipFree(result_dev);
hipFree(trajec_dev);
hipFree(const_dev);
delete[] result;
Error:
return cudaStatus;
} // end calc2
hipError_t calculator3(double *consts, double *trajec, double *out1, double *out2, double *out3, double *out4, double *out5, double *out6, double *out7, double *out8) {
hipError_t cudaStatus;
//double *consts = new double[9];
double thetaxmin = consts[3];
double thetaxmax = consts[4];
double thetaymin = consts[5];
double thetaymax = consts[6];
double *const_dev;
double *result_dev;
double *trajec_dev;
cudaStatus = hipMalloc((void**)&const_dev, 9 * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&result_dev, 8*thetaxfine*thetayfine*nfreq * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&trajec_dev, 8 * pointsprim * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(const_dev, consts, 9 * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 1 failed!");
goto Error;
}
cudaStatus = hipMemcpy(trajec_dev, trajec, 8 * pointsprim * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 2 failed!");
goto Error;
}
myradKernelspin << <BLOCKS, THREADS >> > (const_dev, trajec_dev, result_dev);
cudaStatus = hipPeekAtLastError();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "myradKernelspin launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching myradKernelspin!\n", cudaStatus);
goto Error;
}
double *result = new double[thetaxfine*thetayfine*nfreq*8]();
cudaStatus = hipMemcpy(result, result_dev, 8*thetaxfine*thetayfine*nfreq * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy 3 failed!");
goto Error;
}
for (int i = 0; i < nfreq; i++) {
for (int j = 0; j < thetayfine; j++) {
for (int l = 0; l < thetaxfine; l++) {
int id = i * thetaxfine*thetayfine + j * thetaxfine + l;
out1[i] += result[id] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out2[i] += result[id+ 1*thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine-1.0)*(thetaymax - thetaymin) / ((double)thetayfine-1.0);
out3[i] += result[id + 2*thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out4[i] += result[id + 3 * thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out5[i] += result[id + 4 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out6[i] += result[id + 5 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out7[i] += result[id + 6 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out8[i] += result[id + 7 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
}
}
}
hipFree(result_dev);
hipFree(trajec_dev);
hipFree(const_dev);
delete[] result;
Error:
return cudaStatus;
} // end calc3
//
//hipError_t calculator4(double *trajec, double vorthosqavg, double *out, double phimin, double phimax) {
// hipError_t cudaStatus;
// double *consts = new double[10];
//
// consts[0] = m;
// consts[1] = wmin;
// consts[2] = wmax;
// consts[3] = phimin;
// consts[4] = phimax;
// consts[5] = thetamin;
// consts[6] = thetamax;
// consts[7] = E0 / m;
// consts[8] = (double)points1period;
// consts[9] = vorthosqavg;
//
//
//
//
// int nDevices;
// hipGetDeviceCount(&nDevices);
// for (int i = 0; i < nDevices; i++) {
// hipDeviceProp_t prop;
// hipGetDeviceProperties(&prop, i);
// printf("Device Number: %d\n", i);
// printf(" Device name: %s\n", prop.name);
// printf(" Memory Clock Rate (KHz): %d\n",
// prop.memoryClockRate);
// printf(" Memory Bus Width (bits): %d\n",
// prop.memoryBusWidth);
// printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
// 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
// }
//
//
// cudaStatus = hipSetDevice(0);
// //hipDeviceReset();
//
////int devID = 0;
////int argc;
////char argv[1000];
////devID =findCudaDevice(argc, (const char **)argv);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
//
//
// double *const_dev;
// double *result_dev;
// double *trajec_dev;
//
// cudaStatus = hipMalloc((void**)&const_dev, 10 * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&result_dev, harmonicsfine*phifine*nfreq * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&trajec_dev, 8 * points1period * sizeof(double));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(const_dev, consts, 10 * sizeof(double), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy 1 failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(trajec_dev, trajec, 8 * points1period * sizeof(double), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy 2 failed!");
// goto Error;
// }
//
//
//
// myradKernelspin << <BLOCKS2, THREADS2 >> > (const_dev, trajec_dev, result_dev);
//
// cudaStatus = hipPeekAtLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "myKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
//
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching myKernel!\n", cudaStatus);
// goto Error;
// }
//
//
// double *result = new double[harmonicsfine*phifine*nfreq]();
//
// cudaStatus = hipMemcpy(result, result_dev, harmonicsfine*phifine*nfreq * sizeof(double), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy 1 failed!");
// goto Error;
// }
//
// for (int i = 0; i < nfreq; i++) {
// for (int j = 0; j < harmonicsfine; j++) {
// for (int l = 0; l < phifine; l++) {
// int id = i * harmonicsfine*phifine + j * phifine + l;
// out[i] += result[id] * e*e*(phimax - phimin) / (double)phifine;
// }
// }
// }
//
// hipFree(result_dev);
// hipFree(trajec_dev);
// hipFree(const_dev);
//
// delete[] result;
//
//Error:
// return cudaStatus;
//
//} // end calc2 | fb76f9496eafd472666a72d1939a088c3977d1b5.cu | #define PI 3.14159265358979323846264338327950288419716939937510582
#include <gsl/gsl_errno.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_odeiv2.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_sf.h>
//#include <cmath>
#include <iostream>
#include <fstream>
//#include <complex>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <chrono>
#include <iomanip>
#include <random>
#include "functions.h"
//#include <cuComplex.h>
#include <thrust/complex.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "constants.h"
#include <cusolverDn.h>
#include <time.h>
#include "math.h"
#include <thrust/device_vector.h>
using namespace std;
__host__ __device__ double mysinc(double x) {
double out = 0.0;
if (abs(x)> 1e-3) {
out = sin(x) / x;
}
else out = 1.0;
return out;
};
__host__ __device__ double myP(double x) {
double out = 0.0;
if (abs(x) > 1e-3) {
out = (1.0-cos(x)) / x;
}
else out = x/2.0;
return out;
};
__device__ thrust::complex<double> Gfunc(double a, double b, double L) {
thrust::complex<double> out(0.0,0.0);
thrust::complex<double> iunit(0.0, 1.0);
if (a == 0.0 && b!=0.0) {
out = (exp(iunit*b*L)*(1.0 - iunit * b*L) - 1.0) / (b*b);
}
else if (b == 0.0 && a != 0.0) {
out = 1.0 / (iunit*a)*( (exp(iunit*a*L)-1.0)/(iunit*a)-L);
}
else if (a == 0.0 && b == 0.0) {
out = L * L / 2.0;
}
else {
out = 1.0 / (iunit*a)*( (exp(iunit*(a+b)*L)-1.0)/(iunit*(a+b))-(exp(iunit*b*L)-1.0)/(iunit*b) );
}
return out;
};
//__device__ double mysinc_dev(double x) {
// double out = 0.0;
// if (x != 0.0) {
// out = sin(x) / x;
// }
//
// else out = 1.0;
// return out;
//};
double lnfac(double x) {
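	// Stirling-series approximation of ln(x!); returns 0 when x == 0.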
double out=0.0;
if (x!=0.0) {
out=x*log(x)-x+0.5*log(2.0*pi*x)+1.0/(12.0*x)-1.0/(360.0*pow(x,3.0));
}
return out;
};
__device__ void mycross_dev(double *a, double *b, double *c) {
c[1] = a[2] * b[3] - a[3] * b[2];
c[2] = a[3] * b[1] - a[1] * b[3];
c[3] = a[1] * b[2] - a[2] * b[1];
};
__device__ thrust::complex<double> M_dev(double *const_dev ,double* pveci, double* kvec, int ni, int nf, double kbi, double kbf, int n, double* MATi, double* MATf, double* VECi, double* VECf, int si, int sf, int sfot) {
//arma::mat eigveci(MATi, 2 * N + 1, 2 * N + 1, false, true);
double m = const_dev[0];
double k0 = const_dev[1];
double wmin = const_dev[2];
double wmax = const_dev[3];
double phimin = const_dev[4];
double phimax = const_dev[5];
double pxi = const_dev[6];
double kbmin = const_dev[7];
double kbmax = const_dev[8];
double thetamax = const_dev[9];
double e = const_dev[10];
double pi = const_dev[11];
int N = lround(const_dev[14]);
double pxf = pveci[1] - kvec[1];
double pzf = pveci[3] - kvec[3];
double Ei = pow(2.0*pveci[1] *VECi[ni] + pveci[1] * pveci[1] + pveci[3]* pveci[3] + m * m, 0.5); // there seems to be a mistake here? pxi should be pveci[1]
double Ef = pow(2.0*pxf*VECf[nf] + pxf * pxf + pzf* pzf + m * m, 0.5);
/*double *pol1 = new double[4]();
double *pol2 = new double[4]();*/
double pol1[4];
double pol2[4];
/*mycross(kvec, pveci, pol1);
normalize(pol1);
mycross(kvec, pol1, pol2);
normalize(pol2);*/
double thetasq = (kvec[2]* kvec[2] + kvec[3]* kvec[3]) / (kvec[0]* kvec[0]);
double theta = sqrt(thetasq);
double cphi = kvec[2] / (kvec[0] * theta);
double sphi = kvec[3] / (kvec[0] * theta);
pol1[2] = sphi;
pol1[3] = -cphi;
pol2[1] = -theta;
pol2[2] = cphi;
pol2[3] = sphi;
double *pol;
/*if (sfot == 1) {
pol = pol1;
}
else {
pol = pol2;
}*/
double *pols[2];
pols[0] = pol1;
pols[1] = pol2;
pol = pols[sfot];
/*double *Pi = new double[4]();
double *Pf = new double[4]();*/
double Pi[4];
double Pf[4];
Pi[0] = 0.0;
Pi[1] = 0.0;
Pi[2] = 0.0;
Pi[3] = 0.0;
Pf[0] = 0.0;
Pf[1] = 0.0;
Pf[2] = 0.0;
Pf[3] = 0.0;
double prodsum = 0.0;
int mimin, mimax;
if (n >= 0) {
mimin = -N;
mimax = N - n;
}
else {
mimin = -N - n;
mimax = N;
}
for (int mi = mimin; mi <= mimax; mi++) {
int mf = mi + n;
//double cmi = eigveci(mi + N, ni);
//double cmf = eigvecf(mf + N, nf);
double cmi = MATi[mi + N + (2 * N + 1)*ni];
double cmf = MATf[mf + N + (2 * N + 1)*nf];
double py_i = k0 * (double)mi + kbi;
double py_f = k0 * (double)mf + kbf;
double px_itemp = pveci[1] + VECi[ni] - py_i * py_i / (2.0*Ei);
double px_ftemp = pxf + VECf[nf] - py_f * py_f / (2.0*Ef);
Pi[1] += cmi * cmf*px_itemp;
Pi[2] += cmi * cmf*py_i;
prodsum += cmi * cmf;
Pf[1] += cmi * cmf* px_ftemp;
Pf[2] += cmi * cmf* py_f;
} // end ns sum
Pi[1] = Pi[1] / (Ei + m);
Pi[2] = Pi[2] / (Ei + m);
Pi[3] = prodsum * pveci[3] / (Ei + m);
Pf[1] = Pf[1] / (Ef + m);
Pf[2] = Pf[2] / (Ef + m);
Pf[3] = prodsum * pzf / (Ef + m);
/*double *A = new double[4]();
double *B = new double[4]();
double *B2 = new double[4]();*/
double A[4];
double B[4];
double B2[4];
A[1] = Pi[1] + Pf[1];
A[2] = Pi[2] + Pf[2];
A[3] = Pi[3] + Pf[3];
B2[1] = Pi[1] - Pf[1];
B2[2] = Pi[2] - Pf[2];
B2[3] = Pi[3] - Pf[3];
mycross_dev(pol, B2, B);
double realcomp = pol[1] * A[1] + pol[2] * A[2] + pol[3] * A[3];
//delete[] pol1;
//delete[] pol2;
thrust::complex<double> out(0.0,0.0);
if (si == 1 && sf == 1) out = thrust::complex<double>(realcomp, B[3]); // up-up case
else if (si == 1 && sf == 0) out = thrust::complex<double>(-B[2], B[1]); // up-down
else if (si == 0 && sf == 0) out = thrust::complex<double>(realcomp, -B[3]); // down-down
else if (si == 0 && sf == 1) out = thrust::complex<double>(B[2], B[1]); // down-up
/*if (si == 1 && sf == 1) {
result[0] = realcomp;
result[1] = B[3];
}
else if (si == 1 && sf == 0) {
result[0]= -B[2];
result[1] = B[1];
}
else if (si == 0 && sf == 0) {
result[0] = realcomp;
result[1] = -B[3];
}
else if (si == 0 && sf == 1) {
result[0] = B[2];
result[1] = B[1];
}
*/
/*delete[] B2;
delete[] B;
delete[] A;*/
/*delete[] Pi;
delete[] Pf;*/
//out = thrust::complex<double>(realcomp, B[1] + B[2] + B[3]);
return out;
}
__device__ double mymod_dev(double a, double b) {
double x;
if (a >= 0) x = fmod(a, b);
else x = fmod(a, b) + b;
return x;
};
__global__ void myradKernel(double *const_dev, double *trajec_dev, double *result_dev) {
double m = const_dev[0];
double wmin = const_dev[1];
double wmax = const_dev[2];
double thetaxmin = const_dev[3];
double thetaxmax = const_dev[4];
double thetaymin = const_dev[5];
double thetaymax = const_dev[6];
double gam0 = const_dev[7];
int points = round(const_dev[8]);
thrust::complex<double> resultx(0.0, 0.0);
thrust::complex<double> resulty(0.0, 0.0);
thrust::complex<double> resultJ(0.0, 0.0);
int id = threadIdx.x + blockIdx.x*blockDim.x;
int a, b, c;
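	// Decompose the flat thread id into grid indices:
	//   a = thetax index, b = thetay index, c = frequency index
	// (the launch is assumed to cover thetaxfine*thetayfine*nfreq threads in total).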
a = id % thetaxfine;
b = (id % (thetaxfine*thetayfine) - a)/ thetaxfine;
c = (id - a - b * thetaxfine) / (thetaxfine*thetayfine);
double w = (double)c/(double)nfreq*(wmax - wmin) + wmin;
double thetay = (double)b/(double)thetayfine*(thetaymax - thetaymin) + thetaymin;
double thetax = (double)a/(double)thetaxfine*(thetaxmax - thetaxmin) + thetaxmin;
double nx = thetax;
double ny = thetay;
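	// Integrate the radiation amplitude along the tabulated trajectory; the velocity
	// derivatives are formed by finite differences, and the 'scale' factors below
	// appear to carry the strong-field (photon recoil) correction.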
for (int i = 0; i < points - 1; i++) {
double vx = trajec_dev[1 * points + i];
double vy = trajec_dev[2 * points + i];
double vz = trajec_dev[3 * points + i];
double dt = trajec_dev[i + 1] - trajec_dev[i];
double betaxdot = (trajec_dev[1 * points + i + 1] - trajec_dev[1 * points + i]) / dt;
double betaydot = (trajec_dev[2 * points + i + 1] - trajec_dev[2 * points + i]) / dt;
double betazdot = (trajec_dev[3 * points + i + 1] - trajec_dev[3 * points + i]) / dt;
double t = trajec_dev[i];
double x = trajec_dev[4 * points + i];
double y = trajec_dev[5 * points + i];
double z = trajec_dev[6 * points + i];
double deltagam = trajec_dev[7 * points + i];
double betaortho = sqrt(vx*vx + vy * vy);
// Classical
//(*exponent) = { 0.0,*omega*(-*nx**x - *ny**y - *z + 0.5*(pow(*northo,2) + pow(gam,-2))**t) };
double Ecur = (gam0 + deltagam)*m;
double Ep = Ecur - w;
// Strong field
thrust::complex<double> exponent(0.0, w*Ecur / (Ecur - w)*(-nx * x - ny * y - z + 0.5*(nx*nx+ny*ny + 1.0 / (gam0*gam0))*t));
thrust::complex<double> faktor(1.0 / ((0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)*(0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)), 0.0);
if (w < Ecur) {
thrust::complex<double> v1(ny*(betaydot*(nx - vx) - betaxdot * (ny - vy)) - betaxdot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) + (nx - vx)*betazdot, 0.0);
thrust::complex<double> v2((ny - vy)*betazdot - betaydot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) - nx * ((nx - vx)*betaydot - (ny - vy)*betaxdot), 0.0);
double scale = sqrt((Ecur*Ecur + Ep * Ep) / (2.0*Ecur*Ecur)); // Strong field
//scale = { 1.0,0.0 }; // Classical
v1 = v1 * faktor*exp(exponent)*scale;
v2 = v2 * faktor*exp(exponent)*scale;
thrust::complex<double> vJ(nx*(betaxdot)+(ny)*(betaydot)+betazdot, 0.0);
double scaleJ = w * m / (Ecur*Ecur)*0.70710678118;
vJ = vJ * faktor*exp(exponent)*scaleJ;
resultx += v1 * dt;
resulty += v2 * dt;
resultJ += vJ * dt;
}
}
result_dev[id] = (thrust::norm(resultx)+ thrust::norm(resulty)+ thrust::norm(resultJ));
//result_dev[id] = thrust::norm(resultx);
}
__global__ void myradKernelspin(double *const_dev, double *trajec_dev, double *result_dev) {
double m = const_dev[0];
double wmin = const_dev[1];
double wmax = const_dev[2];
double thetaxmin = const_dev[3];
double thetaxmax = const_dev[4];
double thetaymin = const_dev[5];
double thetaymax = const_dev[6];
double gam0 = const_dev[7];
int points = round(const_dev[8]);
double vorthosqavg = const_dev[9];
thrust::complex<double> resultIx(0.0, 0.0);
thrust::complex<double> resultIy(0.0, 0.0);
thrust::complex<double> resultJ(0.0, 0.0);
int id = threadIdx.x + blockIdx.x*blockDim.x;
int a, b, c;
a = id % thetaxfine;
b = (id % (thetaxfine*thetayfine) - a) / thetaxfine;
c = (id - a - b * thetaxfine) / (thetayfine*thetaxfine);
double w = (double)c / (double)nfreq*(wmax - wmin) + wmin;
double thetay = (double)b / ((double)thetayfine-1.0)*(thetaymax - thetaymin) + thetaymin;
double thetax = (double)a / ((double)thetaxfine-1.0)*(thetaxmax - thetaxmin) + thetaxmin;
double w0 = 2.0*PI / (trajec_dev[points - 1] - trajec_dev[0]);
double wp = w * gam0*m / (gam0*m - w);
double Ecur = gam0 * m;
double Ep = Ecur - w;
double thetasq = thetax*thetax+thetay*thetay;
double theta = sqrt(thetasq);
double nx = thetax;
double ny = thetay;
for (int i = 0; i < points - 1; i++) {
double vx = trajec_dev[1 * points + i];
double vy = trajec_dev[2 * points + i];
double vz = trajec_dev[3 * points + i];
double dt = trajec_dev[i + 1] - trajec_dev[i];
double betaxdot = (trajec_dev[1 * points + i + 1] - trajec_dev[1 * points + i]) / dt;
double betaydot = (trajec_dev[2 * points + i + 1] - trajec_dev[2 * points + i]) / dt;
double betazdot = (trajec_dev[3 * points + i + 1] - trajec_dev[3 * points + i]) / dt;
double t = trajec_dev[i];
double x = trajec_dev[4 * points + i];
double y = trajec_dev[5 * points + i];
double z = trajec_dev[6 * points + i];
double deltagam = trajec_dev[7 * points + i];
double betaortho = sqrt(vx*vx + vy * vy);
// Classical
//(*exponent) = { 0.0,*omega*(-*nx**x - *ny**y - *z + 0.5*(pow(*northo,2) + pow(gam,-2))**t) };
double Ecur = (gam0 + deltagam)*m;
double Ep = Ecur - w;
// Strong field
thrust::complex<double> exponent(0.0, w*Ecur / (Ecur - w)*(-nx * x - ny * y - z + 0.5*(nx*nx + ny * ny + 1.0 / (gam0*gam0))*t));
thrust::complex<double> faktor(1.0 / ((0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)*(0.5*(1.0 / (gam0*gam0) + nx * nx + ny * ny) - nx * vx - ny * vy - vz)), 0.0);
if (w < Ecur) {
thrust::complex<double> v1(ny*(betaydot*(nx - vx) - betaxdot * (ny - vy)) - betaxdot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) + (nx - vx)*betazdot, 0.0);
thrust::complex<double> v2((ny - vy)*betazdot - betaydot * 0.5*(1.0 / (gam0*gam0) - nx * nx - ny * ny - 2.0*vz) - nx * ((nx - vx)*betaydot - (ny - vy)*betaxdot), 0.0);
//double scale = sqrt((Ecur*Ecur + Ep * Ep) / (2.0*Ecur*Ecur)); // Strong field
//scale = { 1.0,0.0 }; // Classical
v1 = v1 * faktor*exp(exponent);
v2 = v2 * faktor*exp(exponent);
thrust::complex<double> vJ(nx*(betaxdot)+(ny)*(betaydot)+betazdot, 0.0);
//double scaleJ = w * m / (Ecur*Ecur)*0.70710678118;
vJ = vJ * faktor*exp(exponent);
resultIx += v1 * dt;
resultIy += v2 * dt;
resultJ += vJ * dt;
}
}
thrust::complex<double> si_u;
thrust::complex<double> si_d;
thrust::complex<double> sf_u;
thrust::complex<double> sf_d;
thrust::complex<double> polupx;
thrust::complex<double> polupy;
thrust::complex<double> polupz;
thrust::complex<double> poldownx;
thrust::complex<double> poldowny;
thrust::complex<double> poldownz;
thrust::complex<double> polx;
thrust::complex<double> poly;
thrust::complex<double> polz;
//circular
polupx = pow(2.0,-0.5)*thrust::complex<double>(1.0,0.0); // Left handed i.e. positive helicity
polupy = pow(2.0, -0.5)*thrust::complex<double>(0.0, 1.0);
polupz = pow(2.0, -0.5)*thrust::complex<double>(-nx, -ny);
poldownx = pow(2.0, -0.5)*thrust::complex<double>(1.0, 0.0);
poldowny = pow(2.0, -0.5)*thrust::complex<double>(0.0, -1.0);
poldownz = pow(2.0, -0.5)*thrust::complex<double>(-nx, ny);
//linear
/*polupx = thrust::complex<double>(1.0, 0.0);
polupy = thrust::complex<double>(0.0, 0.0);
polupz = thrust::complex<double>(-nx, 0.0);
poldownx = thrust::complex<double>(0.0, 0.0);
poldowny = thrust::complex<double>(1.0, 0.0);
poldownz = thrust::complex<double>(-ny, 0.0);*/
double C1 = Ecur / (2.0*pow(Ecur*Ep, 0.5))*(pow((Ep+m)/(Ecur+m),0.5) + pow((Ecur + m) / (Ep + m), 0.5));
double D1 = Ecur / (2.0*pow(Ecur*Ep, 0.5))*(pow((Ep + m) / (Ecur + m), 0.5) - pow((Ecur + m) / (Ep + m), 0.5));
double D2 = w / (2.0*pow(Ecur*Ep, 0.5))*pow((Ecur + m) / (Ep + m), 0.5);
result_dev[id] = 0.0;
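	// The loop below runs over the 8 combinations of photon polarization (d < 4: polup,
	// d >= 4: poldown) and initial/final electron spin along z (up-up, down-down,
	// up-down, down-up); each combination is written to its own slice of result_dev.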
for (int d = 0; d < 8; d++) {
if (d == 0) { //up-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0 , 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 1) { //down-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 2) { //up-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0, 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 3) { //down-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = polupx;
poly = polupy;
polz = polupz;
}
if (d == 4) { //up-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0 , 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 5) { //down-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 6) { //up-down
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(1.0, 0.0);
si_d = thrust::complex<double>(0.0, 0.0);
sf_u = thrust::complex<double>(0.0, 0.0);
sf_d = thrust::complex<double>(1.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
if (d == 7) { //down-up
//Spin along y
/*si_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
si_d = thrust::complex<double>(0.0, -1.0 / sqrt(2.0));
sf_u = thrust::complex<double>(1.0 / sqrt(2.0), 0.0);
sf_d = thrust::complex<double>(0.0, 1.0 / sqrt(2.0));*/
//Spin along z
si_u = thrust::complex<double>(0.0, 0.0);
si_d = thrust::complex<double>(1.0, 0.0);
sf_u = thrust::complex<double>(1.0, 0.0);
sf_d = thrust::complex<double>(0.0, 0.0);
polx = poldownx;
poly = poldowny;
polz = poldownz;
}
thrust::complex<double> iunit(0.0, 1.0);
thrust::complex<double> Bx = (D1*resultIx-(D1+D2)*nx*resultJ);
thrust::complex<double> By = (D1*resultIy - (D1 + D2)*ny*resultJ);
thrust::complex<double> Bz = 1.0*(0.0 - (D1 + D2)*resultJ);
thrust::complex<double> Dx = conj(poly) * Bz - conj(polz) * By;
thrust::complex<double> Dy = conj(polz) * Bx - conj(polx) * Bz;
thrust::complex<double> Dz = 1.0*(conj(polx) * By - conj(poly) * Bx);
thrust::complex<double> Ccomp = C1 * (conj(polx)*resultIx + conj(poly) * resultIy);
thrust::complex<double> imagu(0.0, 1.0);
result_dev[id+d* thetayfine*thetaxfine*nfreq] += thrust::norm((conj(sf_u)*si_u + conj(sf_d)*si_d)*Ccomp + conj(sf_u)*(imagu*Dz*si_u + (imagu*Dx + Dy)*si_d) + conj(sf_d)*((imagu*Dx - Dy)*si_u - imagu * Dz*si_d));
result_dev[id + d * thetayfine*thetaxfine*nfreq] = result_dev[id+d* thetayfine*thetaxfine*nfreq] * w*w/(wp*wp);
}
}
cudaError_t calculator2(double *consts, double *trajec, double *out, double thetaxmin, double thetaxmax, double thetaymin, double thetaymax) {
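	// Host driver: copy the constants and trajectory to the device, launch myradKernel
	// over the frequency/angle grid, then sum the per-angle spectra over the angular
	// grid into out[] (one entry per frequency).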
cudaError_t cudaStatus;
//double *consts = new double[9];
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
cudaStatus = cudaSetDevice(0);
//cudaDeviceReset();
//int devID = 0;
//int argc;
//char argv[1000];
//devID =findCudaDevice(argc, (const char **)argv);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
double *const_dev;
double *result_dev;
double *trajec_dev;
cudaStatus = cudaMalloc((void**)&const_dev, 9 * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&result_dev, thetaxfine*thetayfine*nfreq * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&trajec_dev, 8*pointsprim * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(const_dev, consts, 9 * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 1 failed!");
goto Error;
}
cudaStatus = cudaMemcpy(trajec_dev, trajec, 8*pointsprim * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 2 failed!");
goto Error;
}
myradKernel << <BLOCKS, THREADS >> > (const_dev, trajec_dev, result_dev);
cudaStatus = cudaPeekAtLastError();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "myradKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myradKernel!\n", cudaStatus);
goto Error;
}
double *result = new double[thetaxfine*thetayfine*nfreq]();
cudaStatus = cudaMemcpy(result, result_dev, thetaxfine*thetayfine*nfreq * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy 3 failed!");
goto Error;
}
for (int i = 0; i < nfreq; i++) {
for (int j = 0; j < thetayfine; j++) {
for (int l = 0; l < thetaxfine; l++) {
int id = i * thetaxfine*thetayfine + j * thetaxfine + l;
out[i] += result[id]*e*e/(4.0*pi*pi)*(thetaxmax-thetaxmin)/(double)thetaxfine*(thetaymax-thetaymin)/(double)thetayfine/tprim;
}
}
}
cudaFree(result_dev);
cudaFree(trajec_dev);
cudaFree(const_dev);
delete[] result;
Error:
return cudaStatus;
} // end calc2
cudaError_t calculator3(double *consts, double *trajec, double *out1, double *out2, double *out3, double *out4, double *out5, double *out6, double *out7, double *out8) {
cudaError_t cudaStatus;
//double *consts = new double[9];
double thetaxmin = consts[3];
double thetaxmax = consts[4];
double thetaymin = consts[5];
double thetaymax = consts[6];
double *const_dev;
double *result_dev;
double *trajec_dev;
cudaStatus = cudaMalloc((void**)&const_dev, 9 * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&result_dev, 8*thetaxfine*thetayfine*nfreq * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&trajec_dev, 8 * pointsprim * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(const_dev, consts, 9 * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 1 failed!");
goto Error;
}
cudaStatus = cudaMemcpy(trajec_dev, trajec, 8 * pointsprim * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 2 failed!");
goto Error;
}
myradKernelspin << <BLOCKS, THREADS >> > (const_dev, trajec_dev, result_dev);
cudaStatus = cudaPeekAtLastError();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "myradKernelspin launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myradKernelspin!\n", cudaStatus);
goto Error;
}
double *result = new double[thetaxfine*thetayfine*nfreq*8]();
cudaStatus = cudaMemcpy(result, result_dev, 8*thetaxfine*thetayfine*nfreq * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy 3 failed!");
goto Error;
}
for (int i = 0; i < nfreq; i++) {
for (int j = 0; j < thetayfine; j++) {
for (int l = 0; l < thetaxfine; l++) {
int id = i * thetaxfine*thetayfine + j * thetaxfine + l;
out1[i] += result[id] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out2[i] += result[id+ 1*thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine-1.0)*(thetaymax - thetaymin) / ((double)thetayfine-1.0);
out3[i] += result[id + 2*thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out4[i] += result[id + 3 * thetaxfine * thetayfine*nfreq] *(thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out5[i] += result[id + 4 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out6[i] += result[id + 5 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out7[i] += result[id + 6 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
out8[i] += result[id + 7 * thetaxfine * thetayfine*nfreq] * (thetaxmax - thetaxmin) / ((double)thetaxfine - 1.0)*(thetaymax - thetaymin) / ((double)thetayfine - 1.0);
}
}
}
cudaFree(result_dev);
cudaFree(trajec_dev);
cudaFree(const_dev);
delete[] result;
Error:
return cudaStatus;
} // end calc3
//
//cudaError_t calculator4(double *trajec, double vorthosqavg, double *out, double phimin, double phimax) {
// cudaError_t cudaStatus;
// double *consts = new double[10];
//
// consts[0] = m;
// consts[1] = wmin;
// consts[2] = wmax;
// consts[3] = phimin;
// consts[4] = phimax;
// consts[5] = thetamin;
// consts[6] = thetamax;
// consts[7] = E0 / m;
// consts[8] = (double)points1period;
// consts[9] = vorthosqavg;
//
//
//
//
// int nDevices;
// cudaGetDeviceCount(&nDevices);
// for (int i = 0; i < nDevices; i++) {
// cudaDeviceProp prop;
// cudaGetDeviceProperties(&prop, i);
// printf("Device Number: %d\n", i);
// printf(" Device name: %s\n", prop.name);
// printf(" Memory Clock Rate (KHz): %d\n",
// prop.memoryClockRate);
// printf(" Memory Bus Width (bits): %d\n",
// prop.memoryBusWidth);
// printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
// 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
// }
//
//
// cudaStatus = cudaSetDevice(0);
// //cudaDeviceReset();
//
////int devID = 0;
////int argc;
////char argv[1000];
////devID =findCudaDevice(argc, (const char **)argv);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
//
//
// double *const_dev;
// double *result_dev;
// double *trajec_dev;
//
// cudaStatus = cudaMalloc((void**)&const_dev, 10 * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&result_dev, harmonicsfine*phifine*nfreq * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&trajec_dev, 8 * points1period * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(const_dev, consts, 10 * sizeof(double), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy 1 failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(trajec_dev, trajec, 8 * points1period * sizeof(double), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy 2 failed!");
// goto Error;
// }
//
//
//
// myradKernelspin << <BLOCKS2, THREADS2 >> > (const_dev, trajec_dev, result_dev);
//
// cudaStatus = cudaPeekAtLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "myKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
//
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myKernel!\n", cudaStatus);
// goto Error;
// }
//
//
// double *result = new double[harmonicsfine*phifine*nfreq]();
//
// cudaStatus = cudaMemcpy(result, result_dev, harmonicsfine*phifine*nfreq * sizeof(double), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy 1 failed!");
// goto Error;
// }
//
// for (int i = 0; i < nfreq; i++) {
// for (int j = 0; j < harmonicsfine; j++) {
// for (int l = 0; l < phifine; l++) {
// int id = i * harmonicsfine*phifine + j * phifine + l;
// out[i] += result[id] * e*e*(phimax - phimin) / (double)phifine;
// }
// }
// }
//
// cudaFree(result_dev);
// cudaFree(trajec_dev);
// cudaFree(const_dev);
//
// delete[] result;
//
//Error:
// return cudaStatus;
//
//} // end calc2 |
c4977ff84651c95547b6a6fd8f84a03aff43d5ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
		 * Converts an inclusive scan result to an exclusive scan result
*
*/
__global__ void inclusiveToExclusiveScanResult(int n, int* odata, const int* idata) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
if (tid >= n) {
return;
}
if (tid == 0) {
odata[0] = 0;
return;
}
odata[tid] = idata[tid - 1];
}
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) {
return;
}
bools[tid] = (bool)idata[tid];
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) {
return;
}
if (bools[tid] == 1) {
odata[indices[tid]] = idata[tid];
}
}
}
}
| c4977ff84651c95547b6a6fd8f84a03aff43d5ae.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
		 * Converts an inclusive scan result to an exclusive scan result
*
*/
__global__ void inclusiveToExclusiveScanResult(int n, int* odata, const int* idata) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
if (tid >= n) {
return;
}
if (tid == 0) {
odata[0] = 0;
return;
}
odata[tid] = idata[tid - 1];
}
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) {
return;
}
bools[tid] = (bool)idata[tid];
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= n) {
return;
}
if (bools[tid] == 1) {
odata[indices[tid]] = idata[tid];
}
}
}
}
|
16aa25b159bb95778472eb2b654bbf08db82cade.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
// Due Tuesday, January 22, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
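    //@@ One possible approach (not implemented here) is a work-efficient Blelloch scan:
    //@@ scan each block's tile in shared memory (up-sweep then down-sweep), write the
    //@@ per-block sums to an auxiliary array, scan that array, and add the block offsets.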
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The number of elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(hipMemset(deviceOutput, 0, numElements*sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
    //@@ on the device
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
| 16aa25b159bb95778472eb2b654bbf08db82cade.cu | // MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
// Due Tuesday, January 22, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
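    //@@ One possible approach (not implemented here) is a work-efficient Blelloch scan:
    //@@ scan each block's tile in shared memory (up-sweep then down-sweep), write the
    //@@ per-block sums to an auxiliary array, scan that array, and add the block offsets.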
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The number of elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(cudaMemset(deviceOutput, 0, numElements*sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
    //@@ on the device
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
c1e0fa5eef8b1be00d2efd2f58f99bfc0ad3fe32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
////for debug purposes
//#define PRINT_RESIDUALS_SPARSE
//#define PRINT_RESIDUALS_DENSE
#define ENABLE_EARLY_OUT
#include "GlobalDefines.h"
#include "SolverBundlingParameters.h"
#include "SolverBundlingState.h"
#include "SolverBundlingUtil.h"
#include "SolverBundlingEquations.h"
#include "SolverBundlingEquationsLie.h"
#include "SolverBundlingDenseUtil.h"
#include "../../SiftGPU/CUDATimer.h"
#include <conio.h>
#define THREADS_PER_BLOCK_DENSE_DEPTH 128
#define THREADS_PER_BLOCK_DENSE_DEPTH_FLIP 64
#define THREADS_PER_BLOCK_DENSE_OVERLAP 512
/////////////////////////////////////////////////////////////////////////
// Dense Depth Term
/////////////////////////////////////////////////////////////////////////
template<bool usePairwise>
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
// image indices
unsigned int i, j; // project from j to i
if (usePairwise) {
i = blockIdx.x; j = blockIdx.y; // all pairwise
if (i >= j) return;
}
else {
i = blockIdx.x; j = i + 1; // frame-to-frame
}
if (input.d_validImages[i] == 0 || input.d_validImages[j] == 0) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees //TODO HERE ANGIE
//if (!computeAngleDiff(transform, 0.8f)) return; //~45 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //~30 degrees
// find correspondence
__shared__ int foundCorr[1]; foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src //TODO PARAMS
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
if (foundCorr[0] > 10) { //TODO PARAMS
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
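// Note (comment added for readability): for every strictly upper-triangular
// index (x > y) the kernel below copies JtJ(x,y) into JtJ(y,x), i.e. it
// mirrors the lower triangle of the row-major 6N x 6N matrix into the upper
// triangle so d_denseJtJ ends up fully symmetric (the accumulation in
// addToLocalSystem presumably fills only one half).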
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float* d_JtJ)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; //invTransform_i * transform_j
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse();
float4x4 transform = invTransform_i * transform_j;
#endif
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
int count = 0;
//TODO HERE ANGIE
#ifdef CUDACACHE_UCHAR_NORMALS
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#elif defined(CUDACACHE_FLOAT_NORMALS)
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#else
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#endif
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], 1.0f);
count++;
} // found correspondence
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], count);
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
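// Note (comment added for readability): the kernel below converts the raw
// per-pair correspondence counts into weights: pairs with fewer than 800
// correspondences are discarded (weight 0), all others receive
// 1 / min(ln(count), 9), so pairs with a very large overlap contribute a
// smaller weight per residual.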
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
//if (x < 3200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
if (x < 800) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 400) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS //TODO EVAL DEBUG
else {
state.d_denseCorrCounts[idx] = 1.0f / min(logf(x), 9.0f); // natural log //TODO PARAMS
}
//state.d_denseCorrCounts[idx] = 1.0f / clamp(logf(x), 2.0f, 9.0f); // natural log //TODO PARAMS
}
}
}
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
if (imPairWeight == 0.0f) return;
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform_i = state.d_xTransforms[i];
float4x4 transform_j = state.d_xTransforms[j];
float4x4 invTransform_i = state.d_xTransformInverses[i];
float4x4 invTransform_j = state.d_xTransformInverses[j];
float4x4 transform = invTransform_i * transform_j;
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
// point-to-plane term
matNxM<1, 6> depthJacBlockRow_i, depthJacBlockRow_j; depthJacBlockRow_i.setZero(); depthJacBlockRow_j.setZero();
float depthRes = 0.0f; float depthWeight = 0.0f;
// color term
matNxM<1, 6> colorJacBlockRow_i, colorJacBlockRow_j; colorJacBlockRow_i.setZero(); colorJacBlockRow_j.setZero();
float colorRes = 0.0f; float colorWeight = 0.0f;
// find correspondence
float3 camPosSrc; float3 camPosSrcToTgt; float3 camPosTgt; float3 normalTgt; float2 tgtScreenPos;
//TODO HERE ANGIE
#ifdef CUDACACHE_FLOAT_NORMALS
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#elif defined(CUDACACHE_UCHAR_NORMALS)
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#else
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#endif
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
float3 diff = camPosTgt - camPosSrcToTgt;
depthRes = dot(diff, normalTgt);
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, 0.5f*((1.0f - length(diff) / parameters.denseDistThresh) + (1.0f - camPosTgt.z / parameters.denseDepthMax)));
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.0f)); //fr1_desk
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.5f)); //fr3_office, fr2_xyz_half // livingroom1
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 3.0f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 1.8f)); //fr3_office, fr1_desk_f20
depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 2.5f)); //fr2_xyz_half
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 3.5f), 1.8f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / parameters.denseDepthMax), 1.8f)); //TODO EVAL DEBUGGING
//float wtgt = (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//float wsrc = (pow(max(0.0f, 1.0f - camPosSrc.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * wtgt * wsrc;
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
#else
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, normalTgt);
#endif
}
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx
, state.d_sumResidual, state.d_corrCount);
//addToLocalSystemBrute(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const float2 intensityDerivTgt = bilinearInterpolationFloat2(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDerivsDownsampled, input.denseDepthWidth, input.denseDepthHeight);
const float intensityTgt = bilinearInterpolationFloat(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDownsampled, input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityDerivTgt.x != MINF && abs(colorRes) < parameters.denseColorThresh && length(intensityDerivTgt) > parameters.denseColorGradientMin);
if (foundCorrColor) {
const float2 focalLength = make_float2(input.intrinsics.x, input.intrinsics.y);
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#else
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#endif
colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / (1.15f*parameters.denseColorThresh));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / parameters.denseColorThresh) * max(0.0f, (1.0f - camPosTgt.z / 1.0f));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 0.5f*(1.0f - abs(colorRes) / parameters.denseColorThresh) + 0.5f*max(0.0f, (1.0f - camPosTgt.z / 1.0f)));
}
}
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx
, state.d_sumResidualColor, state.d_corrCountColor);
//addToLocalSystemBrute(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
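// Host-side driver (summary comment added for readability): builds the dense
// 6N x 6N JtJ and 6N-dim Jtr by (1) finding overlapping image pairs with
// FindImageImageCorr_Kernel, (2) counting and weighting the dense
// correspondences per pair, (3) accumulating the point-to-plane and
// photometric terms with BuildDenseSystem_Kernel, and (4) symmetrizing JtJ
// with FlipJtJ_Kernel. Returns false if no overlapping image pairs are found.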
bool BuildDenseSystem(const SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
#ifdef PRINT_RESIDUALS_DENSE
cutilSafeCall(hipMemset(state.d_corrCount, 0, sizeof(int)));
cutilSafeCall(hipMemset(state.d_sumResidual, 0, sizeof(float)));
cutilSafeCall(hipMemset(state.d_corrCountColor, 0, sizeof(int)));
cutilSafeCall(hipMemset(state.d_sumResidualColor, 0, sizeof(float)));
#endif
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
cutilSafeCall(hipMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
cutilSafeCall(hipMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
cutilSafeCall(hipMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
cutilSafeCall(hipMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
dim3 gridImImOverlap;
if (parameters.useDenseDepthAllPairwise) gridImImOverlap = dim3(N, N, 1); // pairwise
else gridImImOverlap = dim3(N - 1, 1, 1); // for frame-to-frame
if (timer) timer->startEvent("BuildDenseDepthSystem - find image corr");
if (parameters.useDenseDepthAllPairwise) FindImageImageCorr_Kernel<true> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
else FindImageImageCorr_Kernel<false> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
int numOverlapImagePairs;
cutilSafeCall(hipMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages, sizeof(int), hipMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth*input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) / THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
//if (N > 11) printf("num overlap image pairs = %d\n", numOverlapImagePairs); //debugging only
if (timer) timer->startEvent("BuildDenseDepthSystem - compute im-im weights");
FindDenseCorrespondences_Kernel << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging //remember the delete!
//float* denseCorrCounts = new float[numOverlapImagePairs];
//cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, hipMemcpyDeviceToHost));
//unsigned int totalCount = 0;
//for (unsigned int i = 0; i < numOverlapImagePairs; i++) { totalCount += (unsigned int)denseCorrCounts[i]; }
//printf("total count = %d\n", totalCount);
//uint2* imageIndices = new uint2[numOverlapImagePairs];
//cutilSafeCall(hipMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, hipMemcpyDeviceToHost));
//if (imageIndices) delete[] imageIndices;
////debugging
//debugging - compute some overlap stats
//if (true || input.numberOfImages > 11) {
// float4x4* transforms = new float4x4[input.numberOfImages];
// float* denseCorrCounts = new float[numOverlapImagePairs];
// uint2* imageIndices = new uint2[numOverlapImagePairs];
// cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(transforms, state.d_xTransforms, sizeof(float4x4)*input.numberOfImages, hipMemcpyDeviceToHost));
// FILE* fp = fopen("debug/overlaps.csv", "w");
// char buffer[128];
// for (int i = 0; i < numOverlapImagePairs; i++) {
// if (denseCorrCounts[i] > 0) {
// float3 d = transforms[imageIndices[i].x].getTranslation() - transforms[imageIndices[i].y].getTranslation();
// sprintf(buffer, "%d,%d,%d,%f\n", imageIndices[i].x, imageIndices[i].y, (int)denseCorrCounts[i], length(d));
// fwrite(buffer, sizeof(char), strlen(buffer), fp);
// }
// }
// fclose(fp);
// if (transforms) delete[] transforms;
// if (denseCorrCounts) delete[] denseCorrCounts;
// if (imageIndices) delete[] imageIndices;
// int a = 5;
//}
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(maxDenseImPairs, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*maxDenseImPairs, hipMemcpyDeviceToHost));
//totalCount = 0;
//for (unsigned int i = 0; i < maxDenseImPairs; i++) { if (denseCorrCounts[i] > 0.0f) totalCount++; }
//printf("total count = %d\n", totalCount);
//if (denseCorrCounts) delete[] denseCorrCounts;
////debugging
if (timer) timer->endEvent();
if (timer) timer->startEvent("BuildDenseDepthSystem - build jtj/jtr");
if (parameters.weightDenseDepth > 0.0f) {
if (parameters.weightDenseColor > 0.0f) BuildDenseSystem_Kernel<true, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
else BuildDenseSystem_Kernel<true, false> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
else {
BuildDenseSystem_Kernel<false, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//bool debugPrint = true;
//float* h_JtJ = NULL;
//float* h_Jtr = NULL;
//if (debugPrint) {
// h_JtJ = new float[sizeJtJ];
// h_Jtr = new float[sizeJtr];
// cutilSafeCall(hipMemcpy(h_JtJ, state.d_denseJtJ, sizeof(float) * sizeJtJ, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(h_Jtr, state.d_denseJtr, sizeof(float) * sizeJtr, hipMemcpyDeviceToHost));
// printf("JtJ:\n");
// //for (unsigned int i = 0; i < 6 * N; i++) {
// // for (unsigned int j = 0; j < 6 * N; j++)
// for (unsigned int i = 6 * 1; i < 6 * 2; i++) {
// for (unsigned int j = 6 * 1; j < 6 * 2; j++)
// printf(" %f,", h_JtJ[j * 6 * N + i]);
// printf("\n");
// }
// printf("Jtr:\n");
// for (unsigned int i = 0; i < 6 * N; i++) {
// printf(" %f,", h_Jtr[i]);
// }
// printf("\n");
//}
////debugging
#ifdef PRINT_RESIDUALS_DENSE
if (parameters.weightDenseDepth > 0) {
float sumResidual; int corrCount;
cutilSafeCall(hipMemcpy(&sumResidual, state.d_sumResidual, sizeof(float), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(&corrCount, state.d_corrCount, sizeof(int), hipMemcpyDeviceToHost));
printf("\tdense depth: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseDepth, sumResidual / parameters.weightDenseDepth, sumResidual, corrCount);
}
if (parameters.weightDenseColor > 0) {
float sumResidual; int corrCount;
cutilSafeCall(hipMemcpy(&sumResidual, state.d_sumResidualColor, sizeof(float), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(&corrCount, state.d_corrCountColor, sizeof(int), hipMemcpyDeviceToHost));
printf("\tdense color: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseColor, sumResidual / parameters.weightDenseColor, sumResidual, corrCount);
}
#endif
const unsigned int flipgrid = (sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << <flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return true;
}
//todo more efficient?? (there are multiple per image-image...)
//get high residuals
__global__ void collectHighResidualsDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters, unsigned int maxNumHighResiduals)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.highResidualThresh) {
int idx = atomicAdd(state.d_countHighResidual, 1);
if (idx < maxNumHighResiduals) {
analysis.d_maxResidual[idx] = residual;
analysis.d_maxResidualIndex[idx] = corrIdx;
}
}
}
}
extern "C" void collectHighResiduals(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
cutilSafeCall(hipMemset(state.d_countHighResidual, 0, sizeof(int)));
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
unsigned int maxNumHighResiduals = (input.maxCorrPerImage*input.maxNumberOfImages + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
collectHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters, maxNumHighResiduals);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Max Residual
/////////////////////////////////////////////////////////////////////////
__global__ void EvalMaxResidualDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters)
{
__shared__ int maxResIndex[THREADS_PER_BLOCK];
__shared__ float maxRes[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
maxResIndex[threadIdx.x] = 0;
maxRes[threadIdx.x] = 0.0f;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
maxRes[threadIdx.x] = residual;
maxResIndex[threadIdx.x] = corrIdx;
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
if (maxRes[first] < maxRes[second]) {
maxRes[first] = maxRes[second];
maxResIndex[first] = maxResIndex[second];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
//printf("d_maxResidual[%d] = %f (index %d)\n", blockIdx.x, maxRes[0], maxResIndex[0]);
analysis.d_maxResidual[blockIdx.x] = maxRes[0];
analysis.d_maxResidualIndex[blockIdx.x] = maxResIndex[0];
}
}
}
extern "C" void evalMaxResidual(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
EvalMaxResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N) {
residual = evalFDevice(x, input, state, parameters);
//float out = warpReduce(residual);
//unsigned int laneid;
////This command gets the lane ID within the current warp
//asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
//if (laneid == 0) {
// atomicAdd(&state.d_sumResidual[0], out);
//}
atomicAdd(&state.d_sumResidual[0], residual);
}
}
extern "C" float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
float residual = 0.0f;
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
residual = state.getSumResidual();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return residual;
}
/////////////////////////////////////////////////////////////////////////
// Eval Linear Residual
/////////////////////////////////////////////////////////////////////////
//__global__ void SumLinearResDevice(SolverInput input, SolverState state, SolverParameters parameters)
//{
// const unsigned int N = input.numberOfImages; // Number of block variables
// const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
//
// float residual = 0.0f;
// if (x > 0 && x < N) {
// residual = dot(state.d_rRot[x], state.d_rRot[x]) + dot(state.d_rTrans[x], state.d_rTrans[x]);
// atomicAdd(state.d_sumLinResidual, residual);
// }
//}
//float EvalLinearRes(SolverInput& input, SolverState& state, SolverParameters& parameters)
//{
// float residual = 0.0f;
//
// const unsigned int N = input.numberOfImages; // Number of block variables
//
// // Do PCG step
// const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
//
// float init = 0.0f;
// cutilSafeCall(hipMemcpy(state.d_sumLinResidual, &init, sizeof(float), hipMemcpyHostToDevice));
//
// SumLinearResDevice << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//#ifdef _DEBUG
// cutilSafeCall(hipDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//
// cutilSafeCall(hipMemcpy(&residual, state.d_sumLinResidual, sizeof(float), hipMemcpyDeviceToHost));
// return residual;
//}
/////////////////////////////////////////////////////////////////////////
// Count High Residuals
/////////////////////////////////////////////////////////////////////////
__global__ void CountHighResidualsDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.verifyOptDistThresh)
atomicAdd(state.d_countHighResidual, 1);
}
}
extern "C" int countHighResiduals(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
cutilSafeCall(hipMemset(state.d_countHighResidual, 0, sizeof(int)));
CountHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
int count;
cutilSafeCall(hipMemcpy(&count, state.d_countHighResidual, sizeof(int), hipMemcpyDeviceToHost));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return count;
}
/////////////////////////////////////////////////////////////////////////
// Convergence Analysis
/////////////////////////////////////////////////////////////////////////
//uses same data store as max residual
__global__ void EvalGNConvergenceDevice(SolverInput input, SolverStateAnalysis analysis, SolverState state) //compute max of delta
{
__shared__ float maxVal[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
maxVal[threadIdx.x] = 0.0f;
if (x < N)
{
if (x == 0 || input.d_validImages[x] == 0)
maxVal[threadIdx.x] = 0.0f;
else {
float3 r3 = fmaxf(fabs(state.d_deltaRot[x]), fabs(state.d_deltaTrans[x]));
float r = fmaxf(r3.x, fmaxf(r3.y, r3.z));
maxVal[threadIdx.x] = r;
}
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
maxVal[first] = fmaxf(maxVal[first], maxVal[second]);
}
__syncthreads();
}
if (threadIdx.x == 0) {
analysis.d_maxResidual[blockIdx.x] = maxVal[0];
}
}
}
float EvalGNConvergence(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfImages;
const unsigned int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
EvalGNConvergenceDevice << < blocksPerGrid, THREADS_PER_BLOCK >> >(input, analysis, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//copy to host and compute max
cutilSafeCall(hipMemcpy(analysis.h_maxResidual, analysis.d_maxResidual, sizeof(float) * blocksPerGrid, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(analysis.h_maxResidualIndex, analysis.d_maxResidualIndex, sizeof(int) * blocksPerGrid, hipMemcpyDeviceToHost));
float maxVal = 0.0f;
for (unsigned int i = 0; i < blocksPerGrid; i++) {
if (maxVal < analysis.h_maxResidual[i]) maxVal = analysis.h_maxResidual[i];
}
if (timer) timer->endEvent();
return maxVal;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
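// Quick reference (added for readability; buffer mapping inferred from the
// kernels below):
//   r_0 = -J^T F (PCGInit_Kernel1), z = M^{-1} r with the diagonal
//   preconditioner d_precondionerRot/Trans, p_0 = z_0
//   alpha_k = (r_k^T z_k) / (p_k^T A p_k)      -> d_rDotzOld[x] / d_scanAlpha[0]
//   delta_{k+1} = delta_k + alpha_k p_k        -> d_deltaRot / d_deltaTrans
//   r_{k+1}     = r_k - alpha_k A p_k          -> d_rRot / d_rTrans
//   beta_k = (r_{k+1}^T z_{k+1}) / (r_k^T z_k) -> d_scanAlpha[1] / d_rDotzOld[x]
//   p_{k+1} = z_{k+1} + beta_k p_k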
template<bool useDense>
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
float3 resRot, resTrans;
evalMinusJTFDevice<useDense>(x, input, state, parameters, resRot, resTrans); // residual = J^T * (-F) - A * delta_0 => J^T * (-F), since delta_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const float3 pRot = state.d_precondionerRot[x] * resRot; // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const float3 pTrans = state.d_precondionerTrans[x] * resTrans; // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
d = dot(resRot, pRot) + dot(resTrans, pTrans); // x-th term of numerator for computing alpha and denominator for computing beta
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer) timer->startEvent("Initialization");
//!!!DEBUGGING //remember to uncomment the delete...
//float3* rRot = new float3[input.numberOfImages]; // -jtf
//float3* rTrans = new float3[input.numberOfImages];
//!!!DEBUGGING
cutilSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (parameters.useDense) PCGInit_Kernel1<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
else PCGInit_Kernel1<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//cutilSafeCall(hipMemcpy(rRot, state.d_rRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(rTrans, state.d_rTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr rRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr rTrans %d\n", i); getchar(); } }
//cutilSafeCall(hipMemcpy(rRot, state.d_pRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(rTrans, state.d_pTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr pRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr pTrans %d\n", i); getchar(); } }
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
//float scanAlpha;
//cutilSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
//if (rRot) delete[] rRot;
//if (rTrans) delete[] rTrans;
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
//inefficient
__global__ void PCGStep_Kernel_Dense_Brute(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseBruteDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans); // A x p_k => J^T x J x p_k
state.d_Ap_XRot[x] += rot;
state.d_Ap_XTrans[x] += trans;
}
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans, threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel0(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float3 tmp = applyJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k
state.d_Jp[x] = tmp; // store for next kernel call
}
}
__global__ void PCGStep_Kernel1a(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTDevice(x, input, state, parameters, rot, trans, threadIdx.x, lane); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
d = dot(state.d_pRot[x], state.d_Ap_XRot[x]) + dot(state.d_pTrans[x], state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_deltaRot[x] = state.d_deltaRot[x] + alpha*state.d_pRot[x]; // take a descent step
state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha*state.d_pTrans[x]; // take a descent step
float3 rRot = state.d_rRot[x] - alpha*state.d_Ap_XRot[x]; // update residual
state.d_rRot[x] = rRot; // store for next kernel call
float3 rTrans = state.d_rTrans[x] - alpha*state.d_Ap_XTrans[x]; // update residual
state.d_rTrans[x] = rTrans; // store for next kernel call
float3 zRot = state.d_precondionerRot[x] * rRot; // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
float3 zTrans = state.d_precondionerTrans[x] * rTrans; // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
b = dot(zRot, rRot) + dot(zTrans, rTrans); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N)
{
const float rDotzNew = state.d_scanAlpha[1]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_pRot[x] = state.d_zRot[x] + beta*state.d_pRot[x]; // update descent direction
state.d_pTrans[x] = state.d_zTrans[x] + beta*state.d_pTrans[x]; // update descent direction
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
if (lastIteration)
{
//if (input.d_validImages[x]) { //not really necessary
#ifdef USE_LIE_SPACE //TODO just keep that matrix transforms around
float3 rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x], rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
#else
state.d_xRot[x] = state.d_xRot[x] + state.d_deltaRot[x];
state.d_xTrans[x] = state.d_xTrans[x] + state.d_deltaTrans[x];
#endif
//}
}
}
}
template<bool useSparse, bool useDense>
bool PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, bool lastIteration, CUDATimer *timer)
{
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer) timer->startEvent("PCGIteration");
cutilSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
// sparse part
if (useSparse) {
const unsigned int Ncorr = input.numberOfCorrespondences;
const int blocksPerGridCorr = (Ncorr + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
PCGStep_Kernel0 << <blocksPerGridCorr, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
PCGStep_Kernel1a << < N, THREADS_PER_BLOCK_JT >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
if (useDense) {
//if (timer) timer->startEvent("apply JTJ dense");
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> >(input, state, parameters);
//PCGStep_Kernel_Dense_Brute << < N, 1 >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//if (timer) timer->endEvent();
}
//!!!debugging
//float3* Ap_Rot = new float3[input.numberOfImages];
//float3* Ap_Trans = new float3[input.numberOfImages];
//cutilSafeCall(hipMemcpy(Ap_Rot, state.d_Ap_XRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(Ap_Trans, state.d_Ap_XTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Rot[i].x)) { printf("NaN at Ap rot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Trans[i].x)) { printf("NaN at Ap trans %d\n", i); getchar(); } }
//if (Ap_Rot) delete[] Ap_Rot;
//if (Ap_Trans) delete[] Ap_Trans;
//!!!debugging
PCGStep_Kernel1b << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
#ifdef ENABLE_EARLY_OUT //for convergence
float scanAlpha; cutilSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
//if (fabs(scanAlpha) < 0.00005f) lastIteration = true; //todo check this part
//if (fabs(scanAlpha) < 1e-6) lastIteration = true; //todo check this part
if (fabs(scanAlpha) < 5e-7) { lastIteration = true; } //todo check this part
#endif
if (lastIteration) {
PCGStep_Kernel3<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
else {
PCGStep_Kernel3<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return lastIteration;
}
#ifdef USE_LIE_SPACE //TODO
////////////////////////////////////////////////////////////////////
// matrix <-> pose
////////////////////////////////////////////////////////////////////
__global__ void convertLiePosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].getInverse();
}
}
extern "C"
void convertLiePosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
convertLiePosesToMatricesCU_Kernel << <(numTransforms + 8 - 1) / 8, 8 >> >(d_rot, d_trans, numTransforms, d_transforms, d_transformInvs);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
#endif
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
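// Summary (comment added for readability): each Gauss-Newton iteration takes
// its sparse/dense weights from the per-iteration schedules in SolverInput,
// rebuilds the dense JtJ/Jtr whenever a dense weight is non-zero, then runs
// the PCG inner loop (which has its own early-out on scanAlpha); with
// ENABLE_EARLY_OUT the outer loop also stops once the maximum pose update
// reported by EvalGNConvergence drops below 0.005.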
extern "C" void solveBundlingStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, float* convergenceAnalysis, CUDATimer *timer)
{
if (convergenceAnalysis) {
float initialResidual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[0] = initialResidual; // initial residual
}
//!!!DEBUGGING
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
if (input.numberOfCorrespondences == 0) { printf("ERROR: %d correspondences\n", input.numberOfCorrespondences); getchar(); }
float initialResidual = EvalResidual(input, state, parameters, timer);
printf("initial sparse = %f*%f = %f\n", parameters.weightSparse, initialResidual / parameters.weightSparse, initialResidual);
}
#endif
//float3* xRot = new float3[input.numberOfImages]; //remember the delete!
//float3* xTrans = new float3[input.numberOfImages];
//timer = new CUDATimer();
//static unsigned int totalLinIters = 0, numLin = 0, totalNonLinIters = 0, numNonLin = 0;
//!!!DEBUGGING
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
Initialization(input, state, parameters, timer);
if (parameters.weightSparse > 0.0f) {
if (parameters.useDense) {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) { break; }
}
else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, false>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
//totalLinIters += (linIter+1); numLin++;
break;
}
}
}
else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<false, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) break;
}
//!!!debugging
//cutilSafeCall(hipMemcpy(xRot, state.d_xRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(xTrans, state.d_xTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//!!!debugging
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
float residual = EvalResidual(input, state, parameters, timer);
printf("[niter %d] weight * sparse = %f*%f = %f\t[#corr = %d]\n", nIter, parameters.weightSparse, residual / parameters.weightSparse, residual, input.numberOfCorrespondences);
}
#endif
if (convergenceAnalysis) {
float residual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[nIter + 1] = residual;
}
//if (timer) timer->evaluate(true);
#ifdef ENABLE_EARLY_OUT //convergence
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.01f) { //!!! TODO CHECK HOW THESE GENERALIZE
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.005f) { //0.001?
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.001f) {
//if (!parameters.useDense) { totalNonLinIters += (nIter+1); numNonLin++; }
break;
}
//else if (!parameters.useDense && nIter == parameters.nNonLinearIterations - 1) { totalNonLinIters += (nIter+1); numNonLin++; }
#endif
}
//!!!debugging
//if (xRot) delete[] xRot;
//if (xTrans) delete[] xTrans;
//if (timer) { timer->evaluate(true, false); delete timer; }
//if (!parameters.useDense) { printf("mean #pcg its = %f\tmean #gn its = %f\n", (float)totalLinIters / (float)numLin, (float)totalNonLinIters / (float)numNonLin); } //just stats for global solve
//!!!debugging
}
////////////////////////////////////////////////////////////////////
// build variables to correspondences lookup
////////////////////////////////////////////////////////////////////
__global__ void BuildVariablesToCorrespondencesTableDevice(EntryJ* d_correspondences, unsigned int numberOfCorrespondences,
unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow)
{
const unsigned int N = numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
EntryJ& corr = d_correspondences[x];
if (corr.isValid()) {
int offset0 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_i], 1); // may overflow - need to check when read
int offset1 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_j], 1); // may overflow - need to check when read
if (offset0 < maxNumCorrespondencesPerImage && offset1 < maxNumCorrespondencesPerImage) {
d_variablesToCorrespondences[corr.imgIdx_i * maxNumCorrespondencesPerImage + offset0] = x;
d_variablesToCorrespondences[corr.imgIdx_j * maxNumCorrespondencesPerImage + offset1] = x;
}
else { //invalidate
printf("EXCEEDED MAX NUM CORR PER IMAGE IN SOLVER, INVALIDATING %d(%d,%d) [%d,%d | %d]\n",
x, corr.imgIdx_i, corr.imgIdx_j, offset0, offset1, maxNumCorrespondencesPerImage); //debugging
corr.setInvalid(); //make sure j corresponds to jt
}
}
}
}
extern "C" void buildVariablesToCorrespondencesTableCUDA(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow, CUDATimer* timer)
{
const unsigned int N = numberOfCorrespondences;
if (timer) timer->startEvent(__FUNCTION__);
BuildVariablesToCorrespondencesTableDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_correspondences, numberOfCorrespondences, maxNumCorrespondencesPerImage, d_variablesToCorrespondences, d_numEntriesPerRow);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
| c1e0fa5eef8b1be00d2efd2f58f99bfc0ad3fe32.cu | #include <iostream>
////for debug purposes
//#define PRINT_RESIDUALS_SPARSE
//#define PRINT_RESIDUALS_DENSE
#define ENABLE_EARLY_OUT
#include "GlobalDefines.h"
#include "SolverBundlingParameters.h"
#include "SolverBundlingState.h"
#include "SolverBundlingUtil.h"
#include "SolverBundlingEquations.h"
#include "SolverBundlingEquationsLie.h"
#include "SolverBundlingDenseUtil.h"
#include "../../SiftGPU/CUDATimer.h"
#include <conio.h>
#define THREADS_PER_BLOCK_DENSE_DEPTH 128
#define THREADS_PER_BLOCK_DENSE_DEPTH_FLIP 64
#define THREADS_PER_BLOCK_DENSE_OVERLAP 512
/////////////////////////////////////////////////////////////////////////
// Dense Depth Term
/////////////////////////////////////////////////////////////////////////
template<bool usePairwise>
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
// image indices
unsigned int i, j; // project from j to i
if (usePairwise) {
i = blockIdx.x; j = blockIdx.y; // all pairwise
if (i >= j) return;
}
else {
i = blockIdx.x; j = i + 1; // frame-to-frame
}
if (input.d_validImages[i] == 0 || input.d_validImages[j] == 0) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees //TODO HERE ANGIE
//if (!computeAngleDiff(transform, 0.8f)) return; //~45 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //~30 degrees
// find correspondence
__shared__ int foundCorr[1]; foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src //TODO PARAMS
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
if (foundCorr[0] > 10) { //TODO PARAMS
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float* d_JtJ)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; //invTransform_i * transform_j
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse();
float4x4 transform = invTransform_i * transform_j;
#endif
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
int count = 0;
//TODO HERE ANGIE
#ifdef CUDACACHE_UCHAR_NORMALS
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#elif defined(CUDACACHE_FLOAT_NORMALS)
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#else
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#endif
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], 1.0f);
count++;
} // found correspondence
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], count);
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
//if (x < 3200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
if (x < 800) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 400) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS //TODO EVAL DEBUG
else {
state.d_denseCorrCounts[idx] = 1.0f / min(logf(x), 9.0f); // natural log //TODO PARAMS
}
//state.d_denseCorrCounts[idx] = 1.0f / clamp(logf(x), 2.0f, 9.0f); // natural log //TODO PARAMS
}
}
}
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
if (imPairWeight == 0.0f) return;
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform_i = state.d_xTransforms[i];
float4x4 transform_j = state.d_xTransforms[j];
float4x4 invTransform_i = state.d_xTransformInverses[i];
float4x4 invTransform_j = state.d_xTransformInverses[j];
float4x4 transform = invTransform_i * transform_j;
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
// point-to-plane term
matNxM<1, 6> depthJacBlockRow_i, depthJacBlockRow_j; depthJacBlockRow_i.setZero(); depthJacBlockRow_j.setZero();
float depthRes = 0.0f; float depthWeight = 0.0f;
// color term
matNxM<1, 6> colorJacBlockRow_i, colorJacBlockRow_j; colorJacBlockRow_i.setZero(); colorJacBlockRow_j.setZero();
float colorRes = 0.0f; float colorWeight = 0.0f;
// find correspondence
float3 camPosSrc; float3 camPosSrcToTgt; float3 camPosTgt; float3 normalTgt; float2 tgtScreenPos;
//TODO HERE ANGIE
#ifdef CUDACACHE_FLOAT_NORMALS
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#elif defined(CUDACACHE_UCHAR_NORMALS)
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#else
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#endif
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
float3 diff = camPosTgt - camPosSrcToTgt;
depthRes = dot(diff, normalTgt);
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, 0.5f*((1.0f - length(diff) / parameters.denseDistThresh) + (1.0f - camPosTgt.z / parameters.denseDepthMax)));
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.0f)); //fr1_desk
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.5f)); //fr3_office, fr2_xyz_half // livingroom1
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 3.0f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 1.8f)); //fr3_office, fr1_desk_f20
depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 2.5f)); //fr2_xyz_half
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 3.5f), 1.8f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / parameters.denseDepthMax), 1.8f)); //TODO EVAL DEBUGGING
//float wtgt = (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//float wsrc = (pow(max(0.0f, 1.0f - camPosSrc.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * wtgt * wsrc;
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
#else
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, normalTgt);
#endif
}
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx
, state.d_sumResidual, state.d_corrCount);
//addToLocalSystemBrute(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const float2 intensityDerivTgt = bilinearInterpolationFloat2(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDerivsDownsampled, input.denseDepthWidth, input.denseDepthHeight);
const float intensityTgt = bilinearInterpolationFloat(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDownsampled, input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityDerivTgt.x != MINF && abs(colorRes) < parameters.denseColorThresh && length(intensityDerivTgt) > parameters.denseColorGradientMin);
if (foundCorrColor) {
const float2 focalLength = make_float2(input.intrinsics.x, input.intrinsics.y);
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#else
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#endif
colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / (1.15f*parameters.denseColorThresh));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / parameters.denseColorThresh) * max(0.0f, (1.0f - camPosTgt.z / 1.0f));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 0.5f*(1.0f - abs(colorRes) / parameters.denseColorThresh) + 0.5f*max(0.0f, (1.0f - camPosTgt.z / 1.0f)));
}
}
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx
, state.d_sumResidualColor, state.d_corrCountColor);
//addToLocalSystemBrute(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
bool BuildDenseSystem(const SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
#ifdef PRINT_RESIDUALS_DENSE
cutilSafeCall(cudaMemset(state.d_corrCount, 0, sizeof(int)));
cutilSafeCall(cudaMemset(state.d_sumResidual, 0, sizeof(float)));
cutilSafeCall(cudaMemset(state.d_corrCountColor, 0, sizeof(int)));
cutilSafeCall(cudaMemset(state.d_sumResidualColor, 0, sizeof(float)));
#endif
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
cutilSafeCall(cudaMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
cutilSafeCall(cudaMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
cutilSafeCall(cudaMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
cutilSafeCall(cudaMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
dim3 gridImImOverlap;
if (parameters.useDenseDepthAllPairwise) gridImImOverlap = dim3(N, N, 1); // pairwise
else gridImImOverlap = dim3(N - 1, 1, 1); // for frame-to-frame
if (timer) timer->startEvent("BuildDenseDepthSystem - find image corr");
if (parameters.useDenseDepthAllPairwise) FindImageImageCorr_Kernel<true> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
else FindImageImageCorr_Kernel<false> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
int numOverlapImagePairs;
cutilSafeCall(cudaMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages, sizeof(int), cudaMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth*input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) / THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
//if (N > 11) printf("num overlap image pairs = %d\n", numOverlapImagePairs); //debugging only
if (timer) timer->startEvent("BuildDenseDepthSystem - compute im-im weights");
FindDenseCorrespondences_Kernel << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging //remember the delete!
//float* denseCorrCounts = new float[numOverlapImagePairs];
//cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
//unsigned int totalCount = 0;
//for (unsigned int i = 0; i < numOverlapImagePairs; i++) { totalCount += (unsigned int)denseCorrCounts[i]; }
//printf("total count = %d\n", totalCount);
//uint2* imageIndices = new uint2[numOverlapImagePairs];
//cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
//if (imageIndices) delete[] imageIndices;
////debugging
//debugging - compute some overlap stats
//if (true || input.numberOfImages > 11) {
// float4x4* transforms = new float4x4[input.numberOfImages];
// float* denseCorrCounts = new float[numOverlapImagePairs];
// uint2* imageIndices = new uint2[numOverlapImagePairs];
// cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(transforms, state.d_xTransforms, sizeof(float4x4)*input.numberOfImages, cudaMemcpyDeviceToHost));
// FILE* fp = fopen("debug/overlaps.csv", "w");
// char buffer[128];
// for (int i = 0; i < numOverlapImagePairs; i++) {
// if (denseCorrCounts[i] > 0) {
// float3 d = transforms[imageIndices[i].x].getTranslation() - transforms[imageIndices[i].y].getTranslation();
// sprintf(buffer, "%d,%d,%d,%f\n", imageIndices[i].x, imageIndices[i].y, (int)denseCorrCounts[i], length(d));
// fwrite(buffer, sizeof(char), strlen(buffer), fp);
// }
// }
// fclose(fp);
// if (transforms) delete[] transforms;
// if (denseCorrCounts) delete[] denseCorrCounts;
// if (imageIndices) delete[] imageIndices;
// int a = 5;
//}
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(maxDenseImPairs, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*maxDenseImPairs, cudaMemcpyDeviceToHost));
//totalCount = 0;
//for (unsigned int i = 0; i < maxDenseImPairs; i++) { if (denseCorrCounts[i] > 0.0f) totalCount++; }
//printf("total count = %d\n", totalCount);
//if (denseCorrCounts) delete[] denseCorrCounts;
////debugging
if (timer) timer->endEvent();
if (timer) timer->startEvent("BuildDenseDepthSystem - build jtj/jtr");
if (parameters.weightDenseDepth > 0.0f) {
if (parameters.weightDenseColor > 0.0f) BuildDenseSystem_Kernel<true, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
else BuildDenseSystem_Kernel<true, false> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
else {
BuildDenseSystem_Kernel<false, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//bool debugPrint = true;
//float* h_JtJ = NULL;
//float* h_Jtr = NULL;
//if (debugPrint) {
// h_JtJ = new float[sizeJtJ];
// h_Jtr = new float[sizeJtr];
// cutilSafeCall(cudaMemcpy(h_JtJ, state.d_denseJtJ, sizeof(float) * sizeJtJ, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(h_Jtr, state.d_denseJtr, sizeof(float) * sizeJtr, cudaMemcpyDeviceToHost));
// printf("JtJ:\n");
// //for (unsigned int i = 0; i < 6 * N; i++) {
// // for (unsigned int j = 0; j < 6 * N; j++)
// for (unsigned int i = 6 * 1; i < 6 * 2; i++) {
// for (unsigned int j = 6 * 1; j < 6 * 2; j++)
// printf(" %f,", h_JtJ[j * 6 * N + i]);
// printf("\n");
// }
// printf("Jtr:\n");
// for (unsigned int i = 0; i < 6 * N; i++) {
// printf(" %f,", h_Jtr[i]);
// }
// printf("\n");
//}
////debugging
#ifdef PRINT_RESIDUALS_DENSE
if (parameters.weightDenseDepth > 0) {
float sumResidual; int corrCount;
cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidual, sizeof(float), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCount, sizeof(int), cudaMemcpyDeviceToHost));
printf("\tdense depth: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseDepth, sumResidual / parameters.weightDenseDepth, sumResidual, corrCount);
}
if (parameters.weightDenseColor > 0) {
float sumResidual; int corrCount;
cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidualColor, sizeof(float), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCountColor, sizeof(int), cudaMemcpyDeviceToHost));
printf("\tdense color: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseColor, sumResidual / parameters.weightDenseColor, sumResidual, corrCount);
}
#endif
const unsigned int flipgrid = (sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << <flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return true;
}
//todo more efficient?? (there are multiple per image-image...)
//get high residuals
__global__ void collectHighResidualsDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters, unsigned int maxNumHighResiduals)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.highResidualThresh) {
int idx = atomicAdd(state.d_countHighResidual, 1);
if (idx < maxNumHighResiduals) {
analysis.d_maxResidual[idx] = residual;
analysis.d_maxResidualIndex[idx] = corrIdx;
}
}
}
}
extern "C" void collectHighResiduals(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int)));
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
unsigned int maxNumHighResiduals = (input.maxCorrPerImage*input.maxNumberOfImages + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
collectHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters, maxNumHighResiduals);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Max Residual
/////////////////////////////////////////////////////////////////////////
__global__ void EvalMaxResidualDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters)
{
__shared__ int maxResIndex[THREADS_PER_BLOCK];
__shared__ float maxRes[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
maxResIndex[threadIdx.x] = 0;
maxRes[threadIdx.x] = 0.0f;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
maxRes[threadIdx.x] = residual;
maxResIndex[threadIdx.x] = corrIdx;
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
if (maxRes[first] < maxRes[second]) {
maxRes[first] = maxRes[second];
maxResIndex[first] = maxResIndex[second];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
//printf("d_maxResidual[%d] = %f (index %d)\n", blockIdx.x, maxRes[0], maxResIndex[0]);
analysis.d_maxResidual[blockIdx.x] = maxRes[0];
analysis.d_maxResidualIndex[blockIdx.x] = maxResIndex[0];
}
}
}
extern "C" void evalMaxResidual(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
EvalMaxResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N) {
residual = evalFDevice(x, input, state, parameters);
//float out = warpReduce(residual);
//unsigned int laneid;
////This command gets the lane ID within the current warp
//asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
//if (laneid == 0) {
// atomicAdd(&state.d_sumResidual[0], out);
//}
atomicAdd(&state.d_sumResidual[0], residual);
}
}
extern "C" float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
float residual = 0.0f;
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
residual = state.getSumResidual();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return residual;
}
/////////////////////////////////////////////////////////////////////////
// Eval Linear Residual
/////////////////////////////////////////////////////////////////////////
//__global__ void SumLinearResDevice(SolverInput input, SolverState state, SolverParameters parameters)
//{
// const unsigned int N = input.numberOfImages; // Number of block variables
// const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
//
// float residual = 0.0f;
// if (x > 0 && x < N) {
// residual = dot(state.d_rRot[x], state.d_rRot[x]) + dot(state.d_rTrans[x], state.d_rTrans[x]);
// atomicAdd(state.d_sumLinResidual, residual);
// }
//}
//float EvalLinearRes(SolverInput& input, SolverState& state, SolverParameters& parameters)
//{
// float residual = 0.0f;
//
// const unsigned int N = input.numberOfImages; // Number of block variables
//
// // Do PCG step
// const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
//
// float init = 0.0f;
// cutilSafeCall(cudaMemcpy(state.d_sumLinResidual, &init, sizeof(float), cudaMemcpyHostToDevice));
//
// SumLinearResDevice << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//#ifdef _DEBUG
// cutilSafeCall(cudaDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//
// cutilSafeCall(cudaMemcpy(&residual, state.d_sumLinResidual, sizeof(float), cudaMemcpyDeviceToHost));
// return residual;
//}
/////////////////////////////////////////////////////////////////////////
// Count High Residuals
/////////////////////////////////////////////////////////////////////////
__global__ void CountHighResidualsDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.verifyOptDistThresh)
atomicAdd(state.d_countHighResidual, 1);
}
}
extern "C" int countHighResiduals(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int)));
CountHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
int count;
cutilSafeCall(cudaMemcpy(&count, state.d_countHighResidual, sizeof(int), cudaMemcpyDeviceToHost));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return count;
}
/////////////////////////////////////////////////////////////////////////
// Convergence Analysis
/////////////////////////////////////////////////////////////////////////
//uses same data store as max residual
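// Editor's note: each block below reduces the max-norm of the current pose update
// (|d_deltaRot|, |d_deltaTrans|) over its images (image 0 and invalid images contribute 0);
// the host side then takes the maximum over the per-block results and uses it as the
// Gauss-Newton early-out criterion in solveBundlingStub.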
__global__ void EvalGNConvergenceDevice(SolverInput input, SolverStateAnalysis analysis, SolverState state) //compute max of delta
{
__shared__ float maxVal[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
maxVal[threadIdx.x] = 0.0f;
if (x < N)
{
if (x == 0 || input.d_validImages[x] == 0)
maxVal[threadIdx.x] = 0.0f;
else {
float3 r3 = fmaxf(fabs(state.d_deltaRot[x]), fabs(state.d_deltaTrans[x]));
float r = fmaxf(r3.x, fmaxf(r3.y, r3.z));
maxVal[threadIdx.x] = r;
}
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
maxVal[first] = fmaxf(maxVal[first], maxVal[second]);
}
__syncthreads();
}
if (threadIdx.x == 0) {
analysis.d_maxResidual[blockIdx.x] = maxVal[0];
}
}
}
float EvalGNConvergence(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, CUDATimer* timer)
{
if (timer) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfImages;
const unsigned int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
EvalGNConvergenceDevice << < blocksPerGrid, THREADS_PER_BLOCK >> >(input, analysis, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//copy to host and compute max
cutilSafeCall(cudaMemcpy(analysis.h_maxResidual, analysis.d_maxResidual, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(analysis.h_maxResidualIndex, analysis.d_maxResidualIndex, sizeof(int) * blocksPerGrid, cudaMemcpyDeviceToHost));
float maxVal = 0.0f;
for (unsigned int i = 0; i < blocksPerGrid; i++) {
if (maxVal < analysis.h_maxResidual[i]) maxVal = analysis.h_maxResidual[i];
}
if (timer) timer->endEvent();
return maxVal;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
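// Editor's sketch of how the PCG recurrences map onto the kernels below (delta is the unknown
// pose update, A = J^T J applied matrix-free, b = -J^T F, M^-1 the block preconditioner):
//   PCGInit_Kernel1 : r_0 = b (since delta_0 = 0), p_0 = z_0 = M^-1 r_0, reduce r_0^T z_0
//                     into d_scanAlpha; PCGInit_Kernel2 copies that scalar into d_rDotzOld.
//   PCGStep_Kernel0 / PCGStep_Kernel1a (sparse) and PCGStep_Kernel_Dense : accumulate A p_k
//                     into d_Ap_XRot / d_Ap_XTrans.
//   PCGStep_Kernel1b: reduce p_k^T A p_k into d_scanAlpha[0].
//   PCGStep_Kernel2 : alpha_k = r_k^T z_k / p_k^T A p_k, delta += alpha_k p_k,
//                     r_{k+1} = r_k - alpha_k A p_k, z_{k+1} = M^-1 r_{k+1},
//                     reduce r_{k+1}^T z_{k+1} into d_scanAlpha[1].
//   PCGStep_Kernel3 : beta_k = r_{k+1}^T z_{k+1} / r_k^T z_k, p_{k+1} = z_{k+1} + beta_k p_k;
//                     on the last iteration the accumulated delta is applied to d_xRot / d_xTrans.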
template<bool useDense>
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
float3 resRot, resTrans;
evalMinusJTFDevice<useDense>(x, input, state, parameters, resRot, resTrans); // residual r_0 = -J^T F - A * delta_0 = -J^T F, since delta_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const float3 pRot = state.d_precondionerRot[x] * resRot; // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const float3 pTrans = state.d_precondionerTrans[x] * resTrans; // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
d = dot(resRot, pRot) + dot(resTrans, pTrans); // x-th term of the numerator for computing alpha and of the denominator for computing beta
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer) timer->startEvent("Initialization");
//!!!DEBUGGING //remember to uncomment the delete...
//float3* rRot = new float3[input.numberOfImages]; // -jtf
//float3* rTrans = new float3[input.numberOfImages];
//!!!DEBUGGING
cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (parameters.useDense) PCGInit_Kernel1<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
else PCGInit_Kernel1<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//cutilSafeCall(cudaMemcpy(rRot, state.d_rRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(rTrans, state.d_rTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr rRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr rTrans %d\n", i); getchar(); } }
//cutilSafeCall(cudaMemcpy(rRot, state.d_pRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(rTrans, state.d_pTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr pRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr pTrans %d\n", i); getchar(); } }
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
//float scanAlpha;
//cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
//if (rRot) delete[] rRot;
//if (rTrans) delete[] rTrans;
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
//inefficient
__global__ void PCGStep_Kernel_Dense_Brute(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseBruteDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans); // A x p_k => J^T x J x p_k
state.d_Ap_XRot[x] += rot;
state.d_Ap_XTrans[x] += trans;
}
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans, threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel0(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float3 tmp = applyJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k
state.d_Jp[x] = tmp; // store for next kernel call
}
}
__global__ void PCGStep_Kernel1a(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTDevice(x, input, state, parameters, rot, trans, threadIdx.x, lane); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
d = dot(state.d_pRot[x], state.d_Ap_XRot[x]) + dot(state.d_pTrans[x], state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_deltaRot[x] = state.d_deltaRot[x] + alpha*state.d_pRot[x]; // take a descent step
state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha*state.d_pTrans[x]; // take a descent step
float3 rRot = state.d_rRot[x] - alpha*state.d_Ap_XRot[x]; // update residuum
state.d_rRot[x] = rRot; // store for next kernel call
float3 rTrans = state.d_rTrans[x] - alpha*state.d_Ap_XTrans[x]; // update residuum
state.d_rTrans[x] = rTrans; // store for next kernel call
float3 zRot = state.d_precondionerRot[x] * rRot; // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
float3 zTrans = state.d_precondionerTrans[x] * rTrans; // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
b = dot(zRot, rRot) + dot(zTrans, rTrans); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N)
{
const float rDotzNew = state.d_scanAlpha[1]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_pRot[x] = state.d_zRot[x] + beta*state.d_pRot[x]; // update descent direction
state.d_pTrans[x] = state.d_zTrans[x] + beta*state.d_pTrans[x]; // update descent direction
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
if (lastIteration)
{
//if (input.d_validImages[x]) { //not really necessary
#ifdef USE_LIE_SPACE //TODO just keep that matrix transforms around
float3 rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x], rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
#else
state.d_xRot[x] = state.d_xRot[x] + state.d_deltaRot[x];
state.d_xTrans[x] = state.d_xTrans[x] + state.d_deltaTrans[x];
#endif
//}
}
}
}
template<bool useSparse, bool useDense>
bool PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, bool lastIteration, CUDATimer *timer)
{
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer) timer->startEvent("PCGIteration");
cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
// sparse part
if (useSparse) {
const unsigned int Ncorr = input.numberOfCorrespondences;
const int blocksPerGridCorr = (Ncorr + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
PCGStep_Kernel0 << <blocksPerGridCorr, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
PCGStep_Kernel1a << < N, THREADS_PER_BLOCK_JT >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
if (useDense) {
//if (timer) timer->startEvent("apply JTJ dense");
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> >(input, state, parameters);
//PCGStep_Kernel_Dense_Brute << < N, 1 >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//if (timer) timer->endEvent();
}
//!!!debugging
//float3* Ap_Rot = new float3[input.numberOfImages];
//float3* Ap_Trans = new float3[input.numberOfImages];
//cutilSafeCall(cudaMemcpy(Ap_Rot, state.d_Ap_XRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(Ap_Trans, state.d_Ap_XTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Rot[i].x)) { printf("NaN at Ap rot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Trans[i].x)) { printf("NaN at Ap trans %d\n", i); getchar(); } }
//if (Ap_Rot) delete[] Ap_Rot;
//if (Ap_Trans) delete[] Ap_Trans;
//!!!debugging
PCGStep_Kernel1b << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
#ifdef ENABLE_EARLY_OUT //for convergence
float scanAlpha; cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
//if (fabs(scanAlpha) < 0.00005f) lastIteration = true; //todo check this part
//if (fabs(scanAlpha) < 1e-6) lastIteration = true; //todo check this part
if (fabs(scanAlpha) < 5e-7) { lastIteration = true; } //todo check this part
#endif
if (lastIteration) {
PCGStep_Kernel3<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
else {
PCGStep_Kernel3<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
return lastIteration;
}
#ifdef USE_LIE_SPACE //TODO
////////////////////////////////////////////////////////////////////
// matrix <-> pose
////////////////////////////////////////////////////////////////////
__global__ void convertLiePosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].getInverse();
}
}
extern "C"
void convertLiePosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
convertLiePosesToMatricesCU_Kernel << <(numTransforms + 8 - 1) / 8, 8 >> >(d_rot, d_trans, numTransforms, d_transforms, d_transformInvs);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
#endif
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
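// Editor's overview of solveBundlingStub (inferred from the code below): for each non-linear
// Gauss-Newton iteration it loads the per-iteration weights, converts the Lie poses to
// matrices when USE_LIE_SPACE is defined, builds the dense system when any dense weight is
// non-zero, runs the PCG initialization, and then up to nLinIterations PCG iterations with an
// early out on a small p^T A p (ENABLE_EARLY_OUT); the outer loop itself exits early when
// EvalGNConvergence reports a maximum pose update below 0.005.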
extern "C" void solveBundlingStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, float* convergenceAnalysis, CUDATimer *timer)
{
if (convergenceAnalysis) {
float initialResidual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[0] = initialResidual; // initial residual
}
//!!!DEBUGGING
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
if (input.numberOfCorrespondences == 0) { printf("ERROR: %d correspondences\n", input.numberOfCorrespondences); getchar(); }
float initialResidual = EvalResidual(input, state, parameters, timer);
printf("initial sparse = %f*%f = %f\n", parameters.weightSparse, initialResidual / parameters.weightSparse, initialResidual);
}
#endif
//float3* xRot = new float3[input.numberOfImages]; //remember the delete!
//float3* xTrans = new float3[input.numberOfImages];
//timer = new CUDATimer();
//static unsigned int totalLinIters = 0, numLin = 0, totalNonLinIters = 0, numNonLin = 0;
//!!!DEBUGGING
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
Initialization(input, state, parameters, timer);
if (parameters.weightSparse > 0.0f) {
if (parameters.useDense) {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) { break; }
}
else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, false>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
//totalLinIters += (linIter+1); numLin++;
break;
}
}
}
else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<false, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) break;
}
//!!!debugging
//cutilSafeCall(cudaMemcpy(xRot, state.d_xRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(xTrans, state.d_xTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//!!!debugging
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
float residual = EvalResidual(input, state, parameters, timer);
printf("[niter %d] weight * sparse = %f*%f = %f\t[#corr = %d]\n", nIter, parameters.weightSparse, residual / parameters.weightSparse, residual, input.numberOfCorrespondences);
}
#endif
if (convergenceAnalysis) {
float residual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[nIter + 1] = residual;
}
//if (timer) timer->evaluate(true);
#ifdef ENABLE_EARLY_OUT //convergence
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.01f) { //!!! TODO CHECK HOW THESE GENERALIZE
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.005f) { //0.001?
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.001f) {
//if (!parameters.useDense) { totalNonLinIters += (nIter+1); numNonLin++; }
break;
}
//else if (!parameters.useDense && nIter == parameters.nNonLinearIterations - 1) { totalNonLinIters += (nIter+1); numNonLin++; }
#endif
}
//!!!debugging
//if (xRot) delete[] xRot;
//if (xTrans) delete[] xTrans;
//if (timer) { timer->evaluate(true, false); delete timer; }
//if (!parameters.useDense) { printf("mean #pcg its = %f\tmean #gn its = %f\n", (float)totalLinIters / (float)numLin, (float)totalNonLinIters / (float)numNonLin); } //just stats for global solve
//!!!debugging
}
////////////////////////////////////////////////////////////////////
// build variables to correspondences lookup
////////////////////////////////////////////////////////////////////
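// Editor's note: for every valid correspondence x the kernel below appends x to the rows of
// both images it links (imgIdx_i and imgIdx_j) in d_variablesToCorrespondences, using
// d_numEntriesPerRow as per-image counters; correspondences that would overflow
// maxNumCorrespondencesPerImage are invalidated instead.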
__global__ void BuildVariablesToCorrespondencesTableDevice(EntryJ* d_correspondences, unsigned int numberOfCorrespondences,
unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow)
{
const unsigned int N = numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
EntryJ& corr = d_correspondences[x];
if (corr.isValid()) {
int offset0 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_i], 1); // may overflow - need to check when read
int offset1 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_j], 1); // may overflow - need to check when read
if (offset0 < maxNumCorrespondencesPerImage && offset1 < maxNumCorrespondencesPerImage) {
d_variablesToCorrespondences[corr.imgIdx_i * maxNumCorrespondencesPerImage + offset0] = x;
d_variablesToCorrespondences[corr.imgIdx_j * maxNumCorrespondencesPerImage + offset1] = x;
}
else { //invalidate
printf("EXCEEDED MAX NUM CORR PER IMAGE IN SOLVER, INVALIDATING %d(%d,%d) [%d,%d | %d]\n",
x, corr.imgIdx_i, corr.imgIdx_j, offset0, offset1, maxNumCorrespondencesPerImage); //debugging
corr.setInvalid(); //make sure j corresponds to jt
}
}
}
}
extern "C" void buildVariablesToCorrespondencesTableCUDA(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow, CUDATimer* timer)
{
const unsigned int N = numberOfCorrespondences;
if (timer) timer->startEvent(__FUNCTION__);
BuildVariablesToCorrespondencesTableDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_correspondences, numberOfCorrespondences, maxNumCorrespondencesPerImage, d_variablesToCorrespondences, d_numEntriesPerRow);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer) timer->endEvent();
}
|
721374eadf1997c2c0859c787fbfa684afe5a3e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
#define SIGNED_SATURATE_MAX 2047
#define SIGNED_SATURATE_MIN -2048
#define UNSIGNED_SATURATE_MAX 4095
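// Editor's note (inferred from the constants above, not stated elsewhere in this file): these
// saturation bounds correspond to 12-bit outputs, i.e. a signed range of [-2048, 2047] and an
// unsigned range of [0, 4095].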
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const top_data, const int output_shift_instead_division, const Dtype saturate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
//CUSTOMIZATION-->
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if (output_shift_instead_division != Dtype(0)) {
top_data[index] = aveval / output_shift_instead_division;
top_data[index] = rint(top_data[index]);
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
if(saturate == PoolingParameter_SaturateMethod_Signed)
{
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
}
else{
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
top_data[index] = aveval;
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
else if(saturate == PoolingParameter_SaturateMethod_Signed)
{
top_data[index] = aveval;
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
else //original implementation
top_data[index] = aveval / pool_size;
}
}
}
//<--CUSTOMIZATION
template <typename Dtype>
__global__ void AvePoolForward_TF(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const top_data, const int output_shift_instead_division, const Dtype saturate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
//CUSTOMIZATION-->
const int full_pool_size = (hend - hstart) * (wend - wstart); //
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
const int pool_size = (hend - hstart) * (wend - wstart); //
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if (output_shift_instead_division != Dtype(0)) {
if (full_pool_size == pool_size)
top_data[index] = aveval / output_shift_instead_division;
else {
//special fix: Non zero paddings for the case when:
//1)the kernel runs off the edge only by 1 pixel
//2)and the kernel_size-1 is a power of 2
//refer to "Repair by changing padding" at
//https://wwwin.synopsys.com/~tpennell/cnn_papers/29_average_pooling_repair_shop.htm
bool wfix = (pw * stride_w - pad_left == -1) || (wstart + kernel_w - width == 1);
bool hfix = (ph * stride_h - pad_top == -1) || (hstart + kernel_h - height == 1);
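        // Repair strategy: padded column positions are filled with each row's average,
        // the padded row with each column's average, and (when both overrun) the corner with
        // the average of the column averages, before applying the shift-based division.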
if (wfix && hfix)
{
Dtype aveval_fix;
for (int h = hstart; h < hend; ++h) {
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (wend - wstart));
}
for (int w = wstart; w < wend; ++w) {
aveval_fix = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (hend - hstart));
}
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
Dtype aveval_fix_tmp = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix_tmp += bottom_slice[h * width + w];
}
aveval_fix += rint(aveval_fix_tmp / (hend - hstart));
}
aveval += rint(aveval_fix / (wend - wstart));
top_data[index] = aveval / output_shift_instead_division;
}
else if (hfix && !wfix)
{
Dtype aveval_fix;
for (int w = wstart; w < wend; ++w) {
aveval_fix = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (hend - hstart));
}
top_data[index] = aveval / output_shift_instead_division;
}
else if (wfix && !hfix)
{
Dtype aveval_fix;
for (int h = hstart; h < hend; ++h) {
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (wend - wstart));
}
top_data[index] = aveval / output_shift_instead_division;
}
else
top_data[index] = aveval / output_shift_instead_division * full_pool_size / pool_size;
}
top_data[index] = rint(top_data[index]);
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
if(saturate == PoolingParameter_SaturateMethod_Signed)
{
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
}
else{
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
top_data[index] = aveval;
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
else if(saturate == PoolingParameter_SaturateMethod_Signed)
{
top_data[index] = aveval;
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
else //original implementation
top_data[index] = aveval / pool_size;
}
}
}
//CUSTOMIZATION-->
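// StoPoolForwardTrain: stochastic pooling. Each output picks one input inside its window with
// probability proportional to the input value, using the pre-generated uniform random number
// stored in rand_idx; the chosen flat index is written back into rand_idx for the backward pass.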
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
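// StoPoolForwardTest: at test time stochastic pooling is replaced by its expectation, the
// probability-weighted average sum(x*x) / sum(x) over the window (0 if the window sum is not positive).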
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
  //<--CUSTOMIZATION
int pad_top=0, pad_bottom=0, pad_left=0, pad_right=0;
switch (pad_type_) {
case 0:
if (pad_l_ != 0 || pad_r_ != 0 || pad_t_ != 0 || pad_b_ != 0) {
pad_top = pad_t_;
pad_bottom = pad_b_;
pad_left = pad_l_;
pad_right = pad_r_;
} else {
pad_top = pad_h_;
pad_bottom = pad_h_;
pad_left = pad_w_;
pad_right = pad_w_;
}
break;
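  // "SAME" padding (TensorFlow convention): total padding per dimension is kernel - stride when
  // the input size is a multiple of the stride, otherwise kernel - (size mod stride); both are
  // clipped at zero and split so that any odd pixel goes to the bottom/right side.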
  case 1: //for "SAME" padding
int pad_along_height, pad_along_width;
if (height_ % stride_h_ == 0)
pad_along_height = (kernel_h_ - stride_h_)>0 ? (kernel_h_ - stride_h_) : 0;
else
pad_along_height = (kernel_h_ - height_ % stride_h_)>0 ? (kernel_h_ - height_ % stride_h_) : 0;
if (width_ % stride_w_ == 0)
pad_along_width = (kernel_w_ - stride_w_)>0 ? (kernel_w_ - stride_w_) : 0;
else
pad_along_width = (kernel_w_ - width_ % stride_w_)>0 ? (kernel_w_ - width_ % stride_w_): 0;
pad_top = pad_along_height / 2;
pad_bottom = pad_along_height - pad_top;
pad_left = pad_along_width / 2;
pad_right = pad_along_width - pad_left;
break;
default:
LOG(FATAL) << "Unknown pooling padding type.";
break;
}
//CUSTOMIZATION-->
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, //CUSTOMIZATION
top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
top_data, output_shift_instead_division_, saturate_);
break;
//<--CUSTOMIZATION
case PoolingParameter_PoolMethod_AVE_EXC_PAD:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward_TF<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
top_data, output_shift_instead_division_, saturate_);
break;
//CUSTOMIZATION-->
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
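// MaxPoolBackward: each bottom element accumulates the top gradients of every pooled window
// whose recorded argmax (from mask or top_mask) points at it.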
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart =
// (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
//const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
//const int pwstart =
// (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
//const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
const int phstart =
(h + pad_top < kernel_h) ? 0 : (h + pad_top - kernel_h) / stride_h + 1;
const int phend = min((h + pad_bottom) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_left < kernel_w) ? 0 : (w + pad_left - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
//const int w = index % width + pad_w;
//const int h = (index / width) % height + pad_h;
const int w = index % width; //CUSTOMIZATION
const int h = (index / width) % height; //CUSTOMIZATION
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
//const int phend = min(h / stride_h + 1, pooled_height);
//const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
//const int pwend = min(w / stride_w + 1, pooled_width);
const int phstart = ( (h+pad_top) < kernel_h) ? 0 : ( (h+pad_top) - kernel_h) / stride_h + 1;
const int phend = min( (h+pad_bottom) / stride_h + 1, pooled_height);
const int pwstart = ( (w+pad_left) < kernel_w) ? 0 : ( (w+pad_left) - kernel_w) / stride_w + 1;
const int pwend = min( (w+pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
        //CUSTOMIZATION-->
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
//<--CUSTOMIZATION
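// AvePoolBackward_TF: backward pass matching AVE_EXC_PAD; the window is clamped to the valid
// input region before computing pool_size, so padded positions do not dilute the gradient.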
template <typename Dtype>
__global__ void AvePoolBackward_TF(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
//const int w = index % width + pad_w;
//const int h = (index / width) % height + pad_h;
const int w = index % width; //CUSTOMIZATION
const int h = (index / width) % height; //CUSTOMIZATION
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
//const int phend = min(h / stride_h + 1, pooled_height);
//const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
//const int pwend = min(w / stride_w + 1, pooled_width);
const int phstart = ( (h+pad_top) < kernel_h) ? 0 : ( (h+pad_top) - kernel_h) / stride_h + 1;
const int phend = min( (h+pad_bottom) / stride_h + 1, pooled_height);
const int pwstart = ( (w+pad_left) < kernel_w) ? 0 : ( (w+pad_left) - kernel_w) / stride_w + 1;
const int pwend = min( (w+pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
        //CUSTOMIZATION-->
hstart = max(hstart, 0); //
wstart = max(wstart, 0); //
hend = min(hend, height); //
wend = min(wend, width); //
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
//CUSTOMIZATION-->
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
  //<--CUSTOMIZATION
int pad_top=0, pad_bottom=0, pad_left=0, pad_right=0;
switch (pad_type_) {
case 0:
if (pad_l_ != 0 || pad_r_ != 0 || pad_t_ != 0 || pad_b_ != 0) {
pad_top = pad_t_;
pad_bottom = pad_b_;
pad_left = pad_l_;
pad_right = pad_r_;
} else {
pad_top = pad_h_;
pad_bottom = pad_h_;
pad_left = pad_w_;
pad_right = pad_w_;
}
break;
  case 1: //for "SAME" padding
int pad_along_height, pad_along_width;
if (height_ % stride_h_ == 0)
pad_along_height = (kernel_h_ - stride_h_)>0 ? (kernel_h_ - stride_h_) : 0;
else
pad_along_height = (kernel_h_ - height_ % stride_h_)>0 ? (kernel_h_ - height_ % stride_h_) : 0;
if (width_ % stride_w_ == 0)
pad_along_width = (kernel_w_ - stride_w_)>0 ? (kernel_w_ - stride_w_) : 0;
else
pad_along_width = (kernel_w_ - width_ % stride_w_)>0 ? (kernel_w_ - width_ % stride_w_): 0;
pad_top = pad_along_height / 2;
pad_bottom = pad_along_height - pad_top;
pad_left = pad_along_width / 2;
pad_right = pad_along_width - pad_left;
break;
default:
LOG(FATAL) << "Unknown pooling padding type.";
break;
}
//CUSTOMIZATION-->
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
//<--CUSTOMIZATION
case PoolingParameter_PoolMethod_AVE_EXC_PAD:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward_TF<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
//CUSTOMIZATION-->
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 721374eadf1997c2c0859c787fbfa684afe5a3e5.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
#define SIGNED_SATURATE_MAX 2047
#define SIGNED_SATURATE_MIN -2048
#define UNSIGNED_SATURATE_MAX 4095
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const top_data, const int output_shift_instead_division, const Dtype saturate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
//CUSTOMIZATION-->
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if (output_shift_instead_division != Dtype(0)) {
top_data[index] = aveval / output_shift_instead_division;
top_data[index] = rint(top_data[index]);
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
if(saturate == PoolingParameter_SaturateMethod_Signed)
{
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
}
else{
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
top_data[index] = aveval;
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
else if(saturate == PoolingParameter_SaturateMethod_Signed)
{
top_data[index] = aveval;
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
else //original implementation
top_data[index] = aveval / pool_size;
}
}
}
//<--CUSTOMIZATION
template <typename Dtype>
__global__ void AvePoolForward_TF(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
    const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const top_data, const int output_shift_instead_division, const Dtype saturate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
//CUSTOMIZATION-->
const int full_pool_size = (hend - hstart) * (wend - wstart); //
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
const int pool_size = (hend - hstart) * (wend - wstart); //
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if (output_shift_instead_division != Dtype(0)) {
if (full_pool_size == pool_size)
top_data[index] = aveval / output_shift_instead_division;
else {
//special fix: Non zero paddings for the case when:
//1)the kernel runs off the edge only by 1 pixel
//2)and the kernel_size-1 is a power of 2
//refer to "Repair by changing padding" at
//https://wwwin.synopsys.com/~tpennell/cnn_papers/29_average_pooling_repair_shop.htm
bool wfix = (pw * stride_w - pad_left == -1) || (wstart + kernel_w - width == 1);
bool hfix = (ph * stride_h - pad_top == -1) || (hstart + kernel_h - height == 1);
if (wfix && hfix)
{
Dtype aveval_fix;
for (int h = hstart; h < hend; ++h) {
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (wend - wstart));
}
for (int w = wstart; w < wend; ++w) {
aveval_fix = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (hend - hstart));
}
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
Dtype aveval_fix_tmp = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix_tmp += bottom_slice[h * width + w];
}
aveval_fix += rint(aveval_fix_tmp / (hend - hstart));
}
aveval += rint(aveval_fix / (wend - wstart));
top_data[index] = aveval / output_shift_instead_division;
}
else if (hfix && !wfix)
{
Dtype aveval_fix;
for (int w = wstart; w < wend; ++w) {
aveval_fix = 0;
for (int h = hstart; h < hend; ++h) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (hend - hstart));
}
top_data[index] = aveval / output_shift_instead_division;
}
else if (wfix && !hfix)
{
Dtype aveval_fix;
for (int h = hstart; h < hend; ++h) {
aveval_fix = 0;
for (int w = wstart; w < wend; ++w) {
aveval_fix += bottom_slice[h * width + w];
}
aveval += rint(aveval_fix / (wend - wstart));
}
top_data[index] = aveval / output_shift_instead_division;
}
else
top_data[index] = aveval / output_shift_instead_division * full_pool_size / pool_size;
}
top_data[index] = rint(top_data[index]);
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
if(saturate == PoolingParameter_SaturateMethod_Signed)
{
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
}
else{
if(saturate == PoolingParameter_SaturateMethod_Unsigned)
{
top_data[index] = aveval;
if(top_data[index] > UNSIGNED_SATURATE_MAX)
top_data[index] = UNSIGNED_SATURATE_MAX;
if(top_data[index] < 0)
top_data[index] = 0;
}
else if(saturate == PoolingParameter_SaturateMethod_Signed)
{
top_data[index] = aveval;
if(top_data[index] > SIGNED_SATURATE_MAX)
top_data[index] = SIGNED_SATURATE_MAX;
if(top_data[index] < SIGNED_SATURATE_MIN)
top_data[index] = SIGNED_SATURATE_MIN;
}
else //original implementation
top_data[index] = aveval / pool_size;
}
}
}
//CUSTOMIZATION-->
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
  //<--CUSTOMIZATION
int pad_top=0, pad_bottom=0, pad_left=0, pad_right=0;
switch (pad_type_) {
case 0:
if (pad_l_ != 0 || pad_r_ != 0 || pad_t_ != 0 || pad_b_ != 0) {
pad_top = pad_t_;
pad_bottom = pad_b_;
pad_left = pad_l_;
pad_right = pad_r_;
} else {
pad_top = pad_h_;
pad_bottom = pad_h_;
pad_left = pad_w_;
pad_right = pad_w_;
}
break;
  case 1: //for "SAME" padding
int pad_along_height, pad_along_width;
if (height_ % stride_h_ == 0)
pad_along_height = (kernel_h_ - stride_h_)>0 ? (kernel_h_ - stride_h_) : 0;
else
pad_along_height = (kernel_h_ - height_ % stride_h_)>0 ? (kernel_h_ - height_ % stride_h_) : 0;
if (width_ % stride_w_ == 0)
pad_along_width = (kernel_w_ - stride_w_)>0 ? (kernel_w_ - stride_w_) : 0;
else
pad_along_width = (kernel_w_ - width_ % stride_w_)>0 ? (kernel_w_ - width_ % stride_w_): 0;
pad_top = pad_along_height / 2;
pad_bottom = pad_along_height - pad_top;
pad_left = pad_along_width / 2;
pad_right = pad_along_width - pad_left;
break;
default:
LOG(FATAL) << "Unknown pooling padding type.";
break;
}
//CUSTOMIZATION-->
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, //CUSTOMIZATION
top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
top_data, output_shift_instead_division_, saturate_);
break;
//<--CUSTOMIZATION
case PoolingParameter_PoolMethod_AVE_EXC_PAD:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward_TF<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
top_data, output_shift_instead_division_, saturate_);
break;
//CUSTOMIZATION-->
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart =
// (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
//const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
//const int pwstart =
// (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
//const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
const int phstart =
(h + pad_top < kernel_h) ? 0 : (h + pad_top - kernel_h) / stride_h + 1;
const int phend = min((h + pad_bottom) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_left < kernel_w) ? 0 : (w + pad_left - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
//const int w = index % width + pad_w;
//const int h = (index / width) % height + pad_h;
const int w = index % width; //CUSTOMIZATION
const int h = (index / width) % height; //CUSTOMIZATION
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
//const int phend = min(h / stride_h + 1, pooled_height);
//const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
//const int pwend = min(w / stride_w + 1, pooled_width);
const int phstart = ( (h+pad_top) < kernel_h) ? 0 : ( (h+pad_top) - kernel_h) / stride_h + 1;
const int phend = min( (h+pad_bottom) / stride_h + 1, pooled_height);
const int pwstart = ( (w+pad_left) < kernel_w) ? 0 : ( (w+pad_left) - kernel_w) / stride_w + 1;
const int pwend = min( (w+pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
        //CUSTOMIZATION-->
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
//<--CUSTOMIZATION
template <typename Dtype>
__global__ void AvePoolBackward_TF(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w,
//const int pad_h, const int pad_w,
const int pad_top, const int pad_left, const int pad_bottom, const int pad_right, //CUSTOMIZATION
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
//const int w = index % width + pad_w;
//const int h = (index / width) % height + pad_h;
const int w = index % width; //CUSTOMIZATION
const int h = (index / width) % height; //CUSTOMIZATION
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
//<--CUSTOMIZATION
//const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
//const int phend = min(h / stride_h + 1, pooled_height);
//const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
//const int pwend = min(w / stride_w + 1, pooled_width);
const int phstart = ( (h+pad_top) < kernel_h) ? 0 : ( (h+pad_top) - kernel_h) / stride_h + 1;
const int phend = min( (h+pad_bottom) / stride_h + 1, pooled_height);
const int pwstart = ( (w+pad_left) < kernel_w) ? 0 : ( (w+pad_left) - kernel_w) / stride_w + 1;
const int pwend = min( (w+pad_right) / stride_w + 1, pooled_width);
//CUSTOMIZATION-->
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
//<--CUSTOMIZATION
//int hstart = ph * stride_h - pad_h;
//int wstart = pw * stride_w - pad_w;
//int hend = min(hstart + kernel_h, height + pad_h);
//int wend = min(wstart + kernel_w, width + pad_w);
int hstart = ph * stride_h - pad_top;
int wstart = pw * stride_w - pad_left;
int hend = min(hstart + kernel_h, height + pad_bottom);
int wend = min(wstart + kernel_w, width + pad_right);
        //CUSTOMIZATION-->
hstart = max(hstart, 0); //
wstart = max(wstart, 0); //
hend = min(hend, height); //
wend = min(wend, width); //
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
//CUSTOMIZATION-->
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
  //<--CUSTOMIZATION
int pad_top=0, pad_bottom=0, pad_left=0, pad_right=0;
switch (pad_type_) {
case 0:
if (pad_l_ != 0 || pad_r_ != 0 || pad_t_ != 0 || pad_b_ != 0) {
pad_top = pad_t_;
pad_bottom = pad_b_;
pad_left = pad_l_;
pad_right = pad_r_;
} else {
pad_top = pad_h_;
pad_bottom = pad_h_;
pad_left = pad_w_;
pad_right = pad_w_;
}
break;
  case 1: //for "SAME" padding
int pad_along_height, pad_along_width;
if (height_ % stride_h_ == 0)
pad_along_height = (kernel_h_ - stride_h_)>0 ? (kernel_h_ - stride_h_) : 0;
else
pad_along_height = (kernel_h_ - height_ % stride_h_)>0 ? (kernel_h_ - height_ % stride_h_) : 0;
if (width_ % stride_w_ == 0)
pad_along_width = (kernel_w_ - stride_w_)>0 ? (kernel_w_ - stride_w_) : 0;
else
pad_along_width = (kernel_w_ - width_ % stride_w_)>0 ? (kernel_w_ - width_ % stride_w_): 0;
pad_top = pad_along_height / 2;
pad_bottom = pad_along_height - pad_top;
pad_left = pad_along_width / 2;
pad_right = pad_along_width - pad_left;
break;
default:
LOG(FATAL) << "Unknown pooling padding type.";
break;
}
//CUSTOMIZATION-->
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
//<--CUSTOMIZATION
case PoolingParameter_PoolMethod_AVE_EXC_PAD:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward_TF<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
//pad_h_, pad_w_,
pad_top, pad_left, pad_bottom, pad_right, //CUSTOMIZATION
bottom_diff);
break;
//CUSTOMIZATION-->
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
c037336e1e1ff83e155f437b40990b066834923c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO CSV writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/execution_policy.h>
#include <thrust/logical.h>
#include <thrust/scan.h>
#include <algorithm>
#include <sstream>
namespace cudf {
namespace io {
namespace detail {
namespace csv {
namespace {
/**
* @brief Functor to modify a string column for CSV format.
*
* If a row contains specific characters, the entire row must be
* output in double-quotes. Also, if a double-quote appears it
* must be escaped using a 2nd double-quote.
*/
struct escape_strings_fn {
column_device_view const d_column;
string_view const d_delimiter; // check for column delimiter
offset_type* d_offsets{};
char* d_chars{};
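  // Two-pass pattern used by make_strings_children: on the sizing pass d_chars is null and only
  // byte counts are accumulated into d_offsets; on the second pass the escaped bytes are written.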
__device__ void write_char(char_utf8 chr, char*& d_buffer, offset_type& bytes)
{
if (d_buffer)
d_buffer += cudf::strings::detail::from_char_utf8(chr, d_buffer);
else
bytes += cudf::strings::detail::bytes_in_char_utf8(chr);
}
__device__ void operator()(size_type idx)
{
if (d_column.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
constexpr char_utf8 const quote = '\"'; // check for quote
constexpr char_utf8 const new_line = '\n'; // and for new-line
auto const d_str = d_column.element<string_view>(idx);
// if quote, new-line or a column delimiter appear in the string
// the entire string must be double-quoted.
bool const quote_row = thrust::any_of(
thrust::seq, d_str.begin(), d_str.end(), [d_delimiter = d_delimiter](auto chr) {
return chr == quote || chr == new_line || chr == d_delimiter[0];
});
char* d_buffer = d_chars ? d_chars + d_offsets[idx] : nullptr;
offset_type bytes = 0;
if (quote_row) write_char(quote, d_buffer, bytes);
for (auto chr : d_str) {
if (chr == quote) write_char(quote, d_buffer, bytes);
write_char(chr, d_buffer, bytes);
}
if (quote_row) write_char(quote, d_buffer, bytes);
if (!d_chars) d_offsets[idx] = bytes;
}
};
struct column_to_strings_fn {
// compile-time predicate that defines unsupported column types;
// based on the conditions used for instantiations of individual
// converters in strings/convert/convert_*.hpp;
//(this should have been a `variable template`,
// instead of a static function, but nvcc (10.0)
// fails to compile var-templs);
//
template <typename column_type>
constexpr static bool is_not_handled(void)
{
// Note: the case (not std::is_same<column_type, bool>::value)
// is already covered by is_integral)
//
return not((std::is_same<column_type, cudf::string_view>::value) ||
(std::is_integral<column_type>::value) ||
(std::is_floating_point<column_type>::value) ||
(cudf::is_fixed_point<column_type>()) || (cudf::is_timestamp<column_type>()) ||
(cudf::is_duration<column_type>()));
}
explicit column_to_strings_fn(
csv_writer_options const& options,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
: options_(options), stream_(stream), mr_(mr)
{
}
// Note: `null` replacement with `na_rep` deferred to `concatenate()`
// instead of column-wise; might be faster
//
// Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are
// not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr,
// stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just
// declare a prototype inside `namespace cudf::strings::detail`;
// bools:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_booleans(
column, options_.get_true_value(), options_.get_false_value(), stream_, mr_);
}
// strings:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>>
operator()(column_view const& column_v) const
{
// handle special characters: {delimiter, '\n', "} in row:
string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_};
auto d_column = column_device_view::create(column_v, stream_);
escape_strings_fn fn{*d_column, delimiter.value(stream_)};
auto children = cudf::strings::detail::make_strings_children(fn, column_v.size(), stream_, mr_);
return make_strings_column(column_v.size(),
std::move(children.first),
std::move(children.second),
column_v.null_count(),
cudf::detail::copy_bitmask(column_v, stream_, mr_),
stream_,
mr_);
}
// ints:
//
template <typename column_type>
std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value,
std::unique_ptr<column>>
operator()(column_view const& column) const
{
return cudf::strings::detail::from_integers(column, stream_, mr_);
}
// floats:
//
template <typename column_type>
std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_floats(column, stream_, mr_);
}
// fixed point:
//
template <typename column_type>
std::enable_if_t<cudf::is_fixed_point<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_fixed_point(column, stream_, mr_);
}
// timestamps:
//
template <typename column_type>
std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
std::string format = [&]() {
if (std::is_same<cudf::timestamp_s, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%SZ"};
} else if (std::is_same<cudf::timestamp_ms, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"};
} else if (std::is_same<cudf::timestamp_us, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"};
} else if (std::is_same<cudf::timestamp_ns, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"};
} else {
return std::string{"%Y-%m-%d"};
}
}();
// handle the cases where delimiter / line-terminator can be
// "-" or ":", in which case we need to add quotes to the format
//
std::string delimiter{options_.get_inter_column_delimiter()};
std::string newline{options_.get_line_terminator()};
constexpr char const* dash{"-"};
constexpr char const* colon{":"};
if (delimiter == dash || newline == dash || delimiter == colon || newline == colon) {
format = "\"" + format + "\"";
}
return cudf::strings::detail::from_timestamps(column, format, stream_, mr_);
}
template <typename column_type>
std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::io::detail::csv::pandas_format_durations(column, stream_, mr_);
}
// unsupported type of column:
//
template <typename column_type>
std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()(
column_view const&) const
{
CUDF_FAIL("Unsupported column type.");
}
private:
csv_writer_options const& options_;
rmm::cuda_stream_view stream_;
rmm::mr::device_memory_resource* mr_;
};
} // unnamed namespace
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
csv_writer_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
writer::impl::impl(std::unique_ptr<data_sink> sink,
csv_writer_options const& options,
rmm::mr::device_memory_resource* mr)
: out_sink_(std::move(sink)), mr_(mr), options_(options)
{
}
// write the header: column names:
//
void writer::impl::write_chunked_begin(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
if ((metadata != nullptr) && (options_.is_enabled_include_header())) {
CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()),
"Mismatch between number of column headers and table columns.");
std::string delimiter_str{options_.get_inter_column_delimiter()};
// avoid delimiter after last element:
//
std::stringstream ss;
    // guard against an empty column-name list: end() - 1 would be invalid in that case
    if (metadata->column_names.size() > 0) {
      std::copy(metadata->column_names.begin(),
                metadata->column_names.end() - 1,
                std::ostream_iterator<std::string>(ss, delimiter_str.c_str()));
      ss << metadata->column_names.back() << options_.get_line_terminator();
    } else {
      ss << options_.get_line_terminator();
    }
out_sink_->host_write(ss.str().data(), ss.str().size());
}
}
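// For example, column names {"a", "b", "c"} with a ',' delimiter and "\n" line terminator
// produce the header "a,b,c\n"; if headers are disabled or metadata is null, nothing is written.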
void writer::impl::write_chunked(strings_column_view const& str_column_view,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
// algorithm outline:
//
// for_each(strings_column.begin(), strings_column.end(),
// [sink = out_sink_](auto str_row) mutable {
// auto host_buffer = str_row.host_buffer();
  //        sink->host_write(host_buffer.data(), host_buffer.size());
// });//or...sink->device_write(device_buffer,...);
//
// added line_terminator functionality
//
CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column.");
cudf::string_scalar newline{options_.get_line_terminator()};
auto p_str_col_w_nl =
cudf::strings::detail::join_strings(str_column_view, newline, string_scalar("", false), stream);
strings_column_view strings_column{p_str_col_w_nl->view()};
auto total_num_bytes = strings_column.chars_size();
char const* ptr_all_bytes = strings_column.chars().data<char>();
if (out_sink_->is_device_write_preferred(total_num_bytes)) {
// Direct write from device memory
out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream);
} else {
// copy the bytes to host to write them out
thrust::host_vector<char> h_bytes(total_num_bytes);
CUDA_TRY(hipMemcpyAsync(h_bytes.data(),
ptr_all_bytes,
total_num_bytes * sizeof(char),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
out_sink_->host_write(h_bytes.data(), total_num_bytes);
}
// Needs newline at the end, to separate from next chunk
if (out_sink_->is_device_write_preferred(newline.size())) {
out_sink_->device_write(newline.data(), newline.size(), stream);
} else {
out_sink_->host_write(options_.get_line_terminator().data(),
options_.get_line_terminator().size());
}
}
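// For example, string rows {"1,abc", "2,def"} are joined into the single buffer "1,abc\n2,def"
// (using the configured line terminator), written either directly from device memory or staged
// through a host copy, and the trailing line terminator is appended afterwards so that
// consecutive chunks concatenate cleanly.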
void writer::impl::write(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
// write header: column names separated by delimiter:
// (even for tables with no rows)
//
write_chunked_begin(table, metadata, stream);
if (table.num_rows() > 0) {
// no need to check same-size columns constraint; auto-enforced by table_view
auto n_rows_per_chunk = options_.get_rows_per_chunk();
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
if (n_rows_per_chunk % 8) // must be divisible by 8
n_rows_per_chunk += 8 - (n_rows_per_chunk % 8);
CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8");
auto num_rows = table.num_rows();
std::vector<table_view> vector_views;
if (num_rows <= n_rows_per_chunk) {
vector_views.push_back(table);
} else {
auto const n_chunks = num_rows / n_rows_per_chunk;
std::vector<size_type> splits(n_chunks);
thrust::tabulate(splits.begin(), splits.end(), [n_rows_per_chunk](auto idx) {
return (idx + 1) * n_rows_per_chunk;
});
// split table_view into chunks:
vector_views = cudf::split(table, splits);
}
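    // For example, num_rows = 20000 with n_rows_per_chunk = 8000 yields splits {8000, 16000}
    // and three chunks of 8000, 8000 and 4000 rows; the last chunk holds the remainder.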
// convert each chunk to CSV:
//
column_to_strings_fn converter{options_, stream, rmm::mr::get_current_device_resource()};
for (auto&& sub_view : vector_views) {
// Skip if the table has no rows
if (sub_view.num_rows() == 0) continue;
std::vector<std::unique_ptr<column>> str_column_vec;
// populate vector of string-converted columns:
//
std::transform(sub_view.begin(),
sub_view.end(),
std::back_inserter(str_column_vec),
[converter](auto const& current_col) {
return cudf::type_dispatcher(current_col.type(), converter, current_col);
});
// create string table view from str_column_vec:
//
auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec));
auto str_table_view = str_table_ptr->view();
// concatenate columns in each row into one big string column
// (using null representation and delimiter):
//
std::string delimiter_str{options_.get_inter_column_delimiter()};
auto str_concat_col = [&] {
if (str_table_view.num_columns() > 1)
return cudf::strings::detail::concatenate(str_table_view,
delimiter_str,
options_.get_na_rep(),
strings::separator_on_nulls::YES,
stream);
cudf::string_scalar narep{options_.get_na_rep()};
return cudf::strings::detail::replace_nulls(str_table_view.column(0), narep, stream);
}();
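      // For example, string columns {"1", "2"} and {"x", <null>} with a ',' delimiter and an
      // empty na_rep yield the rows "1,x" and "2," (separator_on_nulls::YES keeps the
      // delimiter next to the null replacement).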
write_chunked(str_concat_col->view(), metadata, stream);
}
}
// finalize (no-op, for now, but offers a hook for future extensions):
//
write_chunked_end(table, metadata, stream);
}
void writer::write(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
_impl->write(table, metadata, stream);
}
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
| c037336e1e1ff83e155f437b40990b066834923c.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO CSV writer class implementation
*/
#include "writer_impl.hpp"
#include <cudf/column/column_device_view.cuh>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/execution_policy.h>
#include <thrust/logical.h>
#include <thrust/scan.h>
#include <algorithm>
#include <sstream>
namespace cudf {
namespace io {
namespace detail {
namespace csv {
namespace {
/**
* @brief Functor to modify a string column for CSV format.
*
* If a row contains specific characters, the entire row must be
* output in double-quotes. Also, if a double-quote appears it
* must be escaped using a 2nd double-quote.
*/
struct escape_strings_fn {
column_device_view const d_column;
string_view const d_delimiter; // check for column delimiter
offset_type* d_offsets{};
char* d_chars{};
__device__ void write_char(char_utf8 chr, char*& d_buffer, offset_type& bytes)
{
if (d_buffer)
d_buffer += cudf::strings::detail::from_char_utf8(chr, d_buffer);
else
bytes += cudf::strings::detail::bytes_in_char_utf8(chr);
}
__device__ void operator()(size_type idx)
{
if (d_column.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
constexpr char_utf8 const quote = '\"'; // check for quote
constexpr char_utf8 const new_line = '\n'; // and for new-line
auto const d_str = d_column.element<string_view>(idx);
// if quote, new-line or a column delimiter appear in the string
// the entire string must be double-quoted.
bool const quote_row = thrust::any_of(
thrust::seq, d_str.begin(), d_str.end(), [d_delimiter = d_delimiter](auto chr) {
return chr == quote || chr == new_line || chr == d_delimiter[0];
});
char* d_buffer = d_chars ? d_chars + d_offsets[idx] : nullptr;
offset_type bytes = 0;
if (quote_row) write_char(quote, d_buffer, bytes);
for (auto chr : d_str) {
if (chr == quote) write_char(quote, d_buffer, bytes);
write_char(chr, d_buffer, bytes);
}
if (quote_row) write_char(quote, d_buffer, bytes);
if (!d_chars) d_offsets[idx] = bytes;
}
};
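// For example, with ',' as the column delimiter the row  He said "hi"  is emitted as
// "He said ""hi""" (the row is quoted and the embedded quotes doubled), 1,5 is emitted as
// "1,5", and a plain row such as abc passes through unchanged.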
struct column_to_strings_fn {
// compile-time predicate that defines unsupported column types;
// based on the conditions used for instantiations of individual
// converters in strings/convert/convert_*.hpp;
//(this should have been a `variable template`,
// instead of a static function, but nvcc (10.0)
// fails to compile var-templs);
//
template <typename column_type>
constexpr static bool is_not_handled(void)
{
// Note: the case (not std::is_same<column_type, bool>::value)
    // is already covered by is_integral
//
return not((std::is_same<column_type, cudf::string_view>::value) ||
(std::is_integral<column_type>::value) ||
(std::is_floating_point<column_type>::value) ||
(cudf::is_fixed_point<column_type>()) || (cudf::is_timestamp<column_type>()) ||
(cudf::is_duration<column_type>()));
}
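  // In practice this leaves nested and dictionary types (e.g. list, struct and dictionary
  // columns) unhandled; they fall through to the CUDF_FAIL overload further below.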
explicit column_to_strings_fn(
csv_writer_options const& options,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
: options_(options), stream_(stream), mr_(mr)
{
}
// Note: `null` replacement with `na_rep` deferred to `concatenate()`
// instead of column-wise; might be faster
//
// Note: Cannot pass `stream` to detail::<fname> version of <fname> calls below, because they are
// not exposed in header (see, for example, detail::concatenate(tbl_view, separator, na_rep, mr,
// stream) is declared and defined in combine.cu); Possible solution: declare `extern`, or just
// declare a prototype inside `namespace cudf::strings::detail`;
// bools:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, bool>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_booleans(
column, options_.get_true_value(), options_.get_false_value(), stream_, mr_);
}
// strings:
//
template <typename column_type>
std::enable_if_t<std::is_same<column_type, cudf::string_view>::value, std::unique_ptr<column>>
operator()(column_view const& column_v) const
{
    // handle special characters: {delimiter, '\n', '"'} in row:
string_scalar delimiter{std::string{options_.get_inter_column_delimiter()}, true, stream_};
auto d_column = column_device_view::create(column_v, stream_);
escape_strings_fn fn{*d_column, delimiter.value(stream_)};
auto children = cudf::strings::detail::make_strings_children(fn, column_v.size(), stream_, mr_);
return make_strings_column(column_v.size(),
std::move(children.first),
std::move(children.second),
column_v.null_count(),
cudf::detail::copy_bitmask(column_v, stream_, mr_),
stream_,
mr_);
}
// ints:
//
template <typename column_type>
std::enable_if_t<std::is_integral<column_type>::value && !std::is_same<column_type, bool>::value,
std::unique_ptr<column>>
operator()(column_view const& column) const
{
return cudf::strings::detail::from_integers(column, stream_, mr_);
}
// floats:
//
template <typename column_type>
std::enable_if_t<std::is_floating_point<column_type>::value, std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_floats(column, stream_, mr_);
}
// fixed point:
//
template <typename column_type>
std::enable_if_t<cudf::is_fixed_point<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::strings::detail::from_fixed_point(column, stream_, mr_);
}
// timestamps:
//
template <typename column_type>
std::enable_if_t<cudf::is_timestamp<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
std::string format = [&]() {
if (std::is_same<cudf::timestamp_s, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%SZ"};
} else if (std::is_same<cudf::timestamp_ms, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"};
} else if (std::is_same<cudf::timestamp_us, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"};
} else if (std::is_same<cudf::timestamp_ns, column_type>::value) {
return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"};
} else {
return std::string{"%Y-%m-%d"};
}
}();
// handle the cases where delimiter / line-terminator can be
// "-" or ":", in which case we need to add quotes to the format
//
std::string delimiter{options_.get_inter_column_delimiter()};
std::string newline{options_.get_line_terminator()};
constexpr char const* dash{"-"};
constexpr char const* colon{":"};
if (delimiter == dash || newline == dash || delimiter == colon || newline == colon) {
format = "\"" + format + "\"";
}
return cudf::strings::detail::from_timestamps(column, format, stream_, mr_);
}
template <typename column_type>
std::enable_if_t<cudf::is_duration<column_type>(), std::unique_ptr<column>> operator()(
column_view const& column) const
{
return cudf::io::detail::csv::pandas_format_durations(column, stream_, mr_);
}
// unsupported type of column:
//
template <typename column_type>
std::enable_if_t<is_not_handled<column_type>(), std::unique_ptr<column>> operator()(
column_view const&) const
{
CUDF_FAIL("Unsupported column type.");
}
private:
csv_writer_options const& options_;
rmm::cuda_stream_view stream_;
rmm::mr::device_memory_resource* mr_;
};
} // unnamed namespace
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
csv_writer_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
writer::impl::impl(std::unique_ptr<data_sink> sink,
csv_writer_options const& options,
rmm::mr::device_memory_resource* mr)
: out_sink_(std::move(sink)), mr_(mr), options_(options)
{
}
// write the header: column names:
//
void writer::impl::write_chunked_begin(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
if ((metadata != nullptr) && (options_.is_enabled_include_header())) {
CUDF_EXPECTS(metadata->column_names.size() == static_cast<size_t>(table.num_columns()),
"Mismatch between number of column headers and table columns.");
std::string delimiter_str{options_.get_inter_column_delimiter()};
// avoid delimiter after last element:
//
std::stringstream ss;
    // guard against an empty column-name list: end() - 1 would be invalid in that case
    if (metadata->column_names.size() > 0) {
      std::copy(metadata->column_names.begin(),
                metadata->column_names.end() - 1,
                std::ostream_iterator<std::string>(ss, delimiter_str.c_str()));
      ss << metadata->column_names.back() << options_.get_line_terminator();
    } else {
      ss << options_.get_line_terminator();
    }
out_sink_->host_write(ss.str().data(), ss.str().size());
}
}
void writer::impl::write_chunked(strings_column_view const& str_column_view,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
// algorithm outline:
//
// for_each(strings_column.begin(), strings_column.end(),
// [sink = out_sink_](auto str_row) mutable {
// auto host_buffer = str_row.host_buffer();
  //        sink->host_write(host_buffer.data(), host_buffer.size());
// });//or...sink->device_write(device_buffer,...);
//
// added line_terminator functionality
//
CUDF_EXPECTS(str_column_view.size() > 0, "Unexpected empty strings column.");
cudf::string_scalar newline{options_.get_line_terminator()};
auto p_str_col_w_nl =
cudf::strings::detail::join_strings(str_column_view, newline, string_scalar("", false), stream);
strings_column_view strings_column{p_str_col_w_nl->view()};
auto total_num_bytes = strings_column.chars_size();
char const* ptr_all_bytes = strings_column.chars().data<char>();
if (out_sink_->is_device_write_preferred(total_num_bytes)) {
// Direct write from device memory
out_sink_->device_write(ptr_all_bytes, total_num_bytes, stream);
} else {
// copy the bytes to host to write them out
thrust::host_vector<char> h_bytes(total_num_bytes);
CUDA_TRY(cudaMemcpyAsync(h_bytes.data(),
ptr_all_bytes,
total_num_bytes * sizeof(char),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
out_sink_->host_write(h_bytes.data(), total_num_bytes);
}
// Needs newline at the end, to separate from next chunk
if (out_sink_->is_device_write_preferred(newline.size())) {
out_sink_->device_write(newline.data(), newline.size(), stream);
} else {
out_sink_->host_write(options_.get_line_terminator().data(),
options_.get_line_terminator().size());
}
}
void writer::impl::write(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
// write header: column names separated by delimiter:
// (even for tables with no rows)
//
write_chunked_begin(table, metadata, stream);
if (table.num_rows() > 0) {
// no need to check same-size columns constraint; auto-enforced by table_view
auto n_rows_per_chunk = options_.get_rows_per_chunk();
//
// This outputs the CSV in row chunks to save memory.
// Maybe we can use the total_rows*count calculation and a memory threshold
// instead of an arbitrary chunk count.
// The entire CSV chunk must fit in CPU memory before writing it out.
//
if (n_rows_per_chunk % 8) // must be divisible by 8
n_rows_per_chunk += 8 - (n_rows_per_chunk % 8);
CUDF_EXPECTS(n_rows_per_chunk >= 8, "write_csv: invalid chunk_rows; must be at least 8");
auto num_rows = table.num_rows();
std::vector<table_view> vector_views;
if (num_rows <= n_rows_per_chunk) {
vector_views.push_back(table);
} else {
auto const n_chunks = num_rows / n_rows_per_chunk;
std::vector<size_type> splits(n_chunks);
thrust::tabulate(splits.begin(), splits.end(), [n_rows_per_chunk](auto idx) {
return (idx + 1) * n_rows_per_chunk;
});
// split table_view into chunks:
vector_views = cudf::split(table, splits);
}
// convert each chunk to CSV:
//
column_to_strings_fn converter{options_, stream, rmm::mr::get_current_device_resource()};
for (auto&& sub_view : vector_views) {
// Skip if the table has no rows
if (sub_view.num_rows() == 0) continue;
std::vector<std::unique_ptr<column>> str_column_vec;
// populate vector of string-converted columns:
//
std::transform(sub_view.begin(),
sub_view.end(),
std::back_inserter(str_column_vec),
[converter](auto const& current_col) {
return cudf::type_dispatcher(current_col.type(), converter, current_col);
});
// create string table view from str_column_vec:
//
auto str_table_ptr = std::make_unique<cudf::table>(std::move(str_column_vec));
auto str_table_view = str_table_ptr->view();
// concatenate columns in each row into one big string column
// (using null representation and delimiter):
//
std::string delimiter_str{options_.get_inter_column_delimiter()};
auto str_concat_col = [&] {
if (str_table_view.num_columns() > 1)
return cudf::strings::detail::concatenate(str_table_view,
delimiter_str,
options_.get_na_rep(),
strings::separator_on_nulls::YES,
stream);
cudf::string_scalar narep{options_.get_na_rep()};
return cudf::strings::detail::replace_nulls(str_table_view.column(0), narep, stream);
}();
write_chunked(str_concat_col->view(), metadata, stream);
}
}
// finalize (no-op, for now, but offers a hook for future extensions):
//
write_chunked_end(table, metadata, stream);
}
void writer::write(table_view const& table,
const table_metadata* metadata,
rmm::cuda_stream_view stream)
{
_impl->write(table, metadata, stream);
}
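// Illustrative usage sketch of the public API that eventually reaches this writer. The exact
// builder setters are assumptions based on the cudf::io headers of this era and may differ;
// `tview` stands for a cudf::table_view of the data to write:
//
//   #include <cudf/io/csv.hpp>
//   cudf::io::table_metadata metadata;  // optionally carries the column names
//   auto options = cudf::io::csv_writer_options::builder(cudf::io::sink_info{"out.csv"}, tview)
//                    .metadata(&metadata)
//                    .rows_per_chunk(8192)
//                    .build();
//   cudf::io::write_csv(options);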
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
|