repo_name | file_path | content | extention |
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include "device.hpp"
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
void set_x(size_t x) { _x = x; };
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size)
return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr)
return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
/// Check whether the pointer is a device (virtual) pointer.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
/// This padding may be defined to some positive value to debug
/// out of bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and pointer points to this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <memory_region Memory, class T = byte_t> class memory_traits {
public:
static constexpr sycl::access::target target =
sycl::access::target::device;
static constexpr sycl::access_mode mode =
(Memory == constant) ? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
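// Illustrative note (not part of the original header): PITCH_DEFAULT_ALIGN
// rounds the row width up to a multiple of 32 bytes, so a request for
// x = 100 bytes per row yields pitch = (100 + 31) & ~0x1F = 128 bytes, and a
// 100 x 4 x 1 allocation actually reserves 128 * 4 = 512 bytes.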
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
int value, size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
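// Example (illustrative sketch, not part of the original header): zeroing a
// freshly allocated device buffer through the detail-level API, assuming a
// valid queue `q`:
//
//   void *dev = dpct::detail::dpct_malloc(1024, q);
//   dpct::detail::dpct_memset(q, dev, 0, 1024).wait();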
/// Set \p value to the 3D memory region pointed to by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction
direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] =
{{memcpy_direction::host_to_host,
memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
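// Example (illustrative, not part of the original header): with USM enabled,
// requesting `automatic` for a copy from a device allocation to a plain host
// pointer resolves through the table above to `device_to_host`:
//
//   float *dev = sycl::malloc_device<float>(16, q);  // device_only attribute
//   float host[16];                                  // host_only attribute
//   // direction_table[host_only][device_only] == device_to_host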
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size)
return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
// Compute the linear extent touched by a pitched copy so that the accessor
// range does not exceed the underlying buffer.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
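// Worked example (illustrative): for a pitched layout with pitch = 128 bytes
// and slice = pitch * height = 128 * 4 = 512 bytes, the element at
// id = (x=3, y=2, z=1) lies at byte offset
//   get_offset(sycl::id<3>(3, 2, 1), 512, 128) = 512*1 + 128*2 + 3 = 771.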
/// Copy a 3D matrix specified by \p size from the 3D matrix specified by
/// \p from_ptr and \p from_range to another specified by \p to_ptr and
/// \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
sycl::range<3> to_range, sycl::range<3> from_range,
sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
const std::vector<sycl::event> &_deps; // events the deferred free depends on
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// The destination may contain padding bytes whose contents are unknown but
// possibly meaningful, so first fill the temp buffer from the device.
std::vector<sycl::event>{
dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
device_to_host, dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(),
device_to_host, dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)), from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size,
[=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size,
[=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
size_t to_pitch, size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1),
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U> struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
};
} // namespace deprecated
inline void dpct_free(void *ptr,
const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr is a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template<class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr)
return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(
sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class that contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
/// Construct the accessor wrapper for memory pointed to by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
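// Example (illustrative sketch, not part of the original header; only
// applicable when DPCT_USM_LEVEL_NONE is defined, since the wrapper
// translates a virtual pointer; `n` is hypothetical):
//
//   void *dev = dpct::dpct_malloc(n * sizeof(float));
//   q.submit([&](sycl::handler &cgh) {
//     dpct::access_wrapper<float *> w(dev, cgh);
//     cgh.single_task([=] { w.get_raw_pointer()[0] = 1.0f; });
//   });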
/// Get the accessor for memory pointed to by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode>
get_access(const void *ptr, sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
/// Get the host pointer from a buffer that is mapped to the virtual pointer \p ptr.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T> static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr =
BufferOffset.first.get_host_access()
.get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data
dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
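// Example (illustrative sketch): allocating a pitched 3D block of
// 100 x 4 x 2 bytes; the returned pitched_data carries the aligned pitch.
//
//   dpct::pitched_data pd = dpct::dpct_malloc(sycl::range<3>(100, 4, 2));
//   size_t pitch = pd.get_pitch();  // 128 with the default 32-byte alignment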
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr,
sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed to by a batch of pointers in \p pointers which
/// are related to \p q after \p events completed.
///
/// \param pointers The pointers point to the device memory requested to be freed.
/// \param events The events to be waited.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
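// Example (illustrative sketch, not part of the original header): a full
// host -> device -> host round trip with the synchronous byte-copy API.
//
//   std::vector<float> src(256, 1.0f), dst(256);
//   void *dev = dpct::dpct_malloc(src.size() * sizeof(float));
//   dpct::dpct_memcpy(dev, src.data(), src.size() * sizeof(float),
//                     dpct::host_to_device);
//   dpct::dpct_memcpy(dst.data(), dev, dst.size() * sizeof(float),
//                     dpct::device_to_host);
//   dpct::dpct_free(dev);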
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
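// Example (illustrative sketch; `host_rows` is a hypothetical tightly packed
// host matrix of 4 rows x 100 bytes): copying it into a pitched device matrix.
//
//   size_t pitch;  // receives the aligned device row size in bytes
//   void *dev = dpct::dpct_malloc(pitch, /*x=*/100, /*y=*/4);
//   dpct::dpct_memcpy(dev, pitch, host_rows, /*from_pitch=*/100,
//                     /*x=*/100, /*y=*/4, dpct::host_to_device);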
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
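// Example (illustrative sketch): copying a 64 x 2 x 2 byte sub-box between
// two pitched 3D allocations, starting at the origin of both; the direction
// is deduced automatically.
//
//   dpct::pitched_data src = dpct::dpct_malloc(sycl::range<3>(128, 4, 4));
//   dpct::pitched_data dst = dpct::dpct_malloc(sycl::range<3>(128, 4, 4));
//   dpct::dpct_memcpy(dst, sycl::id<3>(0, 0, 0), src, sycl::id<3>(0, 0, 0),
//                     sycl::range<3>(64, 2, 2));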
/// Asynchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The from and to position info are specified
/// by \p from_pos and \p to_pos. The copied matrix size is specified by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
sycl::id<3> from_pos, sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory region to set. \p pitch is the bytes in the
/// linear dimension, including padding bytes. The function will return after the
/// memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory region size to set in the linear dimension.
/// \param y The memory region size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
/// Sets \p value to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the 2D memory region to set. \p pitch is the bytes in the
/// linear dimension, including padding bytes. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param value Value to be set.
/// \param x The memory region size to set in the linear dimension.
/// \param y The memory region size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory region to set. The function will return after the
/// memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory region size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p value to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the 3D memory region to set. The return of the function does NOT
/// guarantee the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param value Value to be set.
/// \param size The 3D memory region size to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <class T, memory_region Memory> class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory> class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
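// Example (illustrative sketch; `ptr`, `rows`, `cols`, `x`, `y` are
// hypothetical): the 2D accessor indexes row-major data, so acc[y][x]
// addresses element (y, x).
//
//   dpct::accessor<float, dpct::global, 2> acc(ptr, sycl::range<2>(rows, cols));
//   float v = acc[y][x];  // equivalent to ptr[y * cols + x]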
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(
const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false),
_host_ptr(nullptr), _device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
// Make sure that the mem_mgr and dev_mgr singletons are destructed later
// than this object.
detail::mem_mgr::instance();
dev_mgr::instance();
}
/// Constructor with range given as individual dimension arguments
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference)
dpct::dpct_free(_device_ptr);
if (_host_ptr)
std::free(_host_ptr);
}
/// Allocate memory with the default queue, and initialize it if an initial value was provided.
void init() {
init(dpct::get_default_queue());
}
/// Allocate memory with the specified queue, and initialize it if an initial value was provided.
void init(sycl::queue &q) {
if (_device_ptr)
return;
if (!_size)
return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
/// Re-bind this object to an existing device pointer \p src of \p size bytes.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
/// Get the memory pointer of the memory object: a virtual pointer when USM
/// is not used, and a device pointer when USM is used.
value_t *get_ptr() {
return get_ptr(get_default_queue());
}
/// Get the memory pointer of the memory object: a virtual pointer when USM
/// is not used, and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type
get_access(sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
: _size(size), _range(size / sizeof(T)), _reference(true),
_device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(
_size, q.get_device(), q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
} // namespace detail
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
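// Example (illustrative sketch, mirroring how migrated samples use these
// aliases; `KERNEL_LENGTH` and `h_kernel` are assumed to exist): a
// constant-memory array uploaded from the host in USM mode.
//
//   static dpct::constant_memory<float, 1> c_kernel(KERNEL_LENGTH);
//   q.memcpy(c_kernel.get_ptr(q), h_kernel,
//            KERNEL_LENGTH * sizeof(float)).wait();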
// dpct::deprecated:: is for functionality that was introduced for
// compatibility purposes but relies on deprecated C++ features, which have
// either been removed or will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
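// Example (illustrative sketch): using the deprecated USM host allocator with
// a standard container so its storage is host-accessible USM memory.
//
//   std::vector<float, dpct::deprecated::usm_host_allocator<float>> v(1024);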
class pointer_attributes {
public:
void init(const void *ptr,
sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error(
"dpct::pointer_attributes: only works for USM pointer.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type !=
sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type !=
sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device) ? ptr : nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() {
return memory_type;
}
const void *get_device_pointer() {
return device_pointer;
}
const void *get_host_pointer() {
return host_pointer;
}
bool is_memory_shared() {
return memory_type == sycl::usm::alloc::shared;
}
unsigned int get_device_id() {
return device_id;
}
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
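// Example (illustrative sketch; USM mode only, since init() throws when
// DPCT_USM_LEVEL_NONE is defined; `ptr` is some USM allocation):
//
//   dpct::pointer_attributes attrs;
//   attrs.init(ptr);
//   if (attrs.get_memory_type() == sycl::usm::alloc::device) { /* ... */ }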
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T> bool isnan(const T a) { return sycl::isnan(a); }
// TODO: Add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i)
f += a[i] * a[i];
return sycl::sqrt(f);
}
}
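// Illustrative note: the 3-element case lowers to sycl::fast_length on a
// sycl::float3, e.g. for a = {3.0f, 4.0f, 0.0f}, fast_length(a, 3) returns
// approximately 5.0f.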
/// Calculate the Euclidean length (L2 norm) of the input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T> inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i)
ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
compare_both(const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs an element-wise comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
compare(const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a comparison on each of the 2 elements; each element's compare
/// result is 0 (false) or 0xffff (true). Returns an unsigned int composed of
/// the two per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
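// Worked example (illustrative): for a = {1.0f, 2.0f}, b = {1.0f, 3.0f} and
// std::equal_to<>(), the per-element results are true and false, the negated
// short lanes become 0xffff and 0x0000, and the composed result is
// 0x0000ffff on a little-endian layout (lane 0 in the low half).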
/// Performs an element-wise unordered comparison of two 2-element values.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs an unordered comparison on each of the 2 elements; each element's
/// compare result is 0 (false) or 0xffff (true). Returns an unsigned int
/// composed of the two per-element results.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, element-wise, whether each of the 2 elements is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
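// Editor's note (illustrative sketch, not part of the original header): when a
// signed and an unsigned argument are mixed, the signed value is converted to
// unsigned first, so negative inputs wrap around before the comparison.
#if 0
inline void example_mixed_sign_min_max() {
  unsigned lo = min(-1, 2u); // -1 becomes 0xFFFFFFFFu, so lo == 2u
  unsigned hi = max(-1, 2u); // hi == 0xFFFFFFFFu (4294967295)
  (void)lo;
  (void)hi;
}
#endif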
/// Performs relu saturation.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T> inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f)
return 0.f;
return a;
}
template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs complex number multiply addition.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
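// Editor's worked example (illustrative, not part of the original header):
// with a = 1+2i, b = 3+4i and c = 5+6i stored as sycl::float2 {re, im},
// a*b + c = (1*3 - 2*4 + 5) + (1*4 + 2*3 + 6)i = 0 + 16i.
#if 0
inline sycl::float2 example_complex_mul_add() {
  return complex_mul_add(sycl::float2{1.f, 2.f}, sycl::float2{3.f, 4.f},
                         sycl::float2{5.f, 6.f}); // {0.f, 16.f}
}
#endif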
/// Performs 2 elements comparison and returns the bigger one. If either of
/// inputs is NaN, then return NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T> inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Performs 2 elements comparison and returns the smaller one. If either of
/// inputs is NaN, then return NaN.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T> inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
/// A sycl::abs wrapper functor.
struct abs {
template <typename T> auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
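// Editor's illustrative sketch (not part of the original header): saturated
// 16-bit lane-wise addition of two packed unsigned values using the add_sat
// functor above. Lane order follows the sycl::vec reinterpretation; the lane
// values below assume little-endian layout.
#if 0
inline unsigned example_vectorized_add_sat() {
  unsigned a = 0x7FFF0001u; // lanes {1, 32767}
  unsigned b = 0x00010002u; // lanes {2, 1}
  // Low lane: 1 + 2 = 3; high lane: 32767 + 1 saturates to 32767 (0x7FFF).
  return vectorized_binary<sycl::short2>(a, b, add_sat()); // 0x7FFF0003
}
#endif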
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized greater-than comparison result of the two values
template <typename S, typename T> inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T> inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the absolute differences of two values without modulo
/// overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the absolute differences of the two values' elements
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
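// Editor's worked example (illustrative, not part of the original header):
// byte-wise sum of absolute differences, assuming little-endian lane order.
#if 0
inline unsigned example_sum_abs_diff() {
  unsigned a = 0x07030501u; // bytes {1, 5, 3, 7}
  unsigned b = 0x02020202u; // bytes {2, 2, 2, 2}
  // |1-2| + |5-2| + |3-2| + |7-2| = 1 + 3 + 1 + 5 = 10
  return vectorized_sum_abs_diff<sycl::uchar4>(a, b);
}
#endif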
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/blas_utils.hpp |
//==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <utility>
#include <vector>
#include <thread>
namespace dpct {
/// Get the value of \p s.
/// Copy the data to the host synchronously, then return the data.
/// \param [in] s The pointer pointing to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array)
sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template<typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced)
_temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced)
return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x,
library_data_t x_type, int incx, const void *y,
library_data_t y_type, int incy, void *result,
library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const float *>(x), incx,
reinterpret_cast<const float *>(y), incy,
reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val,
data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val,
data_x, incx,
data_y, incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx,
data_y, incy, c_value,
s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
data_b, ldb, beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void
gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n,
int k, const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
stride_a, data_b, ldb, stride_b, beta_value,
data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k,
const T *alpha, const T *a, int lda, const T *b,
int ldb, const Tbeta *beta, T *c, int ldc) {
  // For a symmetric matrix, this function performs: C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function performs: C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() function performs: C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be updated before we call gemmt().
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a conjugate operation, but only nontrans,
    // trans and conjtrans are available. So we first do a conjtrans copy,
    // then apply a trans operation.
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
beta_value, data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, data_b, ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void
trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const void *alpha,
const void **a, int lda, void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info), uplo_info(uplo_info),
transpose_info(transpose_info), diag_info(diag_info),
value_info(value_info), groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices will be
/// overwritten by lower triangulars with unit diagonal elements and upper
/// triangulars.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size, scratchpad,
scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad,
scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb,
stride_b, batch_size, scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
}).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
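// Editor's usage sketch (illustrative, not part of the original header): LU
// factorization followed by a solve for a batch of 3x3 double systems. The
// pointers below are assumed to be device-accessible arrays prepared by the
// caller (a[i]: 3x3 column-major matrix, b[i]: one right-hand side, ipiv:
// batch_size*3 ints, info: batch_size ints).
#if 0
inline void example_lu_solve_batch(sycl::queue &q, double *a[], double *b[],
                                   int *ipiv, int *info, int batch_size) {
  getrf_batch_wrapper(q, 3, a, 3, ipiv, info, batch_size);
  getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, 3, 1,
                      const_cast<const double **>(a), 3, ipiv, b, 3, info,
                      batch_size);
  q.wait();
}
#endif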
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
    // Need to create a copy of the input matrices "a" to keep them unchanged.
    // Matrices "b" (copies of matrices "a") are used as the input and output
    // parameter in the oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf,
stride_ipiv, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
    // Need to create a copy of the input matrices "a" to keep them unchanged.
    // Matrices "b" (copies of matrices "a") are used as the input and output
    // parameter in the oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device,
exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalars.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue &exec_queue, int m, int n,
T *a[], int lda, T *tau[], int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(
q, n, x, incx, result);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
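// Editor's usage sketch (illustrative, not part of the original header):
// Euclidean norm of a float device vector; the result is written back to the
// host scalar before the call returns.
#if 0
inline float example_nrm2(sycl::queue &q, const float *x_dev, int n) {
  float result = 0.f;
  nrm2(q, n, x_dev, library_data_t::real_float, 1, &result,
       library_data_t::real_float);
  return result;
}
#endif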
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
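// Editor's usage sketch (illustrative, not part of the original header):
// conjugated dot product of two complex float device vectors.
#if 0
inline std::complex<float> example_dotc(sycl::queue &q,
                                        const std::complex<float> *x,
                                        const std::complex<float> *y, int n) {
  std::complex<float> result{0.f, 0.f};
  dotc(q, n, x, library_data_t::complex_float, 1, y,
       library_data_t::complex_float, 1, &result,
       library_data_t::complex_float);
  return result;
}
#endif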
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y, incy);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
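// Editor's usage sketch (illustrative, not part of the original header):
// y = alpha * x + y for float device vectors; alpha may live on the host,
// since get_value() copies it to the host as needed.
#if 0
inline void example_axpy(sycl::queue &q, const float *x, float *y, int n) {
  float alpha = 2.f;
  axpy(q, n, &alpha, library_data_t::real_float, x,
       library_data_t::real_float, 1, y, library_data_t::real_float, 1);
}
#endif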
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy,
const void *c, const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
a, lda, b, ldb, &beta_half, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
        q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb,
        &beta_float, c, ldc);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
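///
/// Example (an illustrative sketch; this overload requires USM, and a_ptrs,
/// b_ptrs, c_ptrs are assumed arrays of batch_size device pointers):
/// \code
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
///                  oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
///                  a_ptrs, dpct::library_data_t::real_float, m, b_ptrs,
///                  dpct::library_data_t::real_float, k, &beta, c_ptrs,
///                  dpct::library_data_t::real_float, m, batch_size,
///                  dpct::library_data_t::real_float);
/// \endcode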
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
b, ldb, beta, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
float>(q, a_trans, b_trans, m, n, k, &alpha_float,
a, lda, b, ldb, &beta_float, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
        q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb,
        &beta_half, c, ldc, batch_size);
batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
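///
/// Example (an illustrative sketch; d_a, d_b and d_c are assumed USM
/// allocations holding batch_size matrices packed back to back):
/// \code
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
///                  oneapi::mkl::transpose::nontrans, m, n, k, &alpha, d_a,
///                  dpct::library_data_t::real_float, m, (long long)m * k,
///                  d_b, dpct::library_data_t::real_float, k,
///                  (long long)k * n, &beta, d_c,
///                  dpct::library_data_t::real_float, m, (long long)m * n,
///                  batch_size, dpct::library_data_t::real_float);
/// \endcode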
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
stride_a, b, ldb, stride_b, beta, c, ldc,
stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
        q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb,
        stride_b, &beta_half, c, ldc, stride_c, batch_size);
&beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
}
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
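///
/// Example (an illustrative sketch; d_a, d_b and d_c are assumed USM device
/// pointers, and only the selected triangle of C is referenced):
/// \code
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::syrk(q, oneapi::mkl::uplo::upper, oneapi::mkl::transpose::nontrans,
///            n, k, &alpha, d_a, n, d_b, n, &beta, d_c, n);
/// \endcode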
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
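///
/// Example (an illustrative sketch; note that alpha has the matrix element
/// type T while beta has the separate Tbeta type):
/// \code
/// std::complex<float> alpha{1.0f, 0.0f};
/// float beta = 0.0f;
/// dpct::herk(q, oneapi::mkl::uplo::lower, oneapi::mkl::transpose::nontrans,
///            n, k, &alpha, d_a, n, d_b, n, &beta, d_c, n);
/// \endcode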
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A multiplies X on the left or on the right.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
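///
/// Example (an illustrative sketch; this overload requires USM, a_ptrs and
/// b_ptrs are assumed arrays of batch_size device pointers, and each
/// solution X_i overwrites the corresponding B_i):
/// \code
/// float alpha = 1.0f;
/// dpct::trsm_batch(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
///                  oneapi::mkl::transpose::nontrans,
///                  oneapi::mkl::diag::nonunit, m, n, &alpha, a_ptrs,
///                  dpct::library_data_t::real_float, m, b_ptrs,
///                  dpct::library_data_t::real_float, m, batch_size,
///                  dpct::library_data_t::real_float);
/// \endcode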
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
default:
    throw std::runtime_error("the combination of data types is unsupported");
}
#endif
}
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in] b Input matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [out] c Output matrices C.
/// \param [in] ldc Leading dimension of the matrices C.
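///
/// Example (an illustrative sketch; when b != c, B is first copied into C
/// and the product is then computed in place on C):
/// \code
/// float alpha = 1.0f;
/// dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
///            oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
///            m, n, &alpha, d_a, m, d_b, m, d_c, m);
/// \endcode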
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
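///
/// Example (an illustrative sketch inside a SYCL kernel; counter is an
/// assumed USM device allocation of unsigned int):
/// \code
/// q.parallel_for(sycl::range<1>(n), [=](sycl::id<1>) {
///   dpct::atomic_fetch_add(counter, 1u);
/// });
/// \endcode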
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically set the value stored in \p addr to \p operand if the old value
/// stored in \p addr is equal to zero or greater than \p operand; otherwise
/// decrease the value stored in \p addr by one.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
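///
/// Example (an illustrative sketch; with operand 7 the stored value cycles
/// downward through [0, 7], wrapping back to 7 after it reaches 0):
/// \code
/// unsigned int old = dpct::atomic_fetch_compare_dec(slot, 7u);
/// \endcode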
template <sycl::access::address_space addressSpace =
              sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand))
break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if the old value stored
/// in \p addr is less than \p operand; otherwise set the value stored in
/// \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
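///
/// Example (an illustrative sketch; with operand 7 the stored value cycles
/// upward through [0, 7], wrapping back to 0 after it reaches 7):
/// \code
/// unsigned int old = dpct::atomic_fetch_compare_inc(slot, 7u);
/// \endcode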
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0))
break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if the old value stored
/// in \p addr is less than \p operand; otherwise set the value stored in
/// \p addr to 0.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int
atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The multi_ptr to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
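///
/// Example (an illustrative sketch of a spin-lock acquire; lock is an
/// assumed USM int allocation initialized to 0):
/// \code
/// while (dpct::atomic_compare_exchange_strong(lock, 0, 1) != 0) {
///   // spin: the previous value was nonzero, so another owner holds the lock
/// }
/// \endcode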
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomic extension to implement standard APIs in std::atomic
namespace detail {
template <typename T> struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic {
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
  /// Constructor with initial value.
  constexpr atomic(T d) noexcept : __d(d) {}
/// atomically replaces the value of the referenced object with a non-atomic argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
  /// Atomically subtracts the argument from the value stored in the referenced
  /// object and returns the value held previously.
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
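// Example (illustrative sketch, not part of the original header): an
// atomic-max built from the compare-exchange loop above, assuming the
// enclosing class template is dpct::atomic<T> and that it also provides the
// usual load() member, as in the upstream dpct headers.
//
//   inline void atomic_max(dpct::atomic<int> &a, int v) {
//     int old = a.load();
//     // On failure, compare_exchange_strong refreshes `old` with the current
//     // value, so the loop re-tests against the latest contents.
//     while (old < v && !a.compare_exchange_strong(old, v)) {
//     }
//   }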
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1>,
/// oneapi::mkl::rng::device::mrg32k3a<4>,
/// oneapi::mkl::rng::device::philox4x32x10<1>,
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t> class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \tparam distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
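// Example (illustrative sketch, not part of the original header): drawing a
// vector of four uniform floats per work-item inside a SYCL kernel; the
// skip-ahead offset (4 * global id) decorrelates the per-work-item streams.
// `q`, `seed`, `out` and `n` are assumed to come from the caller.
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::item<1> it) {
//     rng_generator<oneapi::mkl::rng::device::philox4x32x10<4>> gen(
//         seed, {4 * it.get_id(0)});
//     sycl::vec<float, 4> r =
//         gen.generate<oneapi::mkl::rng::device::uniform<float>, 4>();
//     out[it.get_id(0)] = r.x();
//   });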
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
  /// Skip ahead the given number of random numbers.
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
  /// Skip ahead the given number of random numbers.
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return The pointer of random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  default:
    throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
                             "Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
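// Example (illustrative sketch, not part of the original header): filling a
// USM allocation with gaussian floats through the type-erased interface.
// `q`, `out` and `n` are assumed to come from the surrounding application.
//
//   host_rng_ptr gen = create_host_rng(random_engine_type::mrg32k3a);
//   gen->set_queue(&q);
//   gen->set_seed(42);
//   gen->generate_gaussian(out, n, /*mean=*/0.0f, /*stddev=*/1.0f);
//   q.wait();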
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
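// Example (illustrative sketch, not part of the original header): a dot
// product over USM pointers `a` and `b` of length `n`, using a oneDPL device
// policy built from an existing queue `q`.
//
//   float dot = dpct::inner_product(
//       oneapi::dpl::execution::make_device_policy(q), a, a + n, b, 0.0f);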
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using make_transform_output_iterator(). Used to apply the supplied
// transform function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function may
// be applied on write, resulting in a transform_output_iterator.
template <typename _UnaryFunc> struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T> auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp> class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
  // There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
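// Example (illustrative sketch, not part of the original header): adding a
// constant to every element without materializing a second input range;
// `policy`, `first` and `last` are assumed to come from the caller.
//
//   auto fives = dpct::make_constant_iterator(5);
//   std::transform(policy, first, last, fives, first, std::plus<int>());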
// key_value_pair class to represent a key and value, specifically a
// dereferenced arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp> class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp> struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp>
operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T> struct __zip_iterator_impl;
template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept std::tuple type as template argument for
// compatibility purpose. Please use oneapi::dpl::zip_iterator if you want to
// pass iterator's types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator.
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
  // signal to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator
operator+(difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
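// Example (illustrative sketch, not part of the original header): pairing
// each element with its position, e.g. as the input of an arg-min reduction.
//
//   const float data[] = {3.f, 1.f, 2.f};
//   dpct::arg_index_input_iterator<const float *> it(data);
//   auto kv = *(it + 1); // kv.key == 1, kv.value == 1.f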
template <typename IterT> struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
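// Example (illustrative sketch, not part of the original header): ping-pong
// buffering between passes of an out-of-place algorithm; swap() flips which
// pointer first() and second() return. `pass` is a hypothetical helper that
// reads its first argument and writes its second.
//
//   dpct::io_iterator_pair<int *> bufs(d_in, d_tmp);
//   pass(bufs.first(), bufs.second());
//   bufs.swap(); // the previous output becomes the next input
//   pass(bufs.first(), bufs.second());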
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value));
}
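// Example (illustrative sketch, not part of the original header): masked
// replacement; element i is overwritten with the new value when the
// predicate holds for mask[i], regardless of the element's own value.
//
//   int data[] = {1, 2, 3, 4};
//   const int mask[] = {0, 1, 0, 1};
//   dpct::replace_if(policy, data, data + 4, mask,
//                    [](int m) { return m != 0; }, -1);
//   // data -> {1, -1, 3, -1}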
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
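// Example (illustrative sketch, not part of the original header): compacting
// runs of equal keys while keeping the first value of each run; the returned
// pair marks the new logical ends of both ranges.
//
//   int keys[] = {1, 1, 2, 2, 3};
//   int vals[] = {10, 11, 20, 21, 30};
//   auto ends = dpct::unique(policy, keys, keys + 5, vals);
//   // keys -> {1, 2, 3, ...}, vals -> {10, 20, 30, ...}
//   // ends.first == keys + 3, ends.second == vals + 3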
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
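// Example (illustrative sketch, not part of the original header): gather
// computes result[i] = input[map[i]]; scatter above is the inverse, writing
// result[map[i]] = input[i].
//
//   const int map[] = {3, 0, 2};
//   dpct::gather(policy, map, map + 3, input, out);
//   // out -> {input[3], input[0], input[2]}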
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; },
[=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
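// Example (illustrative sketch, not part of the original header): merging two
// sorted key ranges while carrying their payloads; values travel with their
// keys through the zip iterators.
//
//   int k1[] = {1, 3}, k2[] = {2};
//   char v1[] = {'a', 'c'}, v2[] = {'b'};
//   int kout[3]; char vout[3];
//   dpct::merge(policy, k1, k1 + 2, k2, k2 + 1, v1, v2, kout, vout);
//   // kout -> {1, 2, 3}, vout -> {'a', 'b', 'c'}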
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
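// Example (illustrative sketch, not part of the original header): sorting
// values by key; both ranges are permuted together through the zip iterator.
//
//   int keys[] = {3, 1, 2};
//   char vals[] = {'c', 'a', 'b'};
//   dpct::sort(policy, keys, keys + 3, vals);
//   // keys -> {1, 2, 3}, vals -> {'a', 'b', 'c'}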
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
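// Note: despite its name, for_each_index does not read the input elements; it
// writes unary_op(i) to first[i] for each index i in [0, last - first).
// Sketch (hypothetical):
//   dpct::for_each_index(policy, out, out + n,
//                        [](auto i) { return i * i; }); // out[i] == i * i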
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
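// Note: only the keys take part in the intersection; the second sequence's
// values are paired with a discard_iterator, so every matched key carries the
// value from the first sequence. Sketch (hypothetical):
//   keys1 = {1, 2, 4}, values1 = {10, 20, 40}, keys2 = {2, 4, 5}
//   -> keys_result = {2, 4}, values_result = {20, 40}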
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
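// Usage sketch (hypothetical): a key present in both inputs is emitted once,
// taking its value from the first sequence (std::set_union semantics):
//   keys1 = {1, 3}, values1 = {10, 30}; keys2 = {2, 3}, values2 = {200, 300}
//   -> keys_result = {1, 2, 3}, values_result = {10, 200, 30}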
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
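// Usage sketch (hypothetical): routing elements through a separate mask.
//   in = {5, 6, 7, 8}, mask = {1, 0, 1, 0}, p tests the mask element
//   -> out_true = {5, 7}, out_false = {6, 8}; the returned pair points one
//      past the last element written to each output.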
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1>
partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
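// Note: partition simply forwards to stable_partition above, so the relative
// order inside each group is preserved; the mask itself is not reordered
// because stable_partition copies it to a temporary before the zipped sort.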
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms each key to a specific bit range and sorts by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort path and costs some performance,
  // but it is required to transform the key to the desired bit range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
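// Worked example (illustrative, assuming translate_key extracts the bits in
// [begin_bit, end_bit)): with begin_bit = 4 and end_bit = 8, keys 0x13 and
// 0x1F both map to 0x1 and compare equal, while 0x23 maps to 0x2 and sorts
// after them.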
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A comparison lambda (rather than std::greater()) keeps -0.0 and 0.0
      // stable, at the cost of some performance: radix sort will not be used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A comparison lambda (rather than std::less()) keeps -0.0 and 0.0
      // stable, at the cost of some performance: radix sort will not be used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms the key of each pair to a specific bit range and sorts the pairs
// by the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort path and costs some performance,
  // but it is required both to transform the key to the desired bit range
  // and to select the key from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparison operator other than plain std::greater() or
  // std::less() prevents the radix-sort path and costs some performance,
  // but it is required to select the key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
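// Dispatch arithmetic (illustrative): for 32-bit keys with begin_bit = 0 and
// end_bit = 12, num_bytes = (12 - 0 - 1) / 8 + 1 == 2, so the transformed key
// is sorted as uint16_t. A full-width range (clipped_end_bit -
// clipped_begin_bit == 32) skips the transform and takes sort_only_pairs.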
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out that is discarded afterwards; the memory
  // footprint could be reduced by a specialized iterator holding a single
  // unchanging dummy key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void
mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n, int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size exceeds a work-group, the whole work-group
    // cooperatively marks each segment.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size exceeds half a sub-group, each sub-group
    // cooperatively marks one segment at a time.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to a sub-group, a single
    // work-item marks each segment.
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
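// Example of the marking (hypothetical): with n = 6, begin_offsets = {0, 3}
// and end_offsets = {3, 6}, the segments buffer becomes {0, 0, 0, 1, 1, 1};
// the three branches above only change how those writes are parallelized.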
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by key while tracking which segment each element came from.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Sort by segment id with a stable sort so each segment's keys
  // come back in order.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
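// Worked example (hypothetical, assuming the segment sort is stable as the
// comments above rely on): keys = {3, 1, 4, 2} with segment ids {0, 0, 1, 1}.
// Part 1 yields keys_temp = {1, 2, 3, 4} and segments_sorted = {0, 1, 0, 1};
// part 2 sorts by segment id, producing keys_out = {1, 3, 2, 4}, i.e. each
// segment ({1, 3} and {2, 4}) comes back sorted in place.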
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by key while tracking which segment each element came from.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Sort by segment id with a stable sort so each segment's keys
  // come back in order.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
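// Usage sketch (hypothetical): io_iterator_pair models CUB-style double
// buffering. The sort reads from keys.first()/values.first() and writes to
// keys.second()/values.second(); passing do_swap_iters = true swaps each
// pair afterwards so that .first() then refers to the freshly sorted data.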
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters)
keys.swap();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the device's available compute capacity.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, limiting overhead
  {
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else // reasonable catch-all: two full device-wide sorts
{
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance across the device's available compute capacity.
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel sorts when the total
                              // number of sorts is small, limiting overhead
  {
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else // reasonable catch-all: two full device-wide sorts
{
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
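// Usage sketch (hypothetical): input = {4, 9, 2}. reduce_argmax writes a
// single key_value_pair through `output` with key == 1 (the winning index)
// and value == 9; reduce_argmin below is symmetric for the minimum.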
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1>
equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end,
const ValueLessComparable &value, StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
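// Usage sketch (hypothetical): data = {1, 2, 2, 2, 5} sorted ascending.
//   auto r = dpct::equal_range(policy, data, data + 5, 2);
//   // r == {data + 1, data + 4}; computed with one device lower_bound plus
//   // one upper_bound search over a single-element value sequence.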
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
} // end namespace dpct
#endif
//==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable
{
using type = T;
};
template <>
struct make_allocatable<void>
{
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T> class device_pointer;
#endif
template <typename T> struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
device_reference(const pointer &input) : value((*input).value) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
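// Minimal sketch (illustrative): device_reference is the proxy produced when
// dereferencing a device_pointer<T>; writes through it update the referenced
// storage:
//   int x = 1;
//   dpct::device_reference<int> r(x);
//   r += 5; // x == 6, via the stored T& member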
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T> void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr).alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // buffer has no default ctor, so we pass a zero range to create an empty buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T> class device_iterator;
template <typename ValueType, typename Derived> class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
  // Default-initialize to null so a default-constructed pointer is never
  // indeterminate.
  device_pointer_base() : ptr(nullptr) {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T> class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
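// Usage sketch (illustrative, not part of the original header; assumes USM
// mode, i.e. DPCT_USM_LEVEL_NONE is not defined, and a hypothetical device
// array `d` of 8 ints): device_iterator models a random-access iterator, so
// it can be handed to oneDPL algorithms.
//
//   dpct::device_iterator<int> first(d, 0), last(d, 8);
//   oneapi::dpl::sort(
//       oneapi::dpl::execution::make_device_policy(dpct::get_default_queue()),
//       first, last);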
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
// Constructs `count` copies of `value` in the storage addressed by `p` and
// returns a pointer one past the last constructed element.
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
                             const std::size_t count = 1) {
  std::vector<T> result(count, value);
  p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
  return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
// No-op in this implementation: the underlying allocation is not released
// here.
template <typename T> void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
template <typename T> device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
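// Usage sketch (illustrative, not part of the original header; assumes USM
// mode): allocate device storage for four floats, copy host data in through
// the raw pointer, then release the handle (free_device is a no-op here).
//
//   sycl::queue q = dpct::get_default_queue();
//   auto p = dpct::malloc_device<float>(4);  // room for 4 floats
//   float host[4] = {0.f, 1.f, 2.f, 3.f};
//   q.memcpy(dpct::get_raw_pointer(p), host, sizeof(host)).wait();
//   dpct::free_device(p);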
template <typename T> const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T> T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T> const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T> T &get_raw_reference(T &ref) {
return ref;
}
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include "memory.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include "../device.hpp"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA> operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
  ~device_vector() { _alloc.deallocate(_storage, _capacity); }
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()), _size(other.size()),
_capacity(other.capacity()), _storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
  template <typename OtherAllocator>
  device_vector(const device_vector<T, OtherAllocator> &v)
      : _alloc(get_default_queue()), _size(v.size()), _capacity(v.capacity()) {
    // Deep-copy the source elements instead of aliasing its storage, so both
    // vectors can be destroyed independently.
    _storage = _alloc.allocate(_capacity);
    ::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
                v.real_begin(), v.real_begin() + v.size(), begin());
  }
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void
assign(InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
// override (erase) subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
    _size -= n;
    // Per std::vector semantics, return an iterator to the element that
    // followed the erased range (it now lives at first's index).
    return begin() + first.get_idx();
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
      // allocate temporary storage for the elements after the insertion point
      auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
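// Usage sketch (illustrative, not part of the original header; assumes the
// default shared-USM allocator): device_vector mirrors the std::vector
// interface on device memory.
//
//   dpct::device_vector<float> v(16, 1.0f);  // 16 device elements, all 1.0f
//   v.push_back(2.0f);                       // grows to 17 elements
//   v[0] = 3.0f;                             // proxy write via device_reference
//   std::vector<float> host = v;             // implicit device-to-host copy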
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA> operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
  const T *real_begin() const {
    return (detail::mem_mgr::instance()
                .translate_ptr(_storage)
                .buffer.template get_access<sycl::access_mode::read_write>())
        .get_pointer();
  }
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void
assign(InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
// override (erase) subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
    resize(_size - n);
    // Per std::vector semantics, return an iterator to the element that
    // followed the erased range (it now lives at first's index).
    return begin() + first.get_idx();
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
      // allocate temporary storage for the elements after the insertion point
      Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <sycl/sycl.hpp>
#include <stdexcept>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
  // Seed the group-level scan with init so that every work-item's prefix
  // includes it; for work-item 0 the scan yields init itself.
  T exclusive_result = detail::__exclusive_scan_over_group(
      item.get_group(), result, init, binary_op);

  T input = inputs[0];
  outputs[0] = exclusive_result;
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
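// Usage sketch (illustrative, not part of the original header): inside an
// nd_range kernel, each work-item contributes VALUES_PER_THREAD elements and
// receives the group-wide exclusive prefix seeded with init.
//
//   int in[4] = {/* per-work-item values */};
//   int out[4];
//   dpct::group::exclusive_scan(item, in, out, /*init=*/0, sycl::plus<int>());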
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns exclusive scan of the first i work-items, where item is the i-th
/// work-item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group; it returns the initial value of the resulting scan of the
/// work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
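// Usage sketch (illustrative, not part of the original header): the callback
// form seeds each group's scan with a value computed once per group, e.g. a
// running prefix carried across consecutive tiles processed by the same
// group. Hypothetical carry_in functor:
//
//   struct carry_in {
//     int running = 0;
//     int operator()(int group_aggregate) {
//       int prefix = running;        // prepended to this group's scan
//       running += group_aggregate;  // carry for the next tile
//       return prefix;
//     }
//   };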
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT> struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false> class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void
rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void
exclusive_downsweep(const Item &item, packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U> struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U> struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U> struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U> struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T> struct traits : base_traits<T, T> {};
template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <> struct traits<int> : base_traits<int, uint32_t> {};
template <> struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N> struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements scatter to blocked exchange pattern used in radix sort algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD> class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void
scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void
sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0,
int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit)
break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
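// Usage sketch (illustrative, not part of the original header): sorting four
// uint32_t keys per work-item with 128 work-items per group; the local
// scratch is sized by get_local_memory_size.
//
//   using sorter = dpct::group::radix_sort<uint32_t, 4>;
//   q.submit([&](sycl::handler &cgh) {
//     sycl::local_accessor<uint8_t, 1> scratch(
//         sycl::range<1>(sorter::get_local_memory_size(128)), cgh);
//     cgh.parallel_for(sycl::nd_range<1>(1024, 128),
//                      [=](sycl::nd_item<1> item) {
//                        uint32_t keys[4] = {/* per-work-item keys */};
//                        sorter(&scratch[0]).sort(item, keys);
//                      });
//   });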
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T
reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
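// Usage sketch (illustrative, not part of the original header): each
// work-item folds its own values first, then a single group reduction
// combines the per-work-item partial results.
//
//   int vals[4] = {/* per-work-item values */};
//   int total = dpct::group::reduce(item, vals, sycl::plus<int>());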
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce number of work-items at the start of the sub-group
/// to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. The inclusive scan of the input elements assigned to
/// work-items in the group is written to outputs.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
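// Usage sketch (illustrative, not part of the original header): the inclusive
// variant writes, for each element, the running total including that element.
//
//   int in[4] = {1, 1, 1, 1};
//   int out[4];
//   dpct::group::inclusive_scan(item, in, out, sycl::plus<int>());
//   // work-item 0 gets {1, 2, 3, 4}; work-item 1 gets {5, 6, 7, 8}; ...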
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param group_aggregate group-wide aggregate of all inputs in the
/// work-items of the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
/// \param prefix_callback_op functor invoked by the first work-item in the
/// group that returns the initial value in the resulting scan of the
/// work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in
/// the group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
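// Usage sketch (hypothetical): reduce two segments [0,3) and [3,5) of a
// 5-element device array, one work-group of 128 work-items per segment.
// The inputs, outputs and offset arrays are device-accessible (e.g. USM):
//
//   // in = {1,2,3,4,5}, begins = {0,3}, ends = {3,5}
//   dpct::device::segmented_reduce<128>(q, in, out, 2, begins, ends,
//                                       sycl::plus<int>(), 0);
//   q.wait(); // out = {6, 9}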
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts> struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp> struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp> struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
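// For example, __is_native_op<int, sycl::plus<int>>::value and
// __is_native_op<int, sycl::plus<void>>::value are both true, while a
// user-defined reduction functor yields false and is routed to the
// scratchpad-based path in segmented_reduce below.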
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this
/// experimental feature supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored
/// \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment
/// \param end_offsets Pointer to the set of indices that are one past the
/// last element in each segment
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction.
/// \param init initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_convolutionSeparable_SYCLmigration/01_dpct_output/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T> struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T> struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp> class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with a raw memory buffer,
// not an initialized array, because initialization/destruction
// would make the operation at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp> class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp> class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName> struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less> struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda: [pred](Ref a) { return pred(get<1>(a)); }
template <typename Predicate> struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate> struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T> struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T> result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
// [binary_pred](Ref a, Ref b) { return binary_pred(get<0>(a), get<0>(b)); }
template <typename Predicate> struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T> result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate> struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<0>(t)))
get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<1>(t)))
get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T> void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t)))
get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// This following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to be fit
// into the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N> struct uint_byte_map {};
template <> struct uint_byte_map<1> { using type = uint8_t; };
template <> struct uint_byte_map<2> { using type = uint16_t; };
template <> struct uint_byte_map<4> { using type = uint32_t; };
template <> struct uint_byte_map<8> { using type = uint64_t; };
template <typename T> struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT> class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
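// Worked example (for illustration): translate_key<int8_t, uint8_t>(0, 8)
// XORs in the sign bit, making signed order monotonic in unsigned space:
//   -1 (0xFF) -> 0x7F,  0 (0x00) -> 0x80,  1 (0x01) -> 0x81
// so -1 < 0 < 1 maps to 0x7F < 0x80 < 0x81, which a radix pass can compare
// directly.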
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
// remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the current diff_time
//! summation variable. Also increment the number of times this clock has
//! been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux (and Mac OSX) specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed time to the current diff_time
//! summation variable. Also increment the number of times this clock has
//! been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer receiving the new timer, 0 if the creation
//! failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface timer to return the average time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
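////////////////////////////////////////////////////////////////////////////////
//! Usage sketch (hypothetical): time a section of host code.
//!
//!   StopWatchInterface *timer = NULL;
//!   sdkCreateTimer(&timer);
//!   sdkStartTimer(&timer);
//!   // ... work to be timed ...
//!   sdkStopTimer(&timer);
//!   printf("elapsed: %f ms\n", sdkGetTimerValue(&timer));
//!   sdkDeleteTimer(&timer);
////////////////////////////////////////////////////////////////////////////////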
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Template function that parses a command-line argument value and converts it
// to type T
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
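// Usage sketch (hypothetical): given argv = {"app", "--n=32", "--file=in.txt"},
//
//   int n = getCmdLineArgumentInt(argc, argv, "n");      // -> 32
//   char *name = NULL;
//   if (getCmdLineArgumentString(argc, argv, "file", &name)) {
//     // name now points at "in.txt" inside argv
//   }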
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/helper_image.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter to unsigned char / unsigned byte from type T
template <class T>
struct ConverterToUByte;
//! Data converter to unsigned char / unsigned byte from unsigned char
template <>
struct ConverterToUByte<unsigned char> {
//! Conversion operator (essentially a passthrough)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter to unsigned char / unsigned byte from float
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
    if (*w != width || *h != height) {
      std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
      fclose(fp);
      return false;
    }
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
  // the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
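// Usage sketch (hypothetical): load a PGM image into a float buffer scaled
// to [0, 1] by ConverterFromUByte<float>:
//
//   float *img = NULL; // NULL lets sdkLoadPGM allocate the buffer
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM("input.pgm", &img, &w, &h)) {
//     // img[0 .. w*h-1] now holds the pixel data
//     free(img);
//   }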
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \a filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
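// Illustrative use of sdkReadFile (a sketch, not part of the original SDK
// header; "signal.dat" and exampleReadSignal are hypothetical names).
// Passing a NULL *data lets the function allocate the buffer and report
// its length through len.
inline bool exampleReadSignal(unsigned int *out_len) {
  float *signal = NULL;
  unsigned int len = 0;
  if (!sdkReadFile("signal.dat", &signal, &len, /*verbose=*/false)) {
    return false;
  }
  *out_len = len;
  free(signal);  // the caller owns the malloc'd buffer
  return true;
}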
//////////////////////////////////////////////////////////////////////////////
//! Read file \filename and return the data
//! @return true if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
  if (fh == NULL) {
    if (verbose) {
      std::cerr << "sdkReadFileBlocks() : Opening file failed." << std::endl;
    }
    return false;
  }
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements to write
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
  // Note: the append parameter is currently ignored; out|ate opens an
  // existing file positioned at the end without truncating it.
  std::fstream fh(filename, std::fstream::out | std::fstream::ate);
  if (verbose) {
    std::cerr << "sdkWriteFile() : Open file " << filename
              << " for write/append." << std::endl;
  }
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
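// Illustrative call (a sketch, not part of the original SDK header;
// "result.dat" is a hypothetical file name): write len floats, recording
// the comparison epsilon in the leading "# ..." line:
//   sdkWriteFile("result.dat", result, len, 0.001f, /*verbose=*/false);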
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
  if (threshold == 0.0f) {
    return result;
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return len * threshold > error_count;
  }
}
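// Threshold semantics, illustratively (not part of the original SDK
// header): with threshold == 0.0f every element must lie within
// +/- epsilon; a non-zero threshold tolerates a fraction of mismatches,
// e.g.
//   compareData<float, float>(ref, res, 1024, 0.01f, 0.05f);
// still passes as long as fewer than about 5% of the 1024 elements differ
// by more than 0.01f.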
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold percentage of elements allowed to mismatch for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
  if (threshold == 0.0f) {
    if (error_count) {
      printf("total # of errors = %d\n", error_count);
    }
    return error_count == 0;
  } else {
    if (error_count) {
      printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
             static_cast<float>(error_count) * 100 / static_cast<float>(len),
             error_count);
    }
    return len * threshold > error_count;
  }
}
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
  FILE *fp;
  if (FOPEN_FAIL(FOPEN(fp, filename, "wb"))) {
    printf("sdkDumpBin: unable to open <%s> for writing\n", filename);
    return;
  }
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
      printf(
          "> compareBin2Bin <unsigned int> nelements=%d,"
          " epsilon=%4.2f, threshold=%4.2f\n",
          nelements, epsilon, threshold);
      // fread takes (ptr, element_size, element_count, stream); report the
      // byte count actually read from each file
      fsize = fread(src_buffer, sizeof(unsigned int), nelements, src_fp);
      printf("   src_file <%s>, size=%d bytes\n", src_file,
             static_cast<int>(fsize * sizeof(unsigned int)));
      fsize = fread(ref_buffer, sizeof(unsigned int), nelements, ref_fp);
      printf("   ref_file <%s>, size=%d bytes\n", ref_file_path,
             static_cast<int>(fsize * sizeof(unsigned int)));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
  if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
    std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
    return false;
  }
  float normRef = sqrtf(ref);
  float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
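// In formula form, sdkCompareL2fe() accepts when the relative L2 error
//   sqrt(sum_i (reference[i] - data[i])^2) / sqrt(sum_i reference[i]^2)
// is strictly below epsilon.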
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
  if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
                << "\n";
    }
    return false;
  }
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
                << "," << src_height << ")vs(" << ref_width << ","
                << ref_height << ")\n";
    }
    // comparing differently sized buffers would read out of bounds
    return false;
  }
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
  return error_count == 0;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
  if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
                << "\n";
    }
    return false;
  }
  if (src_height != ref_height || src_width != ref_width) {
    if (verboseErrors) {
      std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
                << "," << src_height << ")vs(" << ref_width << ","
                << ref_height << ")\n";
    }
    // comparing differently sized buffers would read out of bounds
    return false;
  }
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
  return error_count == 0;
}
#endif // COMMON_HELPER_IMAGE_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
//! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
//! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
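////////////////////////////////////////////////////////////////////////////////
//! Illustrative use of the macros above (a sketch, not part of the original
//! header; exampleRequire is a hypothetical name). throw_it() captures
//! __FILE__ and __LINE__ automatically, so call sites stay one line.
////////////////////////////////////////////////////////////////////////////////
inline void exampleRequire(bool ok) {
  if (!ok) {
    RUNTIME_EXCEPTION("precondition failed");
  }
}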
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not meant for
  // performance-critical / release builds anyway
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files;
// please refer to the CUDA examples for the needed CUDA headers, which may
// change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:2: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
           int const line) {
  // Intentionally empty after migration: SYCL reports errors via exceptions
  // rather than status codes, so there is nothing to inspect here.
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:3: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but not exit program incase error detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:5: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
  // Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
  typedef struct dpct_type_554348 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the SM version in the table, fall back to the last
  // known entry so the sample keeps running
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
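// Example lookups against the table above: SM 8.6 (0x86, Ampere) yields
// 128 cores per SM, while SM 7.0 (0x70, Volta) yields 64.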
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_876740 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the SM version in the table, fall back to the last
  // known entry so the sample keeps running
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
/*
DPCT1003:7: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:8: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors(
(major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
0));
checkCudaErrors(
(minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
0));
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:10: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
/*
DPCT1003:11: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(devID), 0));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
/*
DPCT1003:12: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors((major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version(),
0));
checkCudaErrors((minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version(),
0));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:14: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors((multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units(),
0));
dpct::err0 result = (clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency(),
0);
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:15: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
/*
DPCT1003:16: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(devID), 0));
int major = 0, minor = 0;
checkCudaErrors((
major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
0));
checkCudaErrors((
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
0));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
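// Typical call from a sample's main() (a sketch):
//   int devID = findCudaDevice(argc, (const char **)argv);
// A "-device=N" command-line flag selects device N explicitly; otherwise
// the device with the highest estimated GFLOPS is used.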
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
/*
DPCT1003:17: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:18: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors((integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated(),
0));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:19: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:20: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
/*
DPCT1003:21: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(current_device), 0));
int major = 0, minor = 0;
checkCudaErrors((major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version(),
0));
checkCudaErrors((minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version(),
0));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(
(major = dpct::dev_mgr::instance().get_device(dev).get_major_version(),
0));
checkCudaErrors(
(minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version(),
0));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/02_sycl_migrated/Samples/0_Introduction/concurrentKernels/concurrentKernels.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also
// illustrates how to introduce dependencies between CUDA streams with the
// cudaStreamWaitEvent function.
//
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <time.h>
#include <chrono>
// This is a kernel that does no real work but spins for a fixed number of
// iterations to give the scheduler something to overlap
void count_block(long *d_o, sycl::nd_item<3> &item_ct1) {
for (int i = item_ct1.get_local_id(2); i < 23000;
i+=1) {
    // busy-work so the loop is not trivially optimized away
    auto dummy = sycl::sin((float)i) + sycl::cos((float)i);
    (void)dummy;
d_o[0] = d_o[0] + i;
}
}
// Single warp reduction kernel
void sum(long *d_count, int N, const sycl::nd_item<3> &item_ct1,
long *s_count) {
// Handle to thread block group
// auto cta = item_ct1.get_group();
long my_sum = 0;
for (int i = item_ct1.get_local_id(2); i < N;
i += item_ct1.get_local_range(2)) {
my_sum += d_count[i];
}
s_count[item_ct1.get_local_id(2)] = my_sum;
item_ct1.barrier();
for (int i = 16; i > 0; i /= 2) {
if (item_ct1.get_local_id(2) < i) {
s_count[item_ct1.get_local_id(2)] +=
s_count[item_ct1.get_local_id(2) + i];
}
item_ct1.barrier();
}
d_count[0] = s_count[0];
}
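// Reduction walkthrough (illustrative): each of the 32 work-items first
// accumulates a strided partial sum into s_count[lid]. The loop then halves
// the active width each step (16, 8, 4, 2, 1), folding s_count[lid + i]
// into s_count[lid], so after five steps s_count[0] holds the total, which
// the kernel writes back to d_count[0].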
int main(int argc, char **argv) {
int nkernels = 8; // number of concurrent kernels
  int nstreams = nkernels + 1;  // use one more stream than concurrent kernels
int nbytes = nkernels * sizeof(long); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels")) {
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest
// Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
dpct::device_info deviceProp;
checkCudaErrors(cuda_device = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors((dpct::dev_mgr::instance()
.get_device(cuda_device)
.get_device_info(deviceProp),
0));
  // Migrated from "if (!deviceProp.concurrentKernels)"; the migration tool
  // replaced the property query with a constant, so this branch never runs.
  if ((true == 0)) {
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.get_major_version(), deviceProp.get_minor_version(),
deviceProp.get_max_compute_units());
// allocate host memory
long *a = 0; // pointer to the array data in host memory
checkCudaErrors(
(a = (long *)sycl::malloc_host(nbytes, dpct::get_default_queue()), 0));
// allocate device memory
long *d_a = 0; // pointers to data and init value in the device memory
checkCudaErrors(
(d_a = (long *)sycl::malloc_device(nbytes, dpct::get_default_queue()),
0));
// allocate and initialize an array of stream handles
dpct::queue_ptr *streams =
(dpct::queue_ptr *)malloc(nstreams * sizeof(dpct::queue_ptr));
for (int i = 0; i < nstreams; i++) {
checkCudaErrors(
((streams[i]) = dpct::get_current_device().create_queue(), 0));
}
// create CUDA event handles
dpct::event_ptr start_event, stop_event;
std::chrono::time_point<std::chrono::steady_clock> start_event_ct1;
std::chrono::time_point<std::chrono::steady_clock> stop_event_ct1;
checkCudaErrors((start_event = new sycl::event(), 0));
checkCudaErrors((stop_event = new sycl::event(), 0));
// the events are used for synchronization only and hence do not need to
// record timings this also makes events not introduce global sync points when
// recorded which is critical to get overlap
dpct::event_ptr *kernelEvent;
std::chrono::time_point<std::chrono::steady_clock> kernelEvent_ct1_i;
kernelEvent = (dpct::event_ptr *)malloc(nkernels * sizeof(dpct::event_ptr));
for (int i = 0; i < nkernels; i++) {
checkCudaErrors((kernelEvent[i] = new sycl::event(), 0));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
long total_count = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to
// prevent hangs reduce time_count.
  long time_count = (long)(kernel_time * (deviceProp.get_max_clock_frequency() / 100));
#else
long time_count =
(long)(kernel_time * deviceProp.get_max_clock_frequency());
#endif
sycl::event stop_event_streams_nstreams_1;
start_event_ct1 = std::chrono::steady_clock::now();
*start_event = dpct::get_default_queue().ext_oneapi_submit_barrier();
// queue nkernels in separate streams and record when they are done
for (int i = 0; i < nkernels; ++i) {
streams[i]->submit([&](sycl::handler &cgh) {
auto d_a_i_ct0 = &d_a[i];
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 1), sycl::range<3>(1, 1, 1)),
[=](sycl::nd_item<3> item_ct1) {
count_block(d_a_i_ct0, item_ct1);
});
});
total_count += time_count;
kernelEvent_ct1_i = std::chrono::steady_clock::now();
checkCudaErrors(
(*kernelEvent[i] = streams[i]->ext_oneapi_submit_barrier(), 0));
// make the last stream wait for the kernel event to be recorded
checkCudaErrors(
(streams[nstreams - 1]->ext_oneapi_submit_barrier({*kernelEvent[i]}),
0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events
// have been recorded
streams[nstreams - 1]->submit([&](sycl::handler &cgh) {
sycl::local_accessor<long, 1> s_count_acc_ct1(sycl::range<1>(32), cgh);
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)),
[=](sycl::nd_item<3> item_ct1) {
sum(d_a, nkernels, item_ct1, s_count_acc_ct1.get_pointer());
});
});
checkCudaErrors((stop_event_streams_nstreams_1 =
streams[nstreams - 1]->memcpy(a, d_a, sizeof(long)),
0));
// at this point the CPU has dispatched all work for the GPU and can continue
// processing other tasks in parallel;
// in this sample we just wait until the GPU is done
dpct::get_current_device().queues_wait_and_throw();
stop_event_streams_nstreams_1.wait();
stop_event_ct1 = std::chrono::steady_clock::now();
checkCudaErrors(
(*stop_event = dpct::get_default_queue().ext_oneapi_submit_barrier(), 0));
checkCudaErrors(0);
checkCudaErrors((elapsed_time = std::chrono::duration<float, std::milli>(
stop_event_ct1 - start_event_ct1)
.count(),
0));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels,
nkernels * kernel_time / 1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n",
nkernels, kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] > total_count);
// release resources
for (int i = 0; i < nkernels; i++) {
dpct::get_current_device().destroy_queue(streams[i]);
dpct::destroy_event(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
dpct::destroy_event(start_event);
dpct::destroy_event(stop_event);
sycl::free(a, dpct::get_default_queue());
sycl::free(d_a, dpct::get_default_queue());
if (!bTestResult) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
// File: data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/helper_timer.h (repo: oneAPI-samples)
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch interface. This is used if we don't want to use
// the CUT functions, but rather a self-contained class interface.
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
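// A minimal usage sketch (illustrative only; work() is a hypothetical
// function standing in for the code being timed). Each start()/stop() pair
// counts as one "session", so getTime() reports the accumulated total and
// getAverageTime() the per-session mean, using one of the platform
// implementations defined below:
//
//   StopWatchLinux sw;            // or StopWatchWin on Windows
//   for (int i = 0; i < 10; ++i) {
//     sw.start();
//     work();
//     sw.stop();
//   }
//   float total_ms = sw.getTime();        // sum over all 10 sessions
//   float mean_ms  = sw.getAverageTime(); // total_ms / 10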
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
// remember query
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milliseconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer that receives the new timer; 0 if the
//! creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface handle of the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface handle of the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface handle of the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface handle of the timer to reset
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface handle of the timer to return the time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface handle of the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
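// A minimal usage sketch of the exported helpers (illustrative only;
// compute() is a hypothetical function standing in for the timed work):
//
//   StopWatchInterface *timer = NULL;
//   sdkCreateTimer(&timer);
//   sdkStartTimer(&timer);
//   compute();
//   sdkStopTimer(&timer);
//   printf("elapsed: %.3f ms\n", sdkGetTimerValue(&timer));
//   sdkDeleteTimer(&timer);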
#endif // COMMON_HELPER_TIMER_H_
// File: data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/helper_string.h (repo: oneAPI-samples)
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
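// For example, stringRemoveDelimiter('-', "--width=512") returns 2, the
// index just past the leading delimiters, so callers can compare the bare
// "width=512" token against a reference string.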
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// Template helper that parses a numeric command-line argument value of type T
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
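// A minimal usage sketch of the command-line helpers above (illustrative
// only; the flag names are hypothetical examples). For a program invoked as
//   ./sample --help --n=256 --scale=0.5 --file=input.pgm
//
//   if (checkCmdLineFlag(argc, (const char **)argv, "help")) { /* ... */ }
//   int n = getCmdLineArgumentInt(argc, (const char **)argv, "n");         // 256
//   float s = getCmdLineArgumentFloat(argc, (const char **)argv, "scale"); // 0.5f
//   char *fname = NULL;
//   if (getCmdLineArgumentString(argc, (const char **)argv, "file", &fname)) {
//     // fname points into argv; it must not be freed
//   }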
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
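// A minimal usage sketch (illustrative only; "ref_data.bin" is a
// hypothetical companion file). The returned path is heap-allocated and
// must be released by the caller:
//
//   char *path = sdkFindFilePath("ref_data.bin", argv[0]);
//   if (path != NULL) {
//     FILE *fp = fopen(path, "rb");
//     // ... read the reference data ...
//     if (fp) fclose(fp);
//     free(path);
//   }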
#endif // COMMON_HELPER_STRING_H_
// File: data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/helper_image.h (repo: oneAPI-samples)
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (image,bitmap)
#ifndef COMMON_HELPER_IMAGE_H_
#define COMMON_HELPER_IMAGE_H_
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdint.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#ifndef MIN
#define MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef MAX
#define MAX(a, b) ((a > b) ? a : b)
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#include <helper_string.h>
// namespace unnamed (internal)
namespace helper_image_internal {
//! size of PGM file header
const unsigned int PGMHeaderSize = 0x40;
// types
//! Data converter from unsigned char / unsigned byte to type T
template <class T>
struct ConverterFromUByte;
//! Data converter from unsigned char / unsigned byte (identity)
template <>
struct ConverterFromUByte<unsigned char> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<unsigned char>(val);
}
};
//! Data converter from unsigned char / unsigned byte to float
template <>
struct ConverterFromUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
float operator()(const unsigned char &val) {
return static_cast<float>(val) / 255.0f;
}
};
//! Data converter from type T to unsigned char / unsigned byte
template <class T>
struct ConverterToUByte;
//! Identity converter for unsigned char / unsigned byte
template <>
struct ConverterToUByte<unsigned char> {
//! Conversion operator (essentially a pass-through)
//! @return converted value
//! @param val value to convert
unsigned char operator()(const unsigned char &val) { return val; }
};
//! Data converter from float to unsigned char / unsigned byte
template <>
struct ConverterToUByte<float> {
//! Conversion operator
//! @return converted value
//! @param val value to convert
unsigned char operator()(const float &val) {
return static_cast<unsigned char>(val * 255.0f);
}
};
} // namespace helper_image_internal
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#endif
inline bool __loadPPM(const char *file, unsigned char **data, unsigned int *w,
unsigned int *h, unsigned int *channels) {
FILE *fp = NULL;
if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) {
std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl;
return false;
}
// check header
char header[helper_image_internal::PGMHeaderSize];
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl;
return false;
}
if (strncmp(header, "P5", 2) == 0) {
*channels = 1;
} else if (strncmp(header, "P6", 2) == 0) {
*channels = 3;
} else {
std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl;
*channels = 0;
return false;
}
// parse header, read maxval, width and height
unsigned int width = 0;
unsigned int height = 0;
unsigned int maxval = 0;
unsigned int i = 0;
while (i < 3) {
if (fgets(header, helper_image_internal::PGMHeaderSize, fp) == NULL) {
std::cerr << "__LoadPPM() : reading PGM header returned NULL"
<< std::endl;
return false;
}
if (header[0] == '#') {
continue;
}
if (i == 0) {
i += SSCANF(header, "%u %u %u", &width, &height, &maxval);
} else if (i == 1) {
i += SSCANF(header, "%u %u", &height, &maxval);
} else if (i == 2) {
i += SSCANF(header, "%u", &maxval);
}
}
// check if given handle for the data is initialized
if (NULL != *data) {
if (*w != width || *h != height) {
std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl;
}
} else {
*data = (unsigned char *)malloc(sizeof(unsigned char) * width * height *
*channels);
*w = width;
*h = height;
}
// read and close file
if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) ==
0) {
std::cerr << "__LoadPPM() read data returned error." << std::endl;
}
fclose(fp);
return true;
}
template <class T>
inline bool sdkLoadPGM(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = NULL;
unsigned int channels;
if (true != __loadPPM(file, &idata, w, h, &channels)) {
return false;
}
unsigned int size = *w * *h * channels;
// initialize mem if necessary
// the correct size is checked / set in __loadPPM()
if (NULL == *data) {
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size));
}
// copy and cast data
std::transform(idata, idata + size, *data,
helper_image_internal::ConverterFromUByte<T>());
free(idata);
return true;
}
template <class T>
inline bool sdkLoadPPM4(const char *file, T **data, unsigned int *w,
unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = reinterpret_cast<T *>(malloc(sizeof(T) * size * 4));
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool __savePPM(const char *file, unsigned char *data, unsigned int w,
unsigned int h, unsigned int channels) {
assert(NULL != data);
assert(w > 0);
assert(h > 0);
std::fstream fh(file, std::fstream::out | std::fstream::binary);
if (fh.bad()) {
std::cerr << "__savePPM() : Opening file failed." << std::endl;
return false;
}
if (channels == 1) {
fh << "P5\n";
} else if (channels == 3) {
fh << "P6\n";
} else {
std::cerr << "__savePPM() : Invalid number of channels." << std::endl;
return false;
}
fh << w << "\n" << h << "\n" << 0xff << std::endl;
for (unsigned int i = 0; (i < (w * h * channels)) && fh.good(); ++i) {
fh << data[i];
}
fh.flush();
if (fh.bad()) {
std::cerr << "__savePPM() : Writing data failed." << std::endl;
return false;
}
fh.close();
return true;
}
template <class T>
inline bool sdkSavePGM(const char *file, T *data, unsigned int w,
unsigned int h) {
unsigned int size = w * h;
unsigned char *idata = (unsigned char *)malloc(sizeof(unsigned char) * size);
std::transform(data, data + size, idata,
helper_image_internal::ConverterToUByte<T>());
// write file
bool result = __savePPM(file, idata, w, h, 1);
// cleanup
free(idata);
return result;
}
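// A minimal round-trip sketch for the PGM helpers (illustrative only;
// "input.pgm" and "output.pgm" are hypothetical files). With T = float the
// 8-bit pixels are converted to [0,1] on load and back to 8-bit on save:
//
//   float *img = NULL;
//   unsigned int w = 0, h = 0;
//   if (sdkLoadPGM<float>("input.pgm", &img, &w, &h)) {
//     // ... process img[0 .. w*h-1] in place ...
//     sdkSavePGM<float>("output.pgm", img, w, h);
//     free(img);
//   }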
inline bool sdkSavePPM4ub(const char *file, unsigned char *data, unsigned int w,
unsigned int h) {
// strip 4th component
int size = w * h;
unsigned char *ndata =
(unsigned char *)malloc(sizeof(unsigned char) * size * 3);
unsigned char *ptr = ndata;
for (int i = 0; i < size; i++) {
*ptr++ = *data++;
*ptr++ = *data++;
*ptr++ = *data++;
data++;
}
bool result = __savePPM(file, ndata, w, h, 3);
free(ndata);
return result;
}
//////////////////////////////////////////////////////////////////////////////
//! Read file \a filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFile(const char *filename, T **data, unsigned int *len,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// intermediate storage for the data read
std::vector<T> data_read;
// open file for reading
FILE *fh = NULL;
// check if filestream is valid
if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) {
printf("Unable to open input file: %s\n", filename);
return false;
}
// read all data elements
T token;
while (!feof(fh)) {
fscanf(fh, "%f", &token);
data_read.push_back(token);
}
// the last element is read twice
data_read.pop_back();
fclose(fh);
// check if the given handle is already initialized
if (NULL != *data) {
if (*len != data_read.size()) {
std::cerr << "sdkReadFile() : Initialized memory given but "
<< "size mismatch with signal read "
<< "(data read / data init = " << (unsigned int)data_read.size()
<< " / " << *len << ")" << std::endl;
return false;
}
} else {
// allocate storage for the data read
*data = reinterpret_cast<T *>(malloc(sizeof(T) * data_read.size()));
// store signal size
*len = static_cast<unsigned int>(data_read.size());
}
// copy data
memcpy(*data, &data_read.front(), sizeof(T) * data_read.size());
return true;
}
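// A minimal usage sketch (illustrative only; "signal.dat" is a hypothetical
// whitespace-separated text file of numbers). Passing a NULL data pointer
// lets sdkReadFile allocate the buffer and report the element count:
//
//   float *data = NULL;
//   unsigned int len = 0;
//   if (sdkReadFile("signal.dat", &data, &len, false)) {
//     // ... use data[0 .. len-1] ...
//     free(data);
//   }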
//////////////////////////////////////////////////////////////////////////////
//! Read file \a filename and return the data
//! @return bool if reading the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data uninitialized pointer, returned initialized and pointing to
//! the data read
//! @param len number of data elements in data, -1 on error
//////////////////////////////////////////////////////////////////////////////
template <class T>
inline bool sdkReadFileBlocks(const char *filename, T **data, unsigned int *len,
unsigned int block_num, unsigned int block_size,
bool verbose) {
// check input arguments
assert(NULL != filename);
assert(NULL != len);
// open file for reading
FILE *fh = fopen(filename, "rb");
if (fh == NULL && verbose) {
std::cerr << "sdkReadFile() : Opening file failed." << std::endl;
return false;
}
// check if the given handle is already initialized
// allocate storage for the data read
data[block_num] = reinterpret_cast<T *>(malloc(block_size));
// read all data elements
fseek(fh, block_num * block_size, SEEK_SET);
*len = fread(data[block_num], sizeof(T), block_size / sizeof(T), fh);
fclose(fh);
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Write a data file \a filename
//! @return true if writing the file succeeded, otherwise false
//! @param filename name of the source file
//! @param data data to write
//! @param len number of data elements in data, -1 on error
//! @param epsilon epsilon for comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool sdkWriteFile(const char *filename, const T *data, unsigned int len,
const S epsilon, bool verbose, bool append = false) {
assert(NULL != filename);
assert(NULL != data);
// open file for writing
// if (append) {
std::fstream fh(filename, std::fstream::out | std::fstream::ate);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename
<< " for write/append." << std::endl;
}
/* } else {
std::fstream fh(filename, std::fstream::out);
if (verbose) {
std::cerr << "sdkWriteFile() : Open file " << filename << " for
write." << std::endl;
}
}
*/
// check if filestream is valid
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Opening file failed." << std::endl;
}
return false;
}
// first write epsilon
fh << "# " << epsilon << "\n";
// write data
for (unsigned int i = 0; (i < len) && (fh.good()); ++i) {
fh << data[i] << ' ';
}
// Check if writing succeeded
if (!fh.good()) {
if (verbose) {
std::cerr << "sdkWriteFile() : Writing file failed." << std::endl;
}
return false;
}
// file ends with nl
fh << std::endl;
return true;
}
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareData(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
bool result = true;
unsigned int error_count = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = static_cast<float>(reference[i]) - static_cast<float>(data[i]);
bool comp = (diff <= epsilon) && (diff >= -epsilon);
result &= comp;
error_count += !comp;
#if 0
if (!comp) {
std::cerr << "ERROR, i = " << i << ",\t "
<< reference[i] << " / "
<< data[i]
<< " (reference / data)\n";
}
#endif
}
if (threshold == 0.0f) {
return (result) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return (len * threshold > error_count) ? true : false;
}
}
#ifndef __MIN_EPSILON_ERROR
#define __MIN_EPSILON_ERROR 1e-3f
#endif
//////////////////////////////////////////////////////////////////////////////
//! Compare two arrays of arbitrary type
//! @return true if \a reference and \a data are identical, otherwise false
//! @param reference handle to the reference data / gold image
//! @param data handle to the computed data
//! @param len number of elements in reference and data
//! @param epsilon epsilon to use for the comparison
//! @param threshold threshold % of (# of bytes) for pass/fail
//////////////////////////////////////////////////////////////////////////////
template <class T, class S>
inline bool compareDataAsFloatThreshold(const T *reference, const T *data,
const unsigned int len, const S epsilon,
const float threshold) {
assert(epsilon >= 0);
// If we set epsilon to be 0, let's set a minimum threshold
float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR);
int error_count = 0;
bool result = true;
for (unsigned int i = 0; i < len; ++i) {
float diff =
fabs(static_cast<float>(reference[i]) - static_cast<float>(data[i]));
bool comp = (diff < max_error);
result &= comp;
if (!comp) {
error_count++;
}
}
if (threshold == 0.0f) {
if (error_count) {
printf("total # of errors = %d\n", error_count);
}
return (error_count == 0) ? true : false;
} else {
if (error_count) {
printf("%4.2f(%%) of bytes mismatched (count=%d)\n",
static_cast<float>(error_count) * 100 / static_cast<float>(len),
error_count);
}
return ((len * threshold > error_count) ? true : false);
}
}
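// For example, with reference and result arrays of 1000 floats,
//
//   compareDataAsFloatThreshold<float, float>(ref, out, 1000, 1e-3f, 0.05f)
//
// passes as long as fewer than 5% of the elements (here, fewer than 50)
// differ from the reference by max(epsilon, __MIN_EPSILON_ERROR) or more;
// with threshold == 0.0f every element must match within that bound.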
inline void sdkDumpBin(void *data, unsigned int bytes, const char *filename) {
printf("sdkDumpBin: <%s>\n", filename);
FILE *fp;
FOPEN(fp, filename, "wb");
fwrite(data, bytes, 1, fp);
fflush(fp);
fclose(fp);
}
inline bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
unsigned int *src_buffer, *ref_buffer;
FILE *src_fp = NULL, *ref_fp = NULL;
uint64_t error_count = 0;
size_t fsize = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <unsigned int> unable to open src_file: %s\n",
src_file);
error_count++;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <unsigned int> unable to find <%s> in <%s>\n",
ref_file, exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
ref_file);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf(
"compareBin2Bin <unsigned int>"
" unable to open ref_file: %s\n",
ref_file_path);
error_count++;
}
if (src_fp && ref_fp) {
src_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
ref_buffer = (unsigned int *)malloc(nelements * sizeof(unsigned int));
fsize = fread(src_buffer, nelements, sizeof(unsigned int), src_fp);
fsize = fread(ref_buffer, nelements, sizeof(unsigned int), ref_fp);
printf(
"> compareBin2Bin <unsigned int> nelements=%d,"
" epsilon=%4.2f, threshold=%4.2f\n",
nelements, epsilon, threshold);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize));
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize));
if (!compareData<unsigned int, float>(ref_buffer, src_buffer, nelements,
epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file,
unsigned int nelements, const float epsilon,
const float threshold, char *exec_path) {
float *src_buffer = NULL, *ref_buffer = NULL;
FILE *src_fp = NULL, *ref_fp = NULL;
size_t fsize = 0;
uint64_t error_count = 0;
if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) {
printf("compareBin2Bin <float> unable to open src_file: %s\n", src_file);
error_count = 1;
}
char *ref_file_path = sdkFindFilePath(ref_file, exec_path);
if (ref_file_path == NULL) {
printf("compareBin2Bin <float> unable to find <%s> in <%s>\n", ref_file,
exec_path);
printf(">>> Check info.xml and [project//data] folder <%s> <<<\n",
exec_path);
printf("Aborting comparison!\n");
printf(" FAILED\n");
error_count++;
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
} else {
if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) {
printf("compareBin2Bin <float> unable to open ref_file: %s\n",
ref_file_path);
error_count = 1;
}
if (src_fp && ref_fp) {
src_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
ref_buffer = reinterpret_cast<float *>(malloc(nelements * sizeof(float)));
printf(
"> compareBin2Bin <float> nelements=%d, epsilon=%4.2f,"
" threshold=%4.2f\n",
nelements, epsilon, threshold);
fsize = fread(src_buffer, sizeof(float), nelements, src_fp);
printf(" src_file <%s>, size=%d bytes\n", src_file,
static_cast<int>(fsize * sizeof(float)));
fsize = fread(ref_buffer, sizeof(float), nelements, ref_fp);
printf(" ref_file <%s>, size=%d bytes\n", ref_file_path,
static_cast<int>(fsize * sizeof(float)));
if (!compareDataAsFloatThreshold<float, float>(
ref_buffer, src_buffer, nelements, epsilon, threshold)) {
error_count++;
}
fclose(src_fp);
fclose(ref_fp);
free(src_buffer);
free(ref_buffer);
} else {
if (src_fp) {
fclose(src_fp);
}
if (ref_fp) {
fclose(ref_fp);
}
}
}
if (error_count == 0) {
printf(" OK\n");
} else {
printf(" FAILURE: %d errors...\n", (unsigned int)error_count);
}
return (error_count == 0); // returns true if all pixels pass
}
inline bool sdkCompareL2fe(const float *reference, const float *data,
const unsigned int len, const float epsilon) {
assert(epsilon >= 0);
float error = 0;
float ref = 0;
for (unsigned int i = 0; i < len; ++i) {
float diff = reference[i] - data[i];
error += diff * diff;
ref += reference[i] * reference[i];
}
float normRef = sqrtf(ref);
if (fabs(ref) < 1e-7) {
#ifdef _DEBUG
std::cerr << "ERROR, reference l2-norm is 0\n";
#endif
return false;
}
float normError = sqrtf(error);
error = normError / normRef;
bool result = error < epsilon;
#ifdef _DEBUG
if (!result) {
std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon "
<< epsilon << "\n";
}
#endif
return result;
}
inline bool sdkLoadPPMub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned int channels;
return __loadPPM(file, data, w, h, &channels);
}
inline bool sdkLoadPPM4ub(const char *file, unsigned char **data,
unsigned int *w, unsigned int *h) {
unsigned char *idata = 0;
unsigned int channels;
if (__loadPPM(file, &idata, w, h, &channels)) {
// pad 4th component
int size = *w * *h;
// keep the original pointer
unsigned char *idata_orig = idata;
*data = (unsigned char *)malloc(sizeof(unsigned char) * size * 4);
unsigned char *ptr = *data;
for (int i = 0; i < size; i++) {
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = *idata++;
*ptr++ = 0;
}
free(idata_orig);
return true;
} else {
free(idata);
return false;
}
}
inline bool sdkComparePPM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data, *ref_data;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PPMvsPPM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors) {
std::cerr << "PPMvsPPM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
}
if (compareData(ref_data, src_data, src_width * src_height * 4, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
inline bool sdkComparePGM(const char *src_file, const char *ref_file,
const float epsilon, const float threshold,
bool verboseErrors) {
unsigned char *src_data = 0, *ref_data = 0;
uint64_t error_count = 0;
unsigned int ref_width, ref_height;
unsigned int src_width, src_height;
if (src_file == NULL || ref_file == NULL) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: src_file or ref_file is NULL."
" Aborting comparison\n";
}
return false;
}
if (verboseErrors) {
std::cerr << "> Compare (a)rendered: <" << src_file << ">\n";
std::cerr << "> (b)reference: <" << ref_file << ">\n";
}
if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: unable to load ref image file: " << ref_file
<< "\n";
}
return false;
}
if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) {
std::cerr << "PGMvsPGM: unable to load src image file: " << src_file
<< "\n";
return false;
}
if (src_height != ref_height || src_width != ref_width) {
if (verboseErrors) {
std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width
<< "," << src_height << ")vs(" << ref_width << "," << ref_height
<< ")\n";
}
}
if (verboseErrors)
std::cerr << "PGMvsPGM: comparing images size (" << src_width << ","
<< src_height << ") epsilon(" << epsilon << "), threshold("
<< threshold * 100 << "%)\n";
if (compareData(ref_data, src_data, src_width * src_height, epsilon,
threshold) == false) {
error_count = 1;
}
if (error_count == 0) {
if (verboseErrors) {
std::cerr << " OK\n\n";
}
} else {
if (verboseErrors) {
std::cerr << " FAILURE! " << error_count << " errors...\n\n";
}
}
// returns true if all pixels pass
return (error_count == 0) ? true : false;
}
#endif // COMMON_HELPER_IMAGE_H_
// File: data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/exception.h (repo: oneAPI-samples)
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
  //! @brief Static construction interface
  //! @return Always throws Located_Exception<Exception>
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
  //! Static construction interface
  //! @return Always throws Located_Exception<Exception>
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
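// Usage sketch (illustrative only; load_config and "app.cfg" are made up for
// this example and are not part of the original header). The macros capture
// __FILE__ and __LINE__ automatically, so a call site only supplies the
// message, and the throw can be funneled through handleException() above
// because Exception<std::runtime_error> derives from std::runtime_error:
//
//   void load_config(const char *path) {
//     std::ifstream f(path);
//     if (!f.is_open()) RUNTIME_EXCEPTION("config file could not be opened");
//   }
//
//   // in main():
//   try { load_config("app.cfg"); }
//   catch (const std::runtime_error &e) { handleException(e); }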
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavyweight, but exceptions are not intended for
  // performance-critical / release builds
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the needed CUDA headers, which may
// change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:2: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
           int const line) {
  // Intentionally a no-op after DPCT migration: SYCL reports errors through
  // exceptions rather than status codes, so there is no error value to test
  // here. The (expr, 0) comma expressions at the call sites keep the original
  // checkCudaErrors() call shape compiling.
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:3: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError,
// but does not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:5: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
// Fully parenthesized so that expression arguments (e.g. MAX(x & 1, y))
// expand correctly.
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
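// Example (illustrative): ftoi rounds half away from zero rather than
// truncating: ftoi(2.5f) == 3, ftoi(-2.5f) == -3, ftoi(1.2f) == 1.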
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
  // Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
typedef struct dpct_type_554348 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // to run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
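// Example (illustrative): for an SM 8.6 (Ampere) device the lookup key is
// (8 << 4) + 6 == 0x86, so _ConvertSMVer2Cores(8, 6) returns 128 cores/SM.
// An SM version missing from the table (say 9.9) falls through to the last
// real entry (0x90 -> 128), and the function prints a warning before
// returning that default.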
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_876740 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the values, we default to the previous one
  // to run properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
/*
DPCT1003:7: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:8: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors(
(major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
0));
checkCudaErrors(
(minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
0));
/*
DPCT1035:9: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:10: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
/*
DPCT1003:11: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(devID), 0));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
/*
DPCT1003:12: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:13: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors((major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version(),
0));
checkCudaErrors((minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version(),
0));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:14: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors((multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units(),
0));
dpct::err0 result = (clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency(),
0);
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
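      // Heuristic ranking metric: cores-per-SM x SM count x max clock. It is
      // only a proxy for peak throughput, not an exact GFLOPS figure, but it
      // is sufficient for picking the fastest device.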
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:15: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
/*
DPCT1003:16: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(devID), 0));
int major = 0, minor = 0;
checkCudaErrors((
major = dpct::dev_mgr::instance().get_device(devID).get_major_version(),
0));
checkCudaErrors((
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version(),
0));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
/*
DPCT1003:17: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((device_count = dpct::dev_mgr::instance().device_count(), 0));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:18: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors((computeMode = 1, 0));
checkCudaErrors((integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated(),
0));
    // If the GPU is integrated and is not running in Compute Mode prohibited,
    // then CUDA can map to the GLES resource
/*
DPCT1035:19: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:20: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
/*
DPCT1003:21: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((dpct::select_device(current_device), 0));
int major = 0, minor = 0;
checkCudaErrors((major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version(),
0));
checkCudaErrors((minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version(),
0));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
checkCudaErrors(dev = dpct::dev_mgr::instance().current_device_id());
checkCudaErrors(
(major = dpct::dev_mgr::instance().get_device(dev).get_major_version(),
0));
checkCudaErrors(
(minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version(),
0));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Common/helper_functions.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing,
// timers, image helpers, etc)
#ifndef COMMON_HELPER_FUNCTIONS_H_
#define COMMON_HELPER_FUNCTIONS_H_
#ifdef WIN32
#pragma warning(disable : 4996)
#endif
// includes, project
#include <assert.h>
#include <exception.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// includes, timer, string parsing, image helpers
#include <helper_image.h> // helper functions for image compare, dump, data comparisons
#include <helper_string.h> // helper functions for string parsing
#include <helper_timer.h> // helper functions for timers
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
#endif // COMMON_HELPER_FUNCTIONS_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/guided_concurrentKernels_SYCLMigration/01_dpct_output/Samples/0_Introduction/concurrentKernels/concurrentKernels.dp.cpp | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also
// illustrates how to introduce dependencies between CUDA streams with the
// cudaStreamWaitEvent function.
//
// Devices of compute capability 2.0 or higher can overlap the kernels
//
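//
// In this migrated SYCL version the cross-stream dependency is expressed with
// ext_oneapi_submit_barrier, mirroring how it is used further below
// (illustrative sketch):
//
//   sycl::event e = stream_a->ext_oneapi_submit_barrier();  // "record" on stream_a
//   stream_b->ext_oneapi_submit_barrier({e});               // stream_b waits for e
//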
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <time.h>
#include <chrono>
// This is a kernel that does no real work but runs at least for a specified
// number of clocks
void clock_block(clock_t *d_o, clock_t clock_count) {
/*
DPCT1008:22: clock function is not defined in SYCL. This is a
hardware-specific feature. Consult with your hardware vendor to find a
replacement.
*/
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count) {
/*
DPCT1008:23: clock function is not defined in SYCL. This is a
hardware-specific feature. Consult with your hardware vendor to find a
replacement.
*/
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
    // this (thanks to modular arithmetic):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
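    //
    // Worked example (illustrative): start_clock = 0xFFFFFFF0 and
    // end_clock = 0x00000010 give end_clock - start_clock == 0x20 (mod 2^32),
    // i.e. 32 elapsed clocks even though the 32-bit counter wrapped.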
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
void sum(clock_t *d_clocks, int N, const sycl::nd_item<3> &item_ct1,
clock_t *s_clocks) {
// Handle to thread block group
auto cta = item_ct1.get_group();
clock_t my_sum = 0;
for (int i = item_ct1.get_local_id(2); i < N;
i += item_ct1.get_local_range(2)) {
my_sum += d_clocks[i];
}
s_clocks[item_ct1.get_local_id(2)] = my_sum;
/*
DPCT1065:0: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
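  // Tree reduction across the 32 partial sums (illustrative walk-through):
  // the stride i halves each pass (16, 8, 4, 2, 1). After the i == 16 pass
  // s_clocks[0..15] hold pairwise sums, and after the final i == 1 pass
  // s_clocks[0] holds the total.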
for (int i = 16; i > 0; i /= 2) {
if (item_ct1.get_local_id(2) < i) {
s_clocks[item_ct1.get_local_id(2)] +=
s_clocks[item_ct1.get_local_id(2) + i];
}
/*
DPCT1065:1: Consider replacing sycl::nd_item::barrier() with
sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
performance if there is no access to global memory.
*/
item_ct1.barrier();
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv) {
  int nkernels = 8;            // number of concurrent kernels
  int nstreams = nkernels + 1; // use one more stream than concurrent kernels
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels")) {
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest
// Gflops/s
//cuda_device = findCudaDevice(argc, (const char **)argv);
dpct::device_info deviceProp;
checkCudaErrors(cuda_device = dpct::dev_mgr::instance().current_device_id());
/*
DPCT1003:25: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((dpct::dev_mgr::instance()
.get_device(cuda_device)
.get_device_info(deviceProp),
0));
/*
DPCT1051:26: SYCL does not support a device property functionally compatible
with concurrentKernels. It was migrated to true. You may need to adjust the
value of true for the specific device.
*/
if ((true == 0)) {
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
/*
DPCT1005:27: The SYCL device version is different from CUDA Compute
Compatibility. You may need to rewrite this code.
*/
deviceProp.get_major_version(), deviceProp.get_minor_version(),
deviceProp.get_max_compute_units());
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
/*
DPCT1003:28: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(a = (clock_t *)sycl::malloc_host(nbytes, dpct::get_default_queue()), 0));
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
/*
DPCT1003:29: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors(
(d_a = (clock_t *)sycl::malloc_device(nbytes, dpct::get_default_queue()),
0));
// allocate and initialize an array of stream handles
dpct::queue_ptr *streams =
(dpct::queue_ptr *)malloc(nstreams * sizeof(dpct::queue_ptr));
for (int i = 0; i < nstreams; i++) {
/*
DPCT1003:30: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors(
((streams[i]) = dpct::get_current_device().create_queue(), 0));
}
// create CUDA event handles
dpct::event_ptr start_event, stop_event;
std::chrono::time_point<std::chrono::steady_clock> start_event_ct1;
std::chrono::time_point<std::chrono::steady_clock> stop_event_ct1;
/*
DPCT1003:31: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((start_event = new sycl::event(), 0));
/*
DPCT1003:32: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((stop_event = new sycl::event(), 0));
  // the events are used for synchronization only and hence do not need to
  // record timings; this also keeps the events from introducing global sync
  // points when recorded, which is critical to get overlap
dpct::event_ptr *kernelEvent;
std::chrono::time_point<std::chrono::steady_clock> kernelEvent_ct1_i;
kernelEvent = (dpct::event_ptr *)malloc(nkernels * sizeof(dpct::event_ptr));
for (int i = 0; i < nkernels; i++) {
/*
DPCT1003:33: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors((kernelEvent[i] = new sycl::event(), 0));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to
// prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 100));
#else
clock_t time_clocks =
(clock_t)(kernel_time * deviceProp.get_max_clock_frequency());
#endif
/*
DPCT1012:24: Detected kernel execution time measurement pattern and generated
an initial code for time measurements in SYCL. You can change the way time is
measured depending on your goals.
*/
sycl::event stop_event_streams_nstreams_1;
start_event_ct1 = std::chrono::steady_clock::now();
*start_event = dpct::get_default_queue().ext_oneapi_submit_barrier();
// queue nkernels in separate streams and record when they are done
for (int i = 0; i < nkernels; ++i) {
streams[i]->submit([&](sycl::handler &cgh) {
auto d_a_i_ct0 = &d_a[i];
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 1), sycl::range<3>(1, 1, 1)),
[=](sycl::nd_item<3> item_ct1) {
clock_block(d_a_i_ct0, time_clocks);
});
});
total_clocks += time_clocks;
/*
DPCT1012:34: Detected kernel execution time measurement pattern and
generated an initial code for time measurements in SYCL. You can change the
way time is measured depending on your goals.
*/
/*
DPCT1024:35: The original code returned the error code that was further
consumed by the program logic. This original code was replaced with 0. You
may need to rewrite the program logic consuming the error code.
*/
kernelEvent_ct1_i = std::chrono::steady_clock::now();
checkCudaErrors(
(*kernelEvent[i] = streams[i]->ext_oneapi_submit_barrier(), 0));
// make the last stream wait for the kernel event to be recorded
/*
DPCT1003:36: Migrated API does not return error code. (*, 0) is inserted.
You may need to rewrite this code.
*/
checkCudaErrors(
(streams[nstreams - 1]->ext_oneapi_submit_barrier({*kernelEvent[i]}),
0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events
// have been recorded
streams[nstreams - 1]->submit([&](sycl::handler &cgh) {
sycl::local_accessor<clock_t, 1> s_clocks_acc_ct1(sycl::range<1>(32), cgh);
cgh.parallel_for(
sycl::nd_range<3>(sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)),
[=](sycl::nd_item<3> item_ct1) {
sum(d_a, nkernels, item_ct1, s_clocks_acc_ct1.get_pointer());
});
});
/*
DPCT1003:37: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((stop_event_streams_nstreams_1 =
streams[nstreams - 1]->memcpy(a, d_a, sizeof(clock_t)),
0));
// at this point the CPU has dispatched all work for the GPU and can continue
// processing other tasks in parallel
// in this sample we just wait until the GPU is done
/*
DPCT1012:38: Detected kernel execution time measurement pattern and generated
an initial code for time measurements in SYCL. You can change the way time is
measured depending on your goals.
*/
/*
DPCT1024:39: The original code returned the error code that was further
consumed by the program logic. This original code was replaced with 0. You may
need to rewrite the program logic consuming the error code.
*/
dpct::get_current_device().queues_wait_and_throw();
stop_event_streams_nstreams_1.wait();
stop_event_ct1 = std::chrono::steady_clock::now();
checkCudaErrors(
(*stop_event = dpct::get_default_queue().ext_oneapi_submit_barrier(), 0));
checkCudaErrors(0);
/*
DPCT1003:40: Migrated API does not return error code. (*, 0) is inserted. You
may need to rewrite this code.
*/
checkCudaErrors((elapsed_time = std::chrono::duration<float, std::milli>(
stop_event_ct1 - start_event_ct1)
.count(),
0));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels,
nkernels * kernel_time / 1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n",
nkernels, kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] > total_clocks);
// release resources
for (int i = 0; i < nkernels; i++) {
dpct::get_current_device().destroy_queue(streams[i]);
dpct::destroy_event(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
dpct::destroy_event(start_event);
dpct::destroy_event(stop_event);
sycl::free(a, dpct::get_default_queue());
sycl::free(d_a, dpct::get_default_queue());
if (!bTestResult) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/CombinationalLogic/mandelbrot/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <chrono>
#include <iomanip>
#include <iostream>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
#include "mandel.hpp"
using namespace std;
using namespace sycl;
void ShowDevice(queue &q) {
// Output platform and device information.
auto device = q.get_device();
auto p_name = device.get_platform().get_info<info::platform::name>();
cout << std::setw(20) << "Platform Name: " << p_name << "\n";
auto p_version = device.get_platform().get_info<info::platform::version>();
cout << std::setw(20) << "Platform Version: " << p_version << "\n";
auto d_name = device.get_info<info::device::name>();
cout << std::setw(20) << "Device Name: " << d_name << "\n";
auto max_work_group = device.get_info<info::device::max_work_group_size>();
cout << std::setw(20) << "Max Work Group: " << max_work_group << "\n";
auto max_compute_units = device.get_info<info::device::max_compute_units>();
cout << std::setw(20) << "Max Compute Units: " << max_compute_units << "\n\n";
}
void Execute(queue &q) {
// Demonstrate the Mandelbrot calculation serial and parallel.
#ifdef MANDELBROT_USM
cout << "Parallel Mandelbrot set using USM.\n";
MandelParallelUsm m_par(row_size, col_size, max_iterations, &q);
#else
cout << "Parallel Mandelbrot set using buffers.\n";
MandelParallel m_par(row_size, col_size, max_iterations);
#endif
MandelSerial m_ser(row_size, col_size, max_iterations);
// Run the code once to trigger JIT.
m_par.Evaluate(q);
// Run the parallel version and time it.
dpc_common::TimeInterval t_par;
for (int i = 0; i < repetitions; ++i) m_par.Evaluate(q);
double parallel_time = t_par.Elapsed();
// Print the results.
m_par.Print();
m_par.WriteImage();
// Run the serial version.
dpc_common::TimeInterval t_ser;
m_ser.Evaluate();
double serial_time = t_ser.Elapsed();
// Report the results.
cout << std::setw(20) << "Serial time: " << serial_time << "s\n";
cout << std::setw(20) << "Parallel time: " << (parallel_time / repetitions)
<< "s\n";
// Validate.
m_par.Verify(m_ser);
}
int main(int argc, char *argv[]) {
try {
// Create a queue on the default device. Set SYCL_DEVICE_TYPE environment
// variable to (CPU|GPU|FPGA|HOST) to change the device.
queue q(default_selector_v);
// Display the device info.
ShowDevice(q);
// Compute Mandelbrot set.
Execute(q);
} catch (...) {
// Some other exception detected.
cout << "Failed to compute Mandelbrot set.\n";
std::terminate();
}
cout << "Successfully computed Mandelbrot set.\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/CombinationalLogic/mandelbrot/src/mandel.hpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#pragma once
#include <complex>
#include <exception>
#include <iomanip>
#include <iostream>
// stb/*.h files can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/stb/*.h
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
using namespace std;
using namespace sycl;
constexpr int row_size = 512;
constexpr int col_size = 512;
constexpr int max_iterations = 100;
constexpr int repetitions = 100;
// Parameters used in Mandelbrot including number of row, column, and iteration.
struct MandelParameters {
int row_count_;
int col_count_;
int max_iterations_;
typedef std::complex<float> ComplexF;
static std::complex<float> complex_square( std::complex<float> c)
{
return std::complex<float>( c.real()*c.real() - c.imag()*c.imag(), c.real()*c.imag()*2 );
}
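  // complex_square expands (a + bi)^2 = (a^2 - b^2) + 2ab*i directly instead
  // of calling the generic complex multiply. Example (illustrative):
  // complex_square({1.0f, 2.0f}) == {-3.0f, 4.0f}.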
MandelParameters(int row_count, int col_count, int max_iterations)
: row_count_(row_count),
col_count_(col_count),
max_iterations_(max_iterations) {}
int row_count() const { return row_count_; }
int col_count() const { return col_count_; }
int max_iterations() const { return max_iterations_; }
// Scale from 0..row_count to -1.5..0.5
float ScaleRow(int i) const { return -1.5f + (i * (2.0f / row_count_)); }
// Scale from 0..col_count to -1..1
float ScaleCol(int i) const { return -1.0f + (i * (2.0f / col_count_)); }
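  // Example (illustrative): with row_count_ == 512, ScaleRow(0) == -1.5f,
  // ScaleRow(256) == -0.5f, and ScaleRow(512) == 0.5f; ScaleCol maps the
  // same way onto -1..1.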
  // The Mandelbrot set consists of the points that do not diverge within
  // max_iterations.
int Point(const ComplexF &c) const {
int count = 0;
ComplexF z = 0;
for (int i = 0; i < max_iterations_; ++i) {
auto r = z.real();
auto im = z.imag();
      // Leave the loop if diverging: |z|^2 >= 4 (i.e. |z| >= 2) guarantees
      // the orbit escapes to infinity.
if (((r * r) + (im * im)) >= 4.0f) {
break;
}
// z = z * z + c;
z = complex_square(z) + c;
count++;
}
return count;
}
};
// Shared functions for computing Mandelbrot set.
class Mandel {
private:
MandelParameters p_;
protected:
int *data_;
public:
Mandel(int row_count, int col_count, int max_iterations)
: p_(row_count, col_count, max_iterations) {
data_ = nullptr;
}
virtual ~Mandel() {}
virtual void Alloc() { data_ = new int[p_.row_count() * p_.col_count()]; }
virtual void Free() { delete[] data_; }
MandelParameters GetParameters() const { return p_; }
void WriteImage() {
constexpr int channel_num{3};
int row_count = p_.row_count();
int col_count = p_.col_count();
uint8_t *pixels = new uint8_t[col_count * row_count * channel_num];
int index = 0;
for (int j = 0; j < row_count; ++j) {
for (int i = 0; i < col_count; ++i) {
float normalized = (1.0 * data_[i * col_count + j]) / max_iterations;
int color = int(normalized * 0xFFFFFF); // 16M color.
int r = (color >> 16) & 0xFF;
int g = (color >> 8) & 0xFF;
int b = color & 0xFF;
pixels[index++] = r;
pixels[index++] = g;
pixels[index++] = b;
}
}
stbi_write_png("mandelbrot.png", row_count, col_count, channel_num, pixels,
col_count * channel_num);
delete[] pixels;
}
// Use only for debugging with small dimensions.
void Print() {
if (p_.row_count() > 128 || p_.col_count() > 128) {
cout << " Rendered image output to file: mandelbrot.png "
"(output too large to display in text)\n";
return;
}
for (int i = 0; i < p_.row_count(); ++i) {
for (int j = 0; j < p_.col_count_; ++j) {
cout << std::setw(1)
<< ((GetValue(i, j) >= p_.max_iterations()) ? "x" : " ");
}
cout << "\n";
}
}
// Accessor for data and count values.
int *data() const { return data_; }
// Accessor to read a value from the mandelbrot data matrix.
int GetValue(int i, int j) const { return data_[i * p_.col_count_ + j]; }
// Mutator to store a value into the mandelbrot data matrix.
void SetValue(int i, int j, float v) { data_[i * p_.col_count_ + j] = v; }
// Validate the results match.
void Verify(Mandel &m) {
if ((m.p_.row_count() != p_.row_count_) ||
(m.p_.col_count() != p_.col_count_)) {
cout << "Fail verification - matrix size is different\n";
throw std::runtime_error("Verification failure");
}
int diff = 0;
for (int i = 0; i < p_.row_count(); ++i) {
for (int j = 0; j < p_.col_count(); ++j) {
if (m.GetValue(i, j) != GetValue(i, j)) diff++;
}
}
double tolerance = 0.05;
double ratio = (double)diff / (double)(p_.row_count() * p_.col_count());
#if _DEBUG
cout << "diff: " << diff << "\n";
cout << "total count: " << p_.row_count() * p_.col_count() << "\n";
#endif
if (ratio > tolerance) {
cout << "Fail verification - diff larger than tolerance\n";
throw std::runtime_error("Verification failure");
}
#if _DEBUG
cout << "Pass verification\n";
#endif
}
};
// Serial implementation for computing Mandelbrot set.
class MandelSerial : public Mandel {
public:
MandelSerial(int row_count, int col_count, int max_iterations)
: Mandel(row_count, col_count, max_iterations) {
Alloc();
}
~MandelSerial() { Free(); }
void Evaluate() {
// Iterate over image and compute mandel for each point.
MandelParameters p = GetParameters();
for (int i = 0; i < p.row_count(); ++i) {
for (int j = 0; j < p.col_count(); ++j) {
auto c = MandelParameters::ComplexF(p.ScaleRow(i), p.ScaleCol(j));
SetValue(i, j, p.Point(c));
}
}
}
};
// Parallel implementation for computing Mandelbrot set using buffers.
class MandelParallel : public Mandel {
public:
MandelParallel(int row_count, int col_count, int max_iterations)
: Mandel(row_count, col_count, max_iterations) {
Alloc();
}
~MandelParallel() { Free(); }
void Evaluate(queue &q) {
// Iterate over image and check if each point is in Mandelbrot set.
MandelParameters p = GetParameters();
const int rows = p.row_count();
const int cols = p.col_count();
buffer data_buf(data(), range(rows, cols));
// We submit a command group to the queue.
q.submit([&](handler &h) {
// Get access to the buffer.
auto b = data_buf.get_access(h,write_only);
// Iterate over image and compute mandel for each point.
h.parallel_for(range<2>(rows, cols), [=](auto index) {
int i = int(index[0]);
int j = int(index[1]);
auto c = MandelParameters::ComplexF(p.ScaleRow(i), p.ScaleCol(j));
b[index] = p.Point(c);
});
});
}
};
// Parallel implementation for computing Mandelbrot set using Unified Shared
// Memory (USM).
class MandelParallelUsm : public Mandel {
private:
queue *q;
public:
MandelParallelUsm(int row_count, int col_count, int max_iterations, queue *q)
: Mandel(row_count, col_count, max_iterations) {
this->q = q;
Alloc();
}
~MandelParallelUsm() { Free(); }
virtual void Alloc() {
MandelParameters p = GetParameters();
data_ = malloc_shared<int>(p.row_count() * p.col_count(), *q);
}
virtual void Free() { free(data_, *q); }
void Evaluate(queue &q) {
// Iterate over image and check if each point is in Mandelbrot set.
MandelParameters p = GetParameters();
const int rows = p.row_count();
const int cols = p.col_count();
auto ldata = data_;
// Iterate over image and compute mandel for each point.
auto e = q.parallel_for(range(rows * cols), [=](id<1> index) {
int i = index / cols;
int j = index % cols;
auto c = MandelParameters::ComplexF(p.ScaleRow(i), p.ScaleCol(j));
ldata[index] = p.Point(c);
});
// Wait for the asynchronous computation on device to complete.
e.wait();
}
};
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/CombinationalLogic/sepia-filter/src/device_selector.hpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef DEVICESELECTOR_HPP
#define DEVICESELECTOR_HPP
#include <cstring>
#include <iostream>
#include <string>
#include "CL/sycl.hpp"
// This is the class provided to the SYCL runtime by the application to decide
// on which device to run, or whether to run at all.
// When selecting a device, the SYCL runtime first takes (1) a selector
// provided by the program, or a default one, and (2) the set of all available
// devices. Then it passes each device to the '()' operator of the selector.
// The device for which '()' returned the highest number is selected. If a
// negative number was returned for all devices, the selection process throws
// an exception.
class MyDeviceSelector {
public:
MyDeviceSelector() {}
// This is the function which gives a "rating" to devices.
virtual int operator()(const cl::sycl::device &device) const {
// The template parameter to device.get_info can be a variety of properties
// defined by the SYCL spec's cl::sycl::info:: enum. Properties may have
// different types. Here we query name which is a string.
const std::string name = device.get_info<cl::sycl::info::device::name>();
//uncomment to see the list of devices available on the system
//std::cout << "Trying device: " << name << "..." << "\n";
//std::cout << " Vendor: "
// << device.get_info<cl::sycl::info::device::vendor>() << "\n";
if (device.is_gpu()) return 500;
if (device.is_accelerator()) return 400;
if (device.is_cpu()) return 300;
return -1;
}
};
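// Usage sketch (illustrative only; not part of the original header). In
// SYCL 2020 any callable taking a device and returning an int can act as a
// selector, so the functor above can be passed straight to a queue:
//
//   MyDeviceSelector sel;
//   cl::sycl::queue q(sel);  // prefers GPU (500) > accelerator (400) > CPU (300)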
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/CombinationalLogic/sepia-filter/src/sepia_sycl.cpp | //==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <chrono>
#include <cmath>
#include <iostream>
#include <sycl/sycl.hpp>
#include "device_selector.hpp"
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
// stb/*.h files can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/stb/*.h
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
using namespace std;
using namespace sycl;
// A few useful aliases.
constexpr auto sycl_read = access::mode::read;
constexpr auto sycl_write = access::mode::write;
constexpr auto sycl_device = access::target::device;
static void ReportTime(const string &msg, event e) {
cl_ulong time_start =
e.get_profiling_info<info::event_profiling::command_start>();
cl_ulong time_end =
e.get_profiling_info<info::event_profiling::command_end>();
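  // Profiling timestamps are reported in nanoseconds; dividing the difference
  // by 1e6 yields milliseconds.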
double elapsed = (time_end - time_start) / 1e6;
cout << msg << elapsed << " milliseconds\n";
}
// SYCL does not need any special mark-up for functions which are called from
// a SYCL kernel and defined in the same compilation unit. The SYCL compiler
// must be able to find the full call graph automatically.
// always_inline as calls are expensive on Gen GPU.
// Notes:
// - coeffs can be declared outside of the function, but still must be constant
// - SYCL compiler will automatically deduce the address space for the two
//   pointers; a sycl::multi_ptr specialization for a particular address space
//   can be used for more control
__attribute__((always_inline)) static void ApplyFilter(uint8_t *src_image,
uint8_t *dst_image,
int i) {
i *= 3;
float temp;
temp = (0.393f * src_image[i]) + (0.769f * src_image[i + 1]) +
(0.189f * src_image[i + 2]);
dst_image[i] = temp > 255 ? 255 : temp;
temp = (0.349f * src_image[i]) + (0.686f * src_image[i + 1]) +
(0.168f * src_image[i + 2]);
dst_image[i + 1] = temp > 255 ? 255 : temp;
temp = (0.272f * src_image[i]) + (0.534f * src_image[i + 1]) +
(0.131f * src_image[i + 2]);
dst_image[i + 2] = temp > 255 ? 255 : temp;
}
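// Hedged illustration (not used by the sample): the three weighted sums in
// ApplyFilter are the rows of this standard sepia matrix applied to the
// (r, g, b) input channels.
constexpr float kSepiaMatrix[3][3] = {{0.393f, 0.769f, 0.189f},
                                      {0.349f, 0.686f, 0.168f},
                                      {0.272f, 0.534f, 0.131f}};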
// This is alternative (to a lambda) representation of a SYCL kernel.
// Internally, the compiler transforms lambdas into instances of a very similar
// class. With functors, capturing kernel parameters is done manually via the
// constructor, unlike automatic capturing with lambdas.
class SepiaFunctor {
public:
// Constructor captures needed data into fields
SepiaFunctor(
accessor<uint8_t, 1, sycl_read, sycl_device> &image_acc_,
accessor<uint8_t, 1, sycl_write, sycl_device> &image_exp_acc_)
: image_acc(image_acc_), image_exp_acc(image_exp_acc_) {}
// The '()' operator is the actual kernel
void operator()(id<1> i) const {
ApplyFilter(image_acc.get_pointer(), image_exp_acc.get_pointer(), i.get(0));
}
private:
// Captured values:
accessor<uint8_t, 1, sycl_read, sycl_device> image_acc;
accessor<uint8_t, 1, sycl_write, sycl_device> image_exp_acc;
};
int main(int argc, char **argv) {
if (argc < 2) {
cout << "Program usage is <executable> <inputfile>\n";
exit(1);
}
// loading the input image
int img_width, img_height, channels;
uint8_t *image = stbi_load(argv[1], &img_width, &img_height, &channels, 0);
if (image == NULL) {
cout << "Error in loading the image\n";
exit(1);
}
cout << "Loaded image with a width of " << img_width << ", a height of "
<< img_height << " and " << channels << " channels\n";
size_t num_pixels = img_width * img_height;
size_t img_size = img_width * img_height * channels;
// allocating memory for output images
uint8_t *image_ref = new uint8_t[img_size];
uint8_t *image_exp1 = new uint8_t[img_size];
uint8_t *image_exp2 = new uint8_t[img_size];
memset(image_ref, 0, img_size * sizeof(uint8_t));
memset(image_exp1, 0, img_size * sizeof(uint8_t));
memset(image_exp2, 0, img_size * sizeof(uint8_t));
// Create a device selector which rates available devices in the preferred
// order for the runtime to select the highest rated device
// Note: This is only to illustrate the usage of a custom device selector.
// default_selector can be used if no customization is required.
MyDeviceSelector sel;
// Using these events to time command group execution
event e1, e2;
// Wrap main SYCL API calls into a try/catch to diagnose potential errors
try {
// Create a command queue using the device selector and request profiling
auto prop_list = property_list{property::queue::enable_profiling()};
queue q(sel, dpc_common::exception_handler, prop_list);
// See what device was actually selected for this queue.
cout << "Running on " << q.get_device().get_info<info::device::name>()
<< "\n";
// Create SYCL buffer representing source data .
  // By default, these buffers will be created with global_buffer access
// target, which means the buffer "projection" to the device (actual
// device memory chunk allocated or mapped on the device to reflect
// buffer's data) will belong to the SYCL global address space - this
// is what host data usually maps to. Other address spaces are:
// private, local and constant.
// Notes:
// - access type (read/write) is not specified when creating a buffer -
// this is done when actual accessor is created
// - there can be multiple accessors to the same buffer in multiple command
// groups
// - 'image' pointer was passed to the constructor, so this host memory
// will be used for "host projection", no allocation will happen on host
buffer image_buf(image, range(img_size));
// This is the output buffer device writes to
buffer image_buf_exp1(image_exp1, range(img_size));
cout << "Submitting lambda kernel...\n";
// Submit a command group for execution. Returns immediately, not waiting
// for command group completion.
e1 = q.submit([&](auto &h) {
// This lambda defines a "command group" - a set of commands for the
// device sharing some state and executed in-order - i.e. creation of
// accessors may lead to on-device memory allocation, only after that
// the kernel will be enqueued.
// A command group can contain at most one parallel_for, single_task or
// parallel_for_workgroup construct.
accessor image_acc(image_buf, h, read_only);
accessor image_exp_acc(image_buf_exp1, h, write_only);
      // This is the simplest form of sycl::handler::parallel_for:
      // - it specifies a "flat" 1D ND range(num_pixels); the runtime will
      //   select the local size
      // - the kernel lambda accepts a single sycl::id argument, which has a
      //   very limited API; see the spec for more complex forms
      // The lambda parameter of the parallel_for is the kernel, which
      // actually executes on the device.
h.parallel_for(range<1>(num_pixels), [=](auto i) {
ApplyFilter(image_acc.get_pointer(), image_exp_acc.get_pointer(), i);
});
});
q.wait_and_throw();
cout << "Submitting functor kernel...\n";
buffer image_buf_exp2(image_exp2, range(img_size));
// Submit another command group. This time kernel is represented as a
// functor object.
e2 = q.submit([&](auto &h) {
accessor image_acc(image_buf, h, read_only);
accessor image_exp_acc(image_buf_exp2, h, write_only);
SepiaFunctor kernel(image_acc, image_exp_acc);
h.parallel_for(range<1>(num_pixels), kernel);
});
cout << "Waiting for execution to complete...\n";
q.wait_and_throw();
  } catch (const sycl::exception &e) {
    // This catches only synchronous exceptions that happened in the current
    // thread during execution. The asynchronous exceptions caused by
    // execution of the command group are caught by the registered
    // asynchronous exception handler. Synchronous exceptions are usually
    // those thrown from the SYCL runtime code, such as on invalid constructor
    // arguments. An example of an asynchronous exception is an error
    // occurring during execution of a kernel. Make sure sycl::exception is
    // caught, not std::exception.
cout << "SYCL exception caught: " << e.what() << "\n";
return 1;
}
cout << "Execution completed\n";
// report execution times:
ReportTime("Lambda kernel time: ", e1);
ReportTime("Functor kernel time: ", e2);
// get reference result
for (size_t i = 0; i < num_pixels; i++) {
ApplyFilter(image, image_ref, i);
}
stbi_write_png("sepia_ref.png", img_width, img_height, channels, image_ref,
img_width * channels);
stbi_write_png("sepia_lambda.png", img_width, img_height, channels,
image_exp1, img_width * channels);
stbi_write_png("sepia_functor.png", img_width, img_height, channels,
image_exp2, img_width * channels);
stbi_image_free(image);
delete[] image_ref;
delete[] image_exp1;
delete[] image_exp2;
cout << "Sepia tone successfully applied to image:[" << argv[1] << "]\n";
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/N-BodyMethods/Nbody/src/main.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iostream>
#include "GSimulation.hpp"
int main(int argc, char** argv) {
int n; // number of particles
  int nstep;  // number of integration steps
GSimulation sim;
#ifdef DEBUG
char* env = std::getenv("SYCL_BE");
std::cout << "[ENV] SYCL_BE = " << (env ? env : "<not set>") << "\n";
#endif
if (argc > 1) {
n = std::atoi(argv[1]);
sim.SetNumberOfParticles(n);
if (argc == 3) {
nstep = std::atoi(argv[2]);
sim.SetNumberOfSteps(nstep);
}
}
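  // Example invocations (illustrative): running with no arguments uses the
  // defaults set in GSimulation (16000 particles, 10 steps); passing two
  // arguments, e.g. "8000 20", overrides both.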
sim.Start();
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/N-BodyMethods/Nbody/src/GSimulation.hpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#ifndef _GSIMULATION_HPP
#define _GSIMULATION_HPP
#include <sycl/sycl.hpp>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include "Particle.hpp"
class GSimulation {
public:
GSimulation();
void Init();
void SetNumberOfParticles(int N);
void SetNumberOfSteps(int N);
void Start();
private:
// Particle *particles_;
std::vector<Particle> particles_;
int npart_; // number of particles
int nsteps_; // number of integration steps
RealType tstep_; // time step of the simulation
int sfreq_; // sample frequency
RealType kenergy_; // kinetic energy
double total_time_; // total time of the simulation
double total_flops_; // total number of FLOPS
void InitPos();
void InitVel();
void InitAcc();
void InitMass();
void set_npart(const int &N) { npart_ = N; }
int get_npart() const { return npart_; }
void set_tstep(const RealType &dt) { tstep_ = dt; }
RealType get_tstep() const { return tstep_; }
void set_nsteps(const int &n) { nsteps_ = n; }
int get_nsteps() const { return nsteps_; }
void set_sfreq(const int &sf) { sfreq_ = sf; }
int get_sfreq() const { return sfreq_; }
void PrintHeader();
};
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/N-BodyMethods/Nbody/src/type.hpp | using RealType = float;
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/N-BodyMethods/Nbody/src/Particle.hpp | #ifndef _PARTICLE_HPP
#define _PARTICLE_HPP
#include <cmath>
#include "type.hpp"
struct Particle {
public:
Particle() : pos{}, vel{}, acc{}, mass{} {};
RealType pos[3];
RealType vel[3];
RealType acc[3];
RealType mass;
};
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/N-BodyMethods/Nbody/src/GSimulation.cpp | //==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include "GSimulation.hpp"
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/latest/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace sycl;
/* Default Constructor for the GSimulation class which sets up the default
 * values for number of particles, number of integration steps, time step and
* sample frequency */
GSimulation::GSimulation() {
std::cout << "==============================="
<< "\n";
std::cout << " Initialize Gravity Simulation"
<< "\n";
set_npart(16000);
set_nsteps(10);
set_tstep(0.1);
set_sfreq(1);
}
/* Set the number of particles */
void GSimulation::SetNumberOfParticles(int N) { set_npart(N); }
/* Set the number of integration steps */
void GSimulation::SetNumberOfSteps(int N) { set_nsteps(N); }
/* Initialize the position of all the particles using random number generator
* between 0 and 1.0 */
void GSimulation::InitPos() {
std::random_device rd; // random number generator
std::mt19937 gen(42);
std::uniform_real_distribution<RealType> unif_d(0, 1.0);
for (int i = 0; i < get_npart(); ++i) {
particles_[i].pos[0] = unif_d(gen);
particles_[i].pos[1] = unif_d(gen);
particles_[i].pos[2] = unif_d(gen);
}
}
/* Initialize the velocity of all the particles using random number generator
* between -1.0 and 1.0 */
void GSimulation::InitVel() {
std::random_device rd; // random number generator
std::mt19937 gen(42);
std::uniform_real_distribution<RealType> unif_d(-1.0, 1.0);
for (int i = 0; i < get_npart(); ++i) {
particles_[i].vel[0] = unif_d(gen) * 1.0e-3f;
particles_[i].vel[1] = unif_d(gen) * 1.0e-3f;
particles_[i].vel[2] = unif_d(gen) * 1.0e-3f;
}
}
/* Initialize the acceleration of all the particles to 0 */
void GSimulation::InitAcc() {
for (int i = 0; i < get_npart(); ++i) {
particles_[i].acc[0] = 0.f;
particles_[i].acc[1] = 0.f;
particles_[i].acc[2] = 0.f;
}
}
/* Initialize the mass of all the particles using a random number generator
* between 0 and 1 */
void GSimulation::InitMass() {
RealType n = static_cast<RealType>(get_npart());
std::random_device rd; // random number generator
std::mt19937 gen(42);
std::uniform_real_distribution<RealType> unif_d(0.0, 1.0);
for (int i = 0; i < get_npart(); ++i) {
particles_[i].mass = n * unif_d(gen);
}
}
/* This function does the simulation logic for Nbody */
void GSimulation::Start() {
RealType dt = get_tstep();
int n = get_npart();
particles_.resize(n);
InitPos();
InitVel();
InitAcc();
InitMass();
PrintHeader();
total_time_ = 0.;
constexpr float kSofteningSquared = 1e-3f;
  // Softening prevents the force from exploding when two particles are
  // extremely close to each other.
constexpr float kG = 6.67259e-11f;
double gflops = 1e-9 * ((11. + 18.) * n * n + n * 19.);
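  // Flop model (from the per-line counts in the kernels below): 11 flops per
  // particle pair for the distance terms (3 subtractions + 6 for distance_sqr
  // + 1 div + 1 sqrt) plus 18 for the three acceleration updates, and 19
  // flops per particle for the velocity/position/energy updates (12 + 7).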
int nf = 0;
double av = 0.0, dev = 0.0;
// Create global range
auto r = range<1>(n);
// Create local range
auto lr = range<1>(128);
// Create ndrange
auto ndrange = nd_range<1>(r, lr);
// Create a queue to the selected device and enabled asynchronous exception
// handling for that queue
queue q(default_selector_v);
// Create SYCL buffer for the Particle array of size "n"
buffer pbuf(particles_.data(), r,
{sycl::property::buffer::use_host_ptr()});
// Allocate energy using USM allocator shared
RealType *energy = malloc_shared<RealType>(1,q);
*energy = 0.f;
dpc_common::TimeInterval t0;
int nsteps = get_nsteps();
// Looping across integration steps
for (int s = 1; s <= nsteps; ++s) {
dpc_common::TimeInterval ts0;
// Submitting first kernel to device which computes acceleration of all
// particles
q.submit([&](handler& h) {
auto p = pbuf.get_access(h);
h.parallel_for(ndrange, [=](nd_item<1> it) {
auto i = it.get_global_id();
RealType acc0 = p[i].acc[0];
RealType acc1 = p[i].acc[1];
RealType acc2 = p[i].acc[2];
for (int j = 0; j < n; j++) {
RealType dx, dy, dz;
RealType distance_sqr = 0.0f;
RealType distance_inv = 0.0f;
dx = p[j].pos[0] - p[i].pos[0]; // 1flop
dy = p[j].pos[1] - p[i].pos[1]; // 1flop
dz = p[j].pos[2] - p[i].pos[2]; // 1flop
distance_sqr =
dx * dx + dy * dy + dz * dz + kSofteningSquared; // 6flops
distance_inv = 1.0f / sycl::sqrt(distance_sqr); // 1div+1sqrt
acc0 += dx * kG * p[j].mass * distance_inv * distance_inv *
distance_inv; // 6flops
acc1 += dy * kG * p[j].mass * distance_inv * distance_inv *
distance_inv; // 6flops
acc2 += dz * kG * p[j].mass * distance_inv * distance_inv *
distance_inv; // 6flops
}
p[i].acc[0] = acc0;
p[i].acc[1] = acc1;
p[i].acc[2] = acc2;
});
}).wait_and_throw();
// Second kernel updates the velocity and position for all particles
q.submit([&](handler& h) {
auto p = pbuf.get_access(h);
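      // sycl::reduction accumulates each work-item's contribution into
      // *energy, seeded with the identity 0.f and combined with plus<>.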
      h.parallel_for(ndrange, reduction(energy, 0.f, std::plus<RealType>()),
                     [=](nd_item<1> it, auto& energy) {
auto i = it.get_global_id();
p[i].vel[0] += p[i].acc[0] * dt; // 2flops
p[i].vel[1] += p[i].acc[1] * dt; // 2flops
p[i].vel[2] += p[i].acc[2] * dt; // 2flops
p[i].pos[0] += p[i].vel[0] * dt; // 2flops
p[i].pos[1] += p[i].vel[1] * dt; // 2flops
p[i].pos[2] += p[i].vel[2] * dt; // 2flops
p[i].acc[0] = 0.f;
p[i].acc[1] = 0.f;
p[i].acc[2] = 0.f;
energy += (p[i].mass *
(p[i].vel[0] * p[i].vel[0] + p[i].vel[1] * p[i].vel[1] +
p[i].vel[2] * p[i].vel[2])); // 7flops
});
}).wait_and_throw();
kenergy_ = 0.5 * (*energy);
*energy = 0.f;
double elapsed_seconds = ts0.Elapsed();
if ((s % get_sfreq()) == 0) {
nf += 1;
std::cout << " " << std::left << std::setw(8) << s << std::left
<< std::setprecision(5) << std::setw(8) << s * get_tstep()
<< std::left << std::setprecision(5) << std::setw(12)
<< kenergy_ << std::left << std::setprecision(5)
<< std::setw(12) << elapsed_seconds << std::left
<< std::setprecision(5) << std::setw(12)
<< gflops * get_sfreq() / elapsed_seconds << "\n";
if (nf > 2) {
av += gflops * get_sfreq() / elapsed_seconds;
dev += gflops * get_sfreq() * gflops * get_sfreq() /
(elapsed_seconds * elapsed_seconds);
}
}
} // end of the time step loop
total_time_ = t0.Elapsed();
total_flops_ = gflops * get_nsteps();
av /= (double)(nf - 2);
dev = sqrt(dev / (double)(nf - 2) - av * av);
std::cout << "\n";
std::cout << "# Total Time (s) : " << total_time_ << "\n";
std::cout << "# Average Performance : " << av << " +- " << dev << "\n";
std::cout << "==============================="
<< "\n";
}
/* Print the headers for the output */
void GSimulation::PrintHeader() {
std::cout << " nPart = " << get_npart() << "; "
<< "nSteps = " << get_nsteps() << "; "
<< "dt = " << get_tstep() << "\n";
std::cout << "------------------------------------------------"
<< "\n";
std::cout << " " << std::left << std::setw(8) << "s" << std::left
<< std::setw(8) << "dt" << std::left << std::setw(12) << "kenergy"
<< std::left << std::setw(12) << "time (s)" << std::left
<< std::setw(12) << "GFLOPS"
<< "\n";
std::cout << "------------------------------------------------"
<< "\n";
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/GraphAlgorithms/all-pairs-shortest-paths/src/apsp.cpp | //==============================================================
// This sample provides a parallel implementation of blocked Floyd Warshall
// algorithm to compute all pairs shortest paths using SYCL.
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#include <cstdlib>
#include <iostream>
// dpc_common.hpp can be found in the dev-utilities include folder.
// e.g., $ONEAPI_ROOT/dev-utilities/<version>/include/dpc_common.hpp
#include "dpc_common.hpp"
using namespace std;
using namespace sycl;
// Number of nodes in the graph.
constexpr int nodes = 1024;
// Block length and block count (along a single dimension).
constexpr int block_length = 16;
constexpr int block_count = (nodes / block_length);
// Maximum distance between two adjacent nodes.
constexpr int max_distance = 100;
constexpr int infinite = (nodes * max_distance);
// Number of repetitions.
constexpr int repetitions = 8;
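// Size check (illustrative arithmetic): each graph is nodes * nodes ints,
// i.e. 1024 * 1024 * 4 bytes = 4 MiB per copy with 4-byte ints.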
// Randomly initialize directed graph.
void InitializeDirectedGraph(int *graph) {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
int cell = i * nodes + j;
if (i == j) {
graph[cell] = 0;
} else if (rand() % 2) {
graph[cell] = infinite;
} else {
graph[cell] = rand() % max_distance + 1;
}
}
}
}
// Copy graph.
void CopyGraph(int *to, int *from) {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
int cell = i * nodes + j;
to[cell] = from[cell];
}
}
}
// Check if two graphs are equal.
bool VerifyGraphsAreEqual(int *graph, int *h) {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
int cell = i * nodes + j;
if (graph[cell] != h[cell]) {
return false;
}
}
}
return true;
}
// The basic (sequential) implementation of Floyd Warshall algorithm for
// computing all pairs shortest paths.
void FloydWarshall(int *graph) {
for (int k = 0; k < nodes; k++) {
for (int i = 0; i < nodes; i++) {
for (int j = 0; j < nodes; j++) {
if (graph[i * nodes + j] >
graph[i * nodes + k] + graph[k * nodes + j]) {
graph[i * nodes + j] = graph[i * nodes + k] + graph[k * nodes + j];
}
}
}
}
}
typedef local_accessor<int, 2> LocalBlock;
// Inner loop of the blocked Floyd Warshall algorithm. A thread handles one cell
// of a block. To complete the computation of a block, this function is invoked
// by as many threads as there are cells in the block. Each such invocation
// computes as many iterations as there are blocks (along a single dimension).
// Moreover, each thread (simultaneously operating on a block), synchronizes
// between them at the end of each iteration. This is required for correctness
// as a following iteration depends on the previous iteration.
void BlockedFloydWarshallCompute(nd_item<1> &item, const LocalBlock &C,
const LocalBlock &A, const LocalBlock &B,
int i, int j) {
for (int k = 0; k < block_length; k++) {
if (C[i][j] > A[i][k] + B[k][j]) {
C[i][j] = A[i][k] + B[k][j];
}
item.barrier(access::fence_space::local_space);
}
}
// Phase 1 of blocked Floyd Warshall algorithm. It always operates on a block
// on the diagonal of the adjacency matrix of the graph.
void BlockedFloydWarshallPhase1(queue &q, int *graph, int round) {
// Each group will process one block.
constexpr auto blocks = 1;
// Each item/thread in a group will handle one cell of the block.
constexpr auto block_size = block_length * block_length;
q.submit([&](handler &h) {
LocalBlock block(range<2>(block_length, block_length), h);
h.parallel_for<class KernelPhase1>(
nd_range<1>(blocks * block_size, block_size), [=](nd_item<1> item) {
auto tid = item.get_local_id(0);
auto i = tid / block_length;
auto j = tid % block_length;
// Copy data to local memory.
block[i][j] = graph[(round * block_length + i) * nodes +
(round * block_length + j)];
item.barrier(access::fence_space::local_space);
// Compute.
BlockedFloydWarshallCompute(item, block, block, block, i, j);
// Copy back data to global memory.
graph[(round * block_length + i) * nodes +
(round * block_length + j)] = block[i][j];
item.barrier(access::fence_space::local_space);
});
});
q.wait();
}
// Phase 2 of blocked Floyd Warshall algorithm. It always operates on blocks
// that are either on the same row or on the same column of a diagonal block.
void BlockedFloydWarshallPhase2(queue &q, int *graph, int round) {
// Each group will process one block.
constexpr auto blocks = block_count;
// Each item/thread in a group will handle one cell of the block.
constexpr auto block_size = block_length * block_length;
q.submit([&](handler &h) {
LocalBlock diagonal(range<2>(block_length, block_length), h);
LocalBlock off_diag(range<2>(block_length, block_length), h);
h.parallel_for<class KernelPhase2>(
nd_range<1>(blocks * block_size, block_size), [=](nd_item<1> item) {
auto gid = item.get_group(0);
auto index = gid;
if (index != round) {
auto tid = item.get_local_id(0);
auto i = tid / block_length;
auto j = tid % block_length;
// Copy data to local memory.
diagonal[i][j] = graph[(round * block_length + i) * nodes +
(round * block_length + j)];
off_diag[i][j] = graph[(index * block_length + i) * nodes +
(round * block_length + j)];
item.barrier(access::fence_space::local_space);
// Compute for blocks above and below the diagonal block.
BlockedFloydWarshallCompute(item, off_diag, off_diag, diagonal, i,
j);
// Copy back data to global memory.
graph[(index * block_length + i) * nodes +
(round * block_length + j)] = off_diag[i][j];
// Copy data to local memory.
off_diag[i][j] = graph[(round * block_length + i) * nodes +
(index * block_length + j)];
item.barrier(access::fence_space::local_space);
// Compute for blocks at left and at right of the diagonal block.
BlockedFloydWarshallCompute(item, off_diag, diagonal, off_diag, i,
j);
// Copy back data to global memory.
graph[(round * block_length + i) * nodes +
(index * block_length + j)] = off_diag[i][j];
item.barrier(access::fence_space::local_space);
}
});
});
q.wait();
}
// Phase 3 of blocked Floyd Warshall algorithm. It operates on all blocks except
// the ones that are handled in phase 1 and in phase 2 of the algorithm.
void BlockedFloydWarshallPhase3(queue &q, int *graph, int round) {
// Each group will process one block.
constexpr auto blocks = block_count * block_count;
// Each item/thread in a group will handle one cell of the block.
constexpr auto block_size = block_length * block_length;
q.submit([&](handler &h) {
LocalBlock A(range<2>(block_length, block_length), h);
LocalBlock B(range<2>(block_length, block_length), h);
LocalBlock C(range<2>(block_length, block_length), h);
h.parallel_for<class KernelPhase3>(
nd_range<1>(blocks * block_size, block_size), [=](nd_item<1> item) {
auto bk = round;
auto gid = item.get_group(0);
auto bi = gid / block_count;
auto bj = gid % block_count;
if ((bi != bk) && (bj != bk)) {
auto tid = item.get_local_id(0);
auto i = tid / block_length;
auto j = tid % block_length;
// Copy data to local memory.
A[i][j] = graph[(bi * block_length + i) * nodes +
(bk * block_length + j)];
B[i][j] = graph[(bk * block_length + i) * nodes +
(bj * block_length + j)];
C[i][j] = graph[(bi * block_length + i) * nodes +
(bj * block_length + j)];
item.barrier(access::fence_space::local_space);
// Compute.
BlockedFloydWarshallCompute(item, C, A, B, i, j);
// Copy back data to global memory.
graph[(bi * block_length + i) * nodes + (bj * block_length + j)] =
C[i][j];
item.barrier(access::fence_space::local_space);
}
});
});
q.wait();
}
// Parallel implementation of the blocked Floyd Warshall algorithm. Each round
// has three phases. Once the previous round's phases are complete, phase 1 of
// the next round is independent; phase 2 can only execute after phase 1
// completes; similarly, phase 3 depends on phase 2, so it can only execute
// after phase 2 is complete.
//
// The inner loop of the sequential implementation is similar to:
// g[i][j] = min(g[i][j], g[i][k] + g[k][j])
// A careful observation shows that for the kth iteration of the outer loop,
// the computation depends on cells either on the kth column, g[i][k] or on the
// kth row, g[k][j] of the graph. Phase 1 handles g[k][k], phase 2 handles
// g[*][k] and g[k][*], and phase 3 handles g[*][*] in that sequence. These
// cell-level observations largely propagate to the blocks as well.
void BlockedFloydWarshall(queue &q, int *graph) {
for (int round = 0; round < block_count; round++) {
BlockedFloydWarshallPhase1(q, graph, round);
BlockedFloydWarshallPhase2(q, graph, round);
BlockedFloydWarshallPhase3(q, graph, round);
}
}
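// Per-round block accounting (illustrative): with block_count = 64, each
// round updates 1 diagonal block in phase 1, 2 * (block_count - 1) = 126
// row/column blocks in phase 2, and (block_count - 1)^2 = 3969 remaining
// blocks in phase 3, together covering all 64 * 64 = 4096 blocks.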
int main() {
try {
queue q{default_selector_v};
auto device = q.get_device();
auto work_group_size = device.get_info<info::device::max_work_group_size>();
auto block_size = block_length * block_length;
cout << "Device: " << device.get_info<info::device::name>() << "\n";
if (work_group_size < block_size) {
cout << "Work group size " << work_group_size
<< " is less than required size " << block_size << "\n";
return -1;
}
    // Allocate the reference graph on the host, and unified shared memory
    // for the two working copies so their graph data is accessible to both
    // the CPU and the device (e.g., a GPU).
int *graph = (int *)malloc(sizeof(int) * nodes * nodes);
int *sequential = malloc_shared<int>(nodes * nodes, q);
int *parallel = malloc_shared<int>(nodes * nodes, q);
if ((graph == nullptr) || (sequential == nullptr) ||
(parallel == nullptr)) {
if (graph != nullptr) free(graph);
if (sequential != nullptr) free(sequential, q);
if (parallel != nullptr) free(parallel, q);
cout << "Memory allocation failure.\n";
return -1;
}
// Initialize directed graph.
InitializeDirectedGraph(graph);
// Warm up the JIT.
CopyGraph(parallel, graph);
BlockedFloydWarshall(q, parallel);
// Measure execution times.
double elapsed_s = 0;
double elapsed_p = 0;
int i;
cout << "Repeating computation " << repetitions
<< " times to measure run time ...\n";
for (i = 0; i < repetitions; i++) {
cout << "Iteration: " << (i + 1) << "\n";
// Sequential all pairs shortest paths.
CopyGraph(sequential, graph);
dpc_common::TimeInterval timer_s;
FloydWarshall(sequential);
elapsed_s += timer_s.Elapsed();
// Parallel all pairs shortest paths.
CopyGraph(parallel, graph);
dpc_common::TimeInterval timer_p;
BlockedFloydWarshall(q, parallel);
elapsed_p += timer_p.Elapsed();
// Verify two results are equal.
if (!VerifyGraphsAreEqual(sequential, parallel)) {
cout << "Failed to correctly compute all pairs shortest paths!\n";
break;
}
}
if (i == repetitions) {
cout << "Successfully computed all pairs shortest paths in parallel!\n";
elapsed_s /= repetitions;
elapsed_p /= repetitions;
cout << "Time sequential: " << elapsed_s << " sec\n";
cout << "Time parallel: " << elapsed_p << " sec\n";
}
// Free unified shared memory.
free(graph);
free(sequential, q);
free(parallel, q);
} catch (std::exception const &e) {
cout << "An exception is caught while computing on device.\n";
terminate();
}
return 0;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface, this is used if we don't want to use
// the CUT functions But rather in a self contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember that the frequency has been queried
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OSX specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
//! _stopped_ (ie finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and increment add to the current diff_time summation
//! variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface receives the new timer; 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the timer
//! @param timer_interface the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the timer. Does not reset.
//! @param timer_interface the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface the timer to return the time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
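// Usage sketch (illustrative helper, not part of the original header): the
// typical create/start/stop/read/delete sequence for these timers.
inline float sdkTimeOnceMs(void (*work)()) {
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);
  sdkStartTimer(&timer);
  work();  // the workload being measured
  sdkStopTimer(&timer);
  float ms = sdkGetTimerValue(&timer);
  sdkDeleteTimer(&timer);
  return ms;
}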
#endif // COMMON_HELPER_TIMER_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
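// Example (illustrative): stringRemoveDelimiter('-', "--width=128") returns
// 2, so callers parse the argument starting at "width=128".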
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// This function wraps the CUDA Driver API into a template function
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
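// Example (illustrative): with argv = {"app", "-n=42"},
// getCmdLineArgumentInt(argc, argv, "n") returns 42; a bare "-n" or an
// absent flag returns 0.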
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
// input data, or JIT source files) The origin for the relative search may be
// the .exe file, a .bat file launching an .exe, a browser .exe launching the
// .exe or .bat, etc
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
    size_t ext_pos = executable_name.rfind(".exe");
    if (ext_pos != std::string::npos &&
        ext_pos == executable_name.size() - 4) {
      // strip ".exe" only when it is actually the trailing extension
      executable_name.resize(executable_name.size() - 4);
    }
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
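// Illustrative usage (hypothetical data file; the returned buffer is
// malloc()'d for backwards compatibility, so the caller must free() it):
//
//   char *ref_path = sdkFindFilePath("ref_data.bin", argv[0]);
//   if (ref_path != NULL) {
//     printf("found: %s\n", ref_path);
//     free(ref_path);
//   }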
#endif // COMMON_HELPER_STRING_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUDA Utility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
  //! @return Always throws (Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
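// Illustrative usage of the convenience macros (hypothetical file name):
//
//   FILE *fp = fopen("config.txt", "rb");
//   if (fp == NULL) {
//     // throws Exception<std::runtime_error> tagged with __FILE__/__LINE__
//     RUNTIME_EXCEPTION("config.txt not found");
//   }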
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not meant for
  // performance-critical / release builds
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed info.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the headers that are needed, which
// may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
           int const line) {
  // Intentionally a no-op in the SYCL migration: SYCL reports errors via
  // exceptions rather than error codes (see the DPCT notes in this file),
  // so there is nothing to inspect here.
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
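// Typical use in the migrated samples wraps a SYCL call in DPCT_CHECK_ERROR,
// e.g.:
//   checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(0)));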
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError
// but does not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
// parenthesize the arguments so expressions like MAX(a & b, c) expand safely
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
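// e.g. ftoi(2.4f) == 2, ftoi(2.5f) == 3, ftoi(-2.5f) == -3
// (rounds halfway cases away from zero)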
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM)
typedef struct dpct_type_113531 {
    int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the value, we fall back to the last known entry
  // so the sample can still run
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
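// e.g. _ConvertSMVer2Cores(7, 0) == 64 (Volta) and
//      _ConvertSMVer2Cores(8, 6) == 128 (Ampere GA10x), per the table above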
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_281558 {
    int SM; // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the value, we fall back to the last known entry
  // so the sample can still run
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:23: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:24: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:25: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
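      // relative performance estimate: compute units x cores/SM x clock;
      // absolute units are irrelevant since it is only used to rank devices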
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:26: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
// If GPU is integrated and is not running on Compute Mode prohibited,
// then cuda can map to GLES resource
/*
DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:29: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
  checkCudaErrors(
      DPCT_CHECK_ERROR(dev = dpct::dev_mgr::instance().current_device_id()));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/taskflow.hpp | #pragma once
#include "core/executor.hpp"
#include "algorithm/critical.hpp"
#include "algorithm/for_each.hpp"
/**
@dir taskflow
@brief root taskflow include dir
*/
/**
@dir taskflow/core
@brief taskflow core include dir
*/
/**
@dir taskflow/algorithm
@brief taskflow algorithms include dir
*/
/**
@dir taskflow/cuda
@brief taskflow CUDA include dir
*/
/**
@file taskflow/taskflow.hpp
@brief main taskflow include file
*/
// TF_VERSION % 100 is the patch level
// TF_VERSION / 100 % 1000 is the minor version
// TF_VERSION / 100000 is the major version
// current version: 3.5.0
#define TF_VERSION 300500
#define TF_MAJOR_VERSION TF_VERSION/100000
#define TF_MINOR_VERSION TF_VERSION/100%1000
#define TF_PATCH_VERSION TF_VERSION%100
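// e.g. TF_VERSION == 300500 decodes to major 3, minor 5, patch 0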
/**
@brief taskflow namespace
*/
namespace tf {
/**
@private
*/
namespace detail { }
/**
@brief queries the version information in a string format @c major.minor.patch
Release notes are available here: https://taskflow.github.io/taskflow/Releases.html
*/
constexpr const char* version() {
return "3.5.0";
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/meta_macro.hpp | // 2020/08/30 - Created by netcan: https://github.com/netcan
// ref https://github.com/Erlkoenig90/map-macro/
#pragma once
#ifdef _MSC_VER
#define TF_EMPTY()
#define TF_GET_ARG_COUNT_(...) \
TF_PASTE(TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, \
55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, \
43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, \
31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \
19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0, ), \
TF_EMPTY())
#else
#define TF_GET_ARG_COUNT_(...) \
TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, \
53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \
39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, \
25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, \
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, )
#endif
#define TF_GET_ARG_COUNT(...) TF_GET_ARG_COUNT_(__dummy__, ##__VA_ARGS__)
#define TF_GET_ARG_COUNT_I( \
e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, \
e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, \
e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, \
e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, \
e62, e63, e64, size, ...) \
size
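// e.g. TF_GET_ARG_COUNT() expands to 0 and TF_GET_ARG_COUNT(a, b, c) to 3;
// the ##__VA_ARGS__ in TF_GET_ARG_COUNT drops the trailing comma when the
// argument list is empty, which makes the zero-argument case count correctly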
#define TF_GET_FIRST(a, ...) a
#define TF_GET_SECOND(a, b, ...) b
#define TF_CONCATE(x, y) x##y
#define TF_PASTE(x, y) TF_CONCATE(x, y)
#define TF_EVAL0(...) __VA_ARGS__
#define TF_EVAL1(...) TF_EVAL0(TF_EVAL0(TF_EVAL0(__VA_ARGS__)))
#define TF_EVAL2(...) TF_EVAL1(TF_EVAL1(TF_EVAL1(__VA_ARGS__)))
#define TF_EVAL3(...) TF_EVAL2(TF_EVAL2(TF_EVAL2(__VA_ARGS__)))
#define TF_EVAL4(...) TF_EVAL3(TF_EVAL3(TF_EVAL3(__VA_ARGS__)))
#define TF_EVAL5(...) TF_EVAL4(TF_EVAL4(TF_EVAL4(__VA_ARGS__)))
#ifdef _MSC_VER
// MSVC needs more evaluations
#define TF_EVAL6(...) TF_EVAL5(TF_EVAL5(TF_EVAL5(__VA_ARGS__)))
#define TF_EVAL(...) TF_EVAL6(TF_EVAL6(__VA_ARGS__))
#else
#define TF_EVAL(...) TF_EVAL5(__VA_ARGS__)
#endif
#define TF_MAP_END(...)
#define TF_MAP_OUT
#define EMPTY()
#define DEFER(id) id EMPTY()
#define TF_MAP_GET_END2() 0, TF_MAP_END
#define TF_MAP_GET_END1(...) TF_MAP_GET_END2
#define TF_MAP_GET_END(...) TF_MAP_GET_END1
#define TF_MAP_NEXT0(test, next, ...) next TF_MAP_OUT
#define TF_MAP_NEXT1(test, next) DEFER(TF_MAP_NEXT0)(test, next, 0)
#define TF_MAP_NEXT(test, next) TF_MAP_NEXT1(TF_MAP_GET_END test, next)
#define TF_MAP0(f, x, peek, ...) \
f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP1))(f, peek, __VA_ARGS__)
#define TF_MAP1(f, x, peek, ...) \
f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP0))(f, peek, __VA_ARGS__)
#define TF_MAP(f, ...) \
TF_EVAL(TF_MAP1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
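// e.g. TF_MAP(F, x, y, z) expands to F(x) F(y) F(z)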
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/type_list.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include <cstddef>
namespace tf {
namespace dsl {
template <typename...> using void_t = void;
template <typename... Ts> struct TypeList {
using type = TypeList<Ts...>;
static constexpr size_t size = 0;
template <typename... T> struct append { using type = TypeList<T...>; };
template <typename... T> using appendTo = typename append<T...>::type;
template <typename T> using prepend = typename TypeList<T>::type;
template <template <typename...> class T> using exportTo = T<Ts...>;
};
template <typename Head, typename... Tails> struct TypeList<Head, Tails...> {
using type = TypeList<Head, Tails...>;
using head = Head;
using tails = TypeList<Tails...>;
static constexpr size_t size = sizeof...(Tails) + 1;
template <typename... Ts> struct append {
using type = TypeList<Head, Tails..., Ts...>;
};
template <typename... Ts> using appendTo = typename append<Ts...>::type;
template <typename T>
using prepend = typename TypeList<T, Head, Tails...>::type;
template <template <typename...> class T> using exportTo = T<Head, Tails...>;
};
template <typename IN> struct IsTypeList {
constexpr static bool value = false;
};
template <typename IN> constexpr bool IsTypeList_v = IsTypeList<IN>::value;
template <typename... Ts> struct IsTypeList<TypeList<Ts...>> {
constexpr static bool value = true;
};
template <typename... IN> struct Concat;
template <typename... IN> using Concat_t = typename Concat<IN...>::type;
template <> struct Concat<> { using type = TypeList<>; };
template <typename IN> struct Concat<IN> { using type = IN; };
template <typename IN, typename IN2> struct Concat<IN, IN2> {
using type = typename IN2::template exportTo<IN::template append>::type;
};
template <typename IN, typename IN2, typename... Rest>
struct Concat<IN, IN2, Rest...> {
using type = Concat_t<Concat_t<IN, IN2>, Rest...>;
};
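// Illustrative compile-time checks (hypothetical; not part of the original
// header):
//
//   static_assert(TypeList<int, char>::size == 2, "");
//   static_assert(std::is_same<Concat_t<TypeList<int>, TypeList<char>>,
//                              TypeList<int, char>>::value, "");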
template <typename IN, typename OUT = TypeList<>, typename = void>
struct Flatten {
using type = OUT;
};
template <typename IN> using Flatten_t = typename Flatten<IN>::type;
template <typename IN, typename OUT>
struct Flatten<IN, OUT, std::enable_if_t<IsTypeList_v<typename IN::head>>> {
using type =
typename Flatten<typename IN::tails,
Concat_t<OUT, Flatten_t<typename IN::head>>>::type;
};
template <typename IN, typename OUT>
struct Flatten<IN, OUT, std::enable_if_t<!IsTypeList_v<typename IN::head>>> {
using type = typename Flatten<
typename IN::tails,
typename OUT::template appendTo<typename IN::head>>::type;
};
template <typename IN, template <typename> class F> struct Map {
using type = TypeList<>;
};
template <typename IN, template <typename> class F>
using Map_t = typename Map<IN, F>::type;
template <template <typename> class F, typename... Ts>
struct Map<TypeList<Ts...>, F> {
using type = TypeList<typename F<Ts>::type...>;
};
template <typename IN, template <typename> class F, typename OUT = TypeList<>,
typename = void>
struct Filter {
using type = OUT;
};
template <typename IN, template <typename> class F>
using Filter_t = typename Filter<IN, F>::type;
template <typename IN, template <typename> class F, typename OUT>
class Filter<IN, F, OUT, void_t<typename IN::head>> {
using H = typename IN::head;
public:
using type = typename std::conditional_t<
F<H>::value,
Filter<typename IN::tails, F, typename OUT::template appendTo<H>>,
Filter<typename IN::tails, F, OUT>>::type;
};
template <typename IN, typename = void> struct Unique { using type = IN; };
template <typename IN> using Unique_t = typename Unique<IN>::type;
template <typename IN> class Unique<IN, void_t<typename IN::head>> {
template <typename T> struct IsDifferR {
template <typename R> struct apply {
static constexpr bool value = !std::is_same<T, R>::value;
};
};
using tails = Unique_t<typename IN::tails>;
using eraseHead =
Filter_t<tails, IsDifferR<typename IN::head>::template apply>;
public:
using type = typename eraseHead::template prepend<typename IN::head>;
};
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_dsl.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "meta_macro.hpp"
#include "task_analyzer.hpp"
#include "task_trait.hpp"
namespace tf {
namespace dsl {
struct EmptyContext {};
template <typename CONTEXT = EmptyContext, typename... Chains> class TaskDsl {
using Links = Unique_t<Flatten_t<TypeList<typename Chain<Chains>::type...>>>;
using Analyzer = typename Links::template exportTo<TaskAnalyzer>;
using AllTasks = typename Analyzer::AllTasks;
template <typename TASK> struct TaskCbWithContext {
using type = TaskCb<TASK, CONTEXT>;
};
using TasksCB =
typename Map_t<AllTasks,
TaskCbWithContext>::template exportTo<std::tuple>;
using OneToOneLinkSet = typename Analyzer::OneToOneLinkSet;
template <typename OneToOneLink> struct OneToOneLinkInstanceType {
using type = typename OneToOneLink::template InstanceType<TasksCB>;
};
using OneToOneLinkInstances =
typename Map_t<OneToOneLinkSet,
OneToOneLinkInstanceType>::template exportTo<std::tuple>;
public:
constexpr TaskDsl(FlowBuilder &flow_builder, const CONTEXT &context = {}) {
build_tasks_cb(flow_builder, context,
std::make_index_sequence<AllTasks::size>{});
build_links(std::make_index_sequence<OneToOneLinkSet::size>{});
}
template <typename TASK> Task &get_task() {
constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value;
constexpr size_t TaskIndex =
TupleElementByF_v<TasksCB, IsTask<TASK>::template apply>;
static_assert(TaskIndex < TasksCBSize, "fatal: not find TaskCb in TasksCB");
return std::get<TaskIndex>(tasksCb_).task_;
}
private:
template <size_t... Is>
void build_tasks_cb(FlowBuilder &flow_builder, const CONTEXT &context,
std::index_sequence<Is...>) {
auto _ = {0, (std::get<Is>(tasksCb_).build(flow_builder, context), 0)...};
(void)_;
}
template <size_t... Is> void build_links(std::index_sequence<Is...>) {
auto _ = {0, (std::get<Is>(links_).build(tasksCb_), 0)...};
(void)_;
}
private:
TasksCB tasksCb_;
OneToOneLinkInstances links_;
};
template <typename = void, typename... Chains, typename CONTEXT = EmptyContext>
constexpr TaskDsl<CONTEXT, Chains...> taskDsl(FlowBuilder &flow_builder,
CONTEXT &&context = {}) {
return {flow_builder, context};
}
} // namespace dsl
} // namespace tf
///////////////////////////////////////////////////////////////////////////////
#define TF_CHAIN(link) , link->void
#define TF_CONTEXT_1(name) tf::dsl::EmptyContext
#define TF_CONTEXT_2(name, context) context
#define TF_CAPTURE_THIS_1
#define TF_CAPTURE_THIS_2 *this
///////////////////////////////////////////////////////////////////////////////
// make_task(TASK_NAME, { return a action lambda })
#define make_task(name, ...) \
struct TF_GET_FIRST name : tf::dsl::TaskSignature, \
TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) \
name { \
using _ContextType = TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) name; \
TF_GET_FIRST name(const _ContextType &context) : _ContextType(context) {} \
auto operator()() { \
return [TF_PASTE(TF_CAPTURE_THIS_, TF_GET_ARG_COUNT name)] __VA_ARGS__; \
} \
}
// some_tasks(A, B, C) means SomeTask
#define some_tasks(...) auto (*)(tf::dsl::SomeTask<__VA_ARGS__>)
// same as some_tasks
#define fork_tasks(...) some_tasks(__VA_ARGS__)
// same as some_tasks
#define merge_tasks(...) some_tasks(__VA_ARGS__)
// task(A) means a task A
#define task(Task) auto (*)(Task)
// taskbuild(...) build a task dsl graph
#define build_taskflow(...) tf::dsl::taskDsl<void TF_MAP(TF_CHAIN, __VA_ARGS__)>
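// Illustrative DSL usage (a sketch based on the macros above; task names A
// and B are hypothetical, not part of the sample):
//
//   make_task((A), { std::cout << "task A\n"; });
//   make_task((B), { std::cout << "task B\n"; });
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   build_taskflow(task(A) -> task(B))(taskflow);  // A precedes B
//   executor.run(taskflow).wait();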
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/tuple_utils.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include <cstddef>
#include <tuple>
namespace tf {
namespace dsl {
namespace detail {
// get tuple element index by f, if not exists then index >= tuple_size
template <typename TUP, template <typename> class F, typename = void>
struct TupleElementByF {
constexpr static size_t Index = 0;
};
template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F, std::enable_if_t<F<H>::value>> {
constexpr static size_t Index = 0;
};
template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F,
std::enable_if_t<!F<H>::value>> {
constexpr static size_t Index =
1 + TupleElementByF<std::tuple<Ts...>, F>::Index;
};
template <typename T, typename TUP, size_t... Is>
constexpr inline T AggregationByTupImpl(TUP &&tup, std::index_sequence<Is...>) {
return T{std::get<Is>(tup)...};
}
} // namespace detail
template <typename TUP, template <typename> class F>
constexpr size_t TupleElementByF_v = detail::TupleElementByF<TUP, F>::Index;
template <typename T, typename TUP>
constexpr inline T AggregationByTup(TUP &&tup) {
return detail::AggregationByTupImpl<T>(
std::forward<TUP>(tup),
      std::make_index_sequence<std::tuple_size<std::decay_t<TUP>>::value>{});
}
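// e.g. constructing an aggregate from a tuple (illustrative):
//   struct Point { int x; int y; };
//   auto p = AggregationByTup<Point>(std::make_tuple(1, 2));  // Point{1, 2}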
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_trait.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "../core/task.hpp"
#include "type_list.hpp"
#include <type_traits>
namespace tf {
namespace dsl {
struct TaskSignature {};
template <typename TASK, typename CONTEXT> struct TaskCb {
using TaskType = TASK;
void build(FlowBuilder &build, const CONTEXT &context) {
task_ = build.emplace(TaskType{context}());
}
Task task_;
};
template <typename TASK> struct IsTask {
template <typename TaskCb> struct apply {
constexpr static bool value =
std::is_same<typename TaskCb::TaskType, TASK>::value;
};
};
template <typename TASK, typename = void> struct TaskTrait;
template <typename... TASK> struct SomeTask {
using TaskList =
Unique_t<Flatten_t<TypeList<typename TaskTrait<TASK>::TaskList...>>>;
};
// a task self
template <typename TASK>
struct TaskTrait<
TASK, std::enable_if_t<std::is_base_of<TaskSignature, TASK>::value>> {
using TaskList = TypeList<TASK>;
};
template <typename... TASK> struct TaskTrait<SomeTask<TASK...>> {
using TaskList = typename SomeTask<TASK...>::TaskList;
};
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/dsl.hpp | // TaskflowDSL is an experimental project that leverages C++17 to
// provide a dedicated interface for expressive taskflow programming
//
// Created by netcan: https://github.com/netcan
#pragma once
#include "dsl/task_dsl.hpp"
namespace tf {
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/task_analyzer.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "connection.hpp"
#include "type_list.hpp"
#include <type_traits>
namespace tf {
namespace dsl {
template <typename... Links> class TaskAnalyzer {
template <typename FROMs, typename TOs, typename = void>
struct BuildOneToOneLink;
template <typename... Fs, typename Ts>
struct BuildOneToOneLink<TypeList<Fs...>, Ts> {
using type = Concat_t<typename BuildOneToOneLink<Fs, Ts>::type...>;
};
template <typename F, typename... Ts>
struct BuildOneToOneLink<F, TypeList<Ts...>,
std::enable_if_t<!IsTypeList_v<F>>> {
using type = TypeList<OneToOneLink<F, Ts>...>;
};
template <typename Link> class OneToOneLinkSetF {
using FromTaskList = typename Link::FromTaskList;
using ToTaskList = typename Link::ToTaskList;
public:
using type = typename BuildOneToOneLink<FromTaskList, ToTaskList>::type;
};
public:
using AllTasks = Unique_t<
Concat_t<typename Links::FromTaskList..., typename Links::ToTaskList...>>;
using OneToOneLinkSet =
Unique_t<Flatten_t<Map_t<TypeList<Links...>, OneToOneLinkSetF>>>;
};
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/dsl/connection.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "task_trait.hpp"
#include "tuple_utils.hpp"
#include "type_list.hpp"
namespace tf {
namespace dsl {
template <typename F, typename T> class Connection {
using FROMs = typename TaskTrait<F>::TaskList;
using TOs = typename TaskTrait<T>::TaskList;
public:
using FromTaskList = Unique_t<Flatten_t<FROMs>>;
using ToTaskList = Unique_t<Flatten_t<TOs>>;
};
template <typename T, typename OUT = TypeList<>> struct Chain;
template <typename F, typename OUT> struct Chain<auto (*)(F)->void, OUT> {
using From = F;
using type = OUT;
};
template <typename F, typename T, typename OUT>
struct Chain<auto (*)(F)->T, OUT> {
private:
using To = typename Chain<T, OUT>::From;
public:
using From = F;
using type = typename Chain<
T, typename OUT::template appendTo<Connection<From, To>>>::type;
};
template <typename FROM, typename TO> struct OneToOneLink {
template <typename TasksCB> struct InstanceType {
constexpr void build(TasksCB &tasksCb) {
constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value;
constexpr size_t FromTaskIndex =
TupleElementByF_v<TasksCB, IsTask<FROM>::template apply>;
constexpr size_t ToTaskIndex =
TupleElementByF_v<TasksCB, IsTask<TO>::template apply>;
static_assert(FromTaskIndex < TasksCBSize && ToTaskIndex < TasksCBSize,
"fatal: not find TaskCb in TasksCB");
std::get<FromTaskIndex>(tasksCb).task_.precede(
std::get<ToTaskIndex>(tasksCb).task_);
}
};
};
} // namespace dsl
}; // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/tsq.hpp | #pragma once
#include "../utility/macros.hpp"
#include "../utility/traits.hpp"
/**
@file tsq.hpp
@brief task queue include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Task Types
// ----------------------------------------------------------------------------
/**
@enum TaskPriority
@brief enumeration of all task priority values
A priority is an enumerated value of type @c unsigned.
Currently, %Taskflow defines three priority levels,
@c HIGH, @c NORMAL, and @c LOW, starting from 0, 1, to 2.
That is, the lower the value, the higher the priority.
*/
enum class TaskPriority : unsigned {
/** @brief value of the highest priority (i.e., 0) */
HIGH = 0,
/** @brief value of the normal priority (i.e., 1) */
NORMAL = 1,
/** @brief value of the lowest priority (i.e., 2) */
LOW = 2,
/** @brief conventional value for iterating priority values */
MAX = 3
};
// ----------------------------------------------------------------------------
// Task Queue
// ----------------------------------------------------------------------------
/**
@class: TaskQueue
@tparam T data type (must be a pointer type)
@tparam MAX_PRIORITY maximum level of the priority
@brief class to create a lock-free unbounded single-producer multiple-consumer queue
This class implements the work-stealing queue described in the paper,
<a href="https://www.di.ens.fr/~zappa/readings/ppopp13.pdf">Correct and Efficient Work-Stealing for Weak Memory Models</a>,
and extends it to include priority.
Only the queue owner can perform pop and push operations,
while others can steal data from the queue simultaneously.
Priority values range from zero (highest priority) to the template value
`MAX_PRIORITY-1` (lowest priority).
All operations are associated with priority values to indicate
the corresponding queues to which an operation is applied.
The default template value, `MAX_PRIORITY`, is `TaskPriority::MAX`
which applies only three priority levels to the task queue.
@code{.cpp}
auto [A, B, C, D, E] = taskflow.emplace(
[] () { },
[&] () {
std::cout << "Task B: " << counter++ << '\n'; // 0
},
[&] () {
std::cout << "Task C: " << counter++ << '\n'; // 2
},
[&] () {
std::cout << "Task D: " << counter++ << '\n'; // 1
},
[] () { }
);
A.precede(B, C, D);
E.succeed(B, C, D);
B.priority(tf::TaskPriority::HIGH);
C.priority(tf::TaskPriority::LOW);
D.priority(tf::TaskPriority::NORMAL);
executor.run(taskflow).wait();
@endcode
In the above example, we have a task graph of five tasks,
@c A, @c B, @c C, @c D, and @c E, in which @c B, @c C, and @c D
can run simultaneously when @c A finishes.
Since we use only one worker thread in the executor,
we deterministically run @c B first, then @c D, and then @c C
in order of their priority values.
The output is as follows:
@code{.shell-session}
Task B: 0
Task D: 1
Task C: 2
@endcode
*/
template <typename T, unsigned MAX_PRIORITY = static_cast<unsigned>(TaskPriority::MAX)>
class TaskQueue {
static_assert(MAX_PRIORITY > 0, "MAX_PRIORITY must be at least one");
static_assert(std::is_pointer_v<T>, "T must be a pointer type");
struct Array {
int64_t C;
int64_t M;
std::atomic<T>* S;
explicit Array(int64_t c) :
C {c},
M {c-1},
S {new std::atomic<T>[static_cast<size_t>(C)]} {
}
~Array() {
delete [] S;
}
int64_t capacity() const noexcept {
return C;
}
void push(int64_t i, T o) noexcept {
S[i & M].store(o, std::memory_order_relaxed);
}
T pop(int64_t i) noexcept {
return S[i & M].load(std::memory_order_relaxed);
}
Array* resize(int64_t b, int64_t t) {
Array* ptr = new Array {2*C};
for(int64_t i=t; i!=b; ++i) {
ptr->push(i, pop(i));
}
return ptr;
}
};
  // Keeping each atomic counter on its own cache line (CachelineAligned)
  // avoids false sharing between the owner and thieves and gives the best
  // observed performance.
CachelineAligned<std::atomic<int64_t>> _top[MAX_PRIORITY];
CachelineAligned<std::atomic<int64_t>> _bottom[MAX_PRIORITY];
std::atomic<Array*> _array[MAX_PRIORITY];
std::vector<Array*> _garbage[MAX_PRIORITY];
//std::atomic<T> _cache {nullptr};
public:
/**
@brief constructs the queue with a given capacity
@param capacity the capacity of the queue (must be power of 2)
*/
explicit TaskQueue(int64_t capacity = 512);
/**
@brief destructs the queue
*/
~TaskQueue();
/**
@brief queries if the queue is empty at the time of this call
*/
bool empty() const noexcept;
/**
@brief queries if the queue is empty at a specific priority value
*/
bool empty(unsigned priority) const noexcept;
/**
@brief queries the number of items at the time of this call
*/
size_t size() const noexcept;
/**
@brief queries the number of items with the given priority
at the time of this call
*/
size_t size(unsigned priority) const noexcept;
/**
@brief queries the capacity of the queue
*/
int64_t capacity() const noexcept;
/**
@brief queries the capacity of the queue at a specific priority value
*/
int64_t capacity(unsigned priority) const noexcept;
/**
@brief inserts an item to the queue
@param item the item to push to the queue
@param priority priority value of the item to push (default = 0)
Only the owner thread can insert an item to the queue.
The operation can trigger the queue to resize its capacity
if more space is required.
*/
TF_FORCE_INLINE void push(T item, unsigned priority);
/**
@brief pops out an item from the queue
Only the owner thread can pop out an item from the queue.
The return can be a @c nullptr if this operation failed (empty queue).
*/
T pop();
/**
@brief pops out an item with a specific priority value from the queue
@param priority priority of the item to pop
Only the owner thread can pop out an item from the queue.
The return can be @c nullptr if this operation fails (i.e., the queue is empty).
*/
TF_FORCE_INLINE T pop(unsigned priority);
/**
@brief steals an item from the queue
Any thread can try to steal an item from the queue.
The return can be @c nullptr if this operation fails (the queue is not necessarily empty).
*/
T steal();
/**
@brief steals an item with a specific priority value from the queue
@param priority priority of the item to steal
Any thread can try to steal an item from the queue.
The return can be @c nullptr if this operation fails (the queue is not necessarily empty).
*/
T steal(unsigned priority);
private:
TF_NO_INLINE Array* resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t);
};
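// Example (editor's sketch, not part of the library): a minimal
// owner/thief interaction with the priority task queue. The element type
// must be a pointer type; @c int* is used purely for illustration.
//
// @code{.cpp}
// tf::TaskQueue<int*> queue;
// int x = 1, y = 2;
// // only the owner thread may push or pop
// queue.push(&x, static_cast<unsigned>(tf::TaskPriority::HIGH));
// queue.push(&y, static_cast<unsigned>(tf::TaskPriority::LOW));
// int* a = queue.pop();    // owner thread: returns &x (higher priorities are scanned first)
// int* b = queue.steal();  // any thread: may return &y, or nullptr if it loses a race
// @endcode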
// Constructor
template <typename T, unsigned MAX_PRIORITY>
TaskQueue<T, MAX_PRIORITY>::TaskQueue(int64_t c) {
assert(c && (!(c & (c-1))));
unroll<0, MAX_PRIORITY, 1>([&](auto p){
_top[p].data.store(0, std::memory_order_relaxed);
_bottom[p].data.store(0, std::memory_order_relaxed);
_array[p].store(new Array{c}, std::memory_order_relaxed);
_garbage[p].reserve(32);
});
}
// Destructor
template <typename T, unsigned MAX_PRIORITY>
TaskQueue<T, MAX_PRIORITY>::~TaskQueue() {
unroll<0, MAX_PRIORITY, 1>([&](auto p){
for(auto a : _garbage[p]) {
delete a;
}
delete _array[p].load();
});
}
// Function: empty
template <typename T, unsigned MAX_PRIORITY>
bool TaskQueue<T, MAX_PRIORITY>::empty() const noexcept {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(!empty(i)) {
return false;
}
}
return true;
}
// Function: empty
template <typename T, unsigned MAX_PRIORITY>
bool TaskQueue<T, MAX_PRIORITY>::empty(unsigned p) const noexcept {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_relaxed);
return (b <= t);
}
// Function: size
template <typename T, unsigned MAX_PRIORITY>
size_t TaskQueue<T, MAX_PRIORITY>::size() const noexcept {
size_t s;
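// compile-time loop over all priority levels; i == 0 initializes s and
// later iterations accumulate the per-priority sizes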
unroll<0, MAX_PRIORITY, 1>([&](auto i) { s = i ? size(i) + s : size(i); });
return s;
}
// Function: size
template <typename T, unsigned MAX_PRIORITY>
size_t TaskQueue<T, MAX_PRIORITY>::size(unsigned p) const noexcept {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_relaxed);
return static_cast<size_t>(b >= t ? b - t : 0);
}
// Function: push
template <typename T, unsigned MAX_PRIORITY>
TF_FORCE_INLINE void TaskQueue<T, MAX_PRIORITY>::push(T o, unsigned p) {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_acquire);
Array* a = _array[p].load(std::memory_order_relaxed);
// queue is full
if(a->capacity() - 1 < (b - t)) {
a = resize_array(a, p, b, t);
}
a->push(b, o);
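// the release fence publishes the stored item before the bottom index is
// advanced below, so a thief that observes the new bottom also observes the item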
std::atomic_thread_fence(std::memory_order_release);
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
// Function: pop
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::pop() {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(auto t = pop(i); t) {
return t;
}
}
return nullptr;
}
// Function: pop
template <typename T, unsigned MAX_PRIORITY>
TF_FORCE_INLINE T TaskQueue<T, MAX_PRIORITY>::pop(unsigned p) {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed) - 1;
Array* a = _array[p].load(std::memory_order_relaxed);
_bottom[p].data.store(b, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
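// the seq_cst fence above orders the bottom decrement before the top load
// below, pairing with the fence in steal() so the owner and a thief cannot
// both claim the last remaining item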
int64_t t = _top[p].data.load(std::memory_order_relaxed);
T item {nullptr};
if(t <= b) {
item = a->pop(b);
if(t == b) {
// the last item just got stolen
if(!_top[p].data.compare_exchange_strong(t, t+1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
item = nullptr;
}
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
}
else {
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
return item;
}
// Function: steal
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::steal() {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(auto t = steal(i); t) {
return t;
}
}
return nullptr;
}
// Function: steal
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::steal(unsigned p) {
int64_t t = _top[p].data.load(std::memory_order_acquire);
std::atomic_thread_fence(std::memory_order_seq_cst);
int64_t b = _bottom[p].data.load(std::memory_order_acquire);
T item {nullptr};
if(t < b) {
Array* a = _array[p].load(std::memory_order_consume);
item = a->pop(t);
if(!_top[p].data.compare_exchange_strong(t, t+1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
return nullptr;
}
}
return item;
}
// Function: capacity
template <typename T, unsigned MAX_PRIORITY>
int64_t TaskQueue<T, MAX_PRIORITY>::capacity() const noexcept {
int64_t s;
unroll<0, MAX_PRIORITY, 1>([&](auto i) {
s = i ? capacity(i) + s : capacity(i);
});
return s;
}
// Function: capacity
template <typename T, unsigned MAX_PRIORITY>
int64_t TaskQueue<T, MAX_PRIORITY>::capacity(unsigned p) const noexcept {
return _array[p].load(std::memory_order_relaxed)->capacity();
}
template <typename T, unsigned MAX_PRIORITY>
TF_NO_INLINE typename TaskQueue<T, MAX_PRIORITY>::Array*
TaskQueue<T, MAX_PRIORITY>::resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t) {
Array* tmp = a->resize(b, t);
_garbage[p].push_back(a);
std::swap(a, tmp);
_array[p].store(a, std::memory_order_release);
// Note: using relaxed ordering here, as in the original paper, makes
// thread sanitizer complain
//_array.store(a, std::memory_order_relaxed);
return a;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/graph.hpp | #pragma once
#include "../utility/traits.hpp"
#include "../utility/iterator.hpp"
#include "../utility/object_pool.hpp"
#include "../utility/os.hpp"
#include "../utility/math.hpp"
#include "../utility/small_vector.hpp"
#include "../utility/serializer.hpp"
#include "error.hpp"
#include "declarations.hpp"
#include "semaphore.hpp"
#include "environment.hpp"
#include "topology.hpp"
#include "tsq.hpp"
/**
@file graph.hpp
@brief graph include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Class: CustomGraphBase
// ----------------------------------------------------------------------------
/**
@private
*/
class CustomGraphBase {
public:
virtual void dump(std::ostream&, const void*, const std::string&) const = 0;
virtual ~CustomGraphBase() = default;
};
// ----------------------------------------------------------------------------
// Class: Graph
// ----------------------------------------------------------------------------
/**
@class Graph
@brief class to create a graph object
A graph is the ultimate storage for a task dependency graph and is the main
gateway to interact with an executor.
A graph manages a set of nodes in a global object pool that animates and
recycles node objects efficiently without going through repetitive and
expensive memory allocations and deallocations.
This class is mainly used for creating an opaque graph object in a custom
class to interact with the executor through taskflow composition.
A graph object is move-only.
*/
class Graph {
friend class Node;
friend class FlowBuilder;
friend class Subflow;
friend class Taskflow;
friend class Executor;
public:
/**
@brief constructs a graph object
*/
Graph() = default;
/**
@brief disabled copy constructor
*/
Graph(const Graph&) = delete;
/**
@brief constructs a graph using move semantics
*/
Graph(Graph&&);
/**
@brief destructs the graph object
*/
~Graph();
/**
@brief disabled copy assignment operator
*/
Graph& operator = (const Graph&) = delete;
/**
@brief assigns a graph using move semantics
*/
Graph& operator = (Graph&&);
/**
@brief queries if the graph is empty
*/
bool empty() const;
/**
@brief queries the number of nodes in the graph
*/
size_t size() const;
/**
@brief clears the graph
*/
void clear();
private:
std::vector<Node*> _nodes;
void _clear();
void _clear_detached();
void _merge(Graph&&);
void _erase(Node*);
template <typename ...ArgsT>
Node* _emplace_back(ArgsT&&... args);
Node* _emplace_back();
};
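// Example (editor's sketch): a composable target only needs to expose
// `tf::Graph& graph()` (tf::Taskflow is the canonical case), so a custom
// class (the name MyModule is hypothetical) can participate in taskflow
// composition APIs such as run_and_wait:
//
// @code{.cpp}
// class MyModule {
//  public:
//   tf::Graph& graph() { return _graph; }
//  private:
//   tf::Graph _graph;
// };
// @endcode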
// ----------------------------------------------------------------------------
/**
@class Runtime
@brief class to create a runtime object used by a runtime task
A runtime object is used by a runtime task to interact with the
scheduling runtime, such as scheduling an active task or
spawning a subflow.
@code{.cpp}
taskflow.emplace([](tf::Runtime& rt){
rt.run_and_wait([](tf::Subflow& sf){
tf::Task A = sf.emplace([](){});
tf::Task B = sf.emplace([](){});
A.precede(B);
});
});
@endcode
A runtime task is associated with an executor and a worker that
runs the runtime task.
*/
class Runtime {
friend class Executor;
public:
/**
@brief obtains the running executor
The running executor of a runtime task is the executor that runs
the parent taskflow of that runtime task.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
taskflow.emplace([&](tf::Runtime& rt){
assert(&(rt.executor()) == &executor);
});
executor.run(taskflow).wait();
@endcode
*/
Executor& executor();
/**
@brief schedules an active task immediately to the worker's queue
@param task the given active task to schedule immediately
This member function immediately schedules an active task to the
task queue of the associated worker in the runtime task.
An active task is a task in a running taskflow.
The task may or may not be running, and scheduling that task
will immediately put the task into the task queue of the worker
that is running the runtime task.
Consider the following example:
@code{.cpp}
tf::Task A, B, C, D;
std::tie(A, B, C, D) = taskflow.emplace(
[] () { return 0; },
[&C] (tf::Runtime& rt) { // C must be captured by reference
std::cout << "B\n";
rt.schedule(C);
},
[] () { std::cout << "C\n"; },
[] () { std::cout << "D\n"; }
);
A.precede(B, C, D);
executor.run(taskflow).wait();
@endcode
The executor will first run the condition task @c A which returns @c 0
to inform the scheduler to go to the runtime task @c B.
During the execution of @c B, it directly schedules task @c C without
going through the normal taskflow graph scheduling process.
At this moment, task @c C is active because its parent taskflow is running.
When the taskflow finishes, we will see both @c B and @c C in the output.
*/
void schedule(Task task);
/**
@brief runs the given target and waits until it completes
A target can be
(1) a callable to spawn a subflow or
(2) a composable target with `tf::Graph& T::graph()` defined
@code{.cpp}
// complete a subflow synchronously
taskflow.emplace([](tf::Runtime& rt){
rt.run_and_wait([](tf::Subflow& sf){
tf::Task A = sf.emplace([](){});
tf::Task B = sf.emplace([](){});
});
});
// complete a custom graph synchronously
tf::Taskflow another;
another.emplace([](){});
taskflow.emplace([&](tf::Runtime& rt){
  rt.run_and_wait(another);
});
@endcode
*/
template <typename T>
void run_and_wait(T&& target);
private:
explicit Runtime(Executor&, Worker&, Node*);
Executor& _executor;
Worker& _worker;
Node* _parent;
};
// constructor
inline Runtime::Runtime(Executor& e, Worker& w, Node* p) :
_executor{e},
_worker {w},
_parent {p}{
}
// Function: executor
inline Executor& Runtime::executor() {
return _executor;
}
// ----------------------------------------------------------------------------
// Node
// ----------------------------------------------------------------------------
/**
@private
*/
class Node {
friend class Graph;
friend class Task;
friend class TaskView;
friend class Taskflow;
friend class Executor;
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
TF_ENABLE_POOLABLE_ON_THIS;
// state bit flag
constexpr static int CONDITIONED = 1;
constexpr static int DETACHED = 2;
constexpr static int ACQUIRED = 4;
constexpr static int READY = 8;
constexpr static int DEFERRED = 16;
// static work handle
struct Static {
template <typename C>
Static(C&&);
std::function<void()> work;
};
// runtime work handle
struct Runtime {
template <typename C>
Runtime(C&&);
std::function<void(tf::Runtime&)> work;
};
// dynamic work handle
struct Dynamic {
template <typename C>
Dynamic(C&&);
std::function<void(Subflow&)> work;
Graph subgraph;
};
// condition work handle
struct Condition {
template <typename C>
Condition(C&&);
std::function<int()> work;
};
// multi-condition work handle
struct MultiCondition {
template <typename C>
MultiCondition(C&&);
std::function<SmallVector<int>()> work;
};
// module work handle
struct Module {
template <typename T>
Module(T&);
Graph& graph;
};
// Async work
struct Async {
template <typename T>
Async(T&&, std::shared_ptr<AsyncTopology>);
std::function<void(bool)> work;
std::shared_ptr<AsyncTopology> topology;
};
// Silent async work
struct SilentAsync {
template <typename C>
SilentAsync(C&&);
std::function<void()> work;
};
// cudaFlow work handle
struct cudaFlow {
template <typename C, typename G>
cudaFlow(C&& c, G&& g);
std::function<void(Executor&, Node*)> work;
std::unique_ptr<CustomGraphBase> graph;
};
// syclFlow work handle
struct syclFlow {
template <typename C, typename G>
syclFlow(C&& c, G&& g);
std::function<void(Executor&, Node*)> work;
std::unique_ptr<CustomGraphBase> graph;
};
using handle_t = std::variant<
std::monostate, // placeholder
Static, // static tasking
Dynamic, // dynamic tasking
Condition, // conditional tasking
MultiCondition, // multi-conditional tasking
Module, // composable tasking
Async, // async tasking
SilentAsync, // async tasking (no future)
cudaFlow, // cudaFlow
syclFlow, // syclFlow
Runtime // runtime tasking
>;
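// exactly one of the above alternatives is active per node; the variant
// indices below identify the active alternative at runtime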
struct Semaphores {
SmallVector<Semaphore*> to_acquire;
SmallVector<Semaphore*> to_release;
};
public:
// variant index
constexpr static auto PLACEHOLDER = get_index_v<std::monostate, handle_t>;
constexpr static auto STATIC = get_index_v<Static, handle_t>;
constexpr static auto DYNAMIC = get_index_v<Dynamic, handle_t>;
constexpr static auto CONDITION = get_index_v<Condition, handle_t>;
constexpr static auto MULTI_CONDITION = get_index_v<MultiCondition, handle_t>;
constexpr static auto MODULE = get_index_v<Module, handle_t>;
constexpr static auto ASYNC = get_index_v<Async, handle_t>;
constexpr static auto SILENT_ASYNC = get_index_v<SilentAsync, handle_t>;
constexpr static auto CUDAFLOW = get_index_v<cudaFlow, handle_t>;
constexpr static auto SYCLFLOW = get_index_v<syclFlow, handle_t>;
constexpr static auto RUNTIME = get_index_v<Runtime, handle_t>;
template <typename... Args>
Node(Args&&... args);
~Node();
size_t num_successors() const;
size_t num_dependents() const;
size_t num_strong_dependents() const;
size_t num_weak_dependents() const;
const std::string& name() const;
private:
std::string _name;
unsigned _priority {0};
void* _data {nullptr};
handle_t _handle;
SmallVector<Node*> _successors;
SmallVector<Node*> _dependents;
Topology* _topology {nullptr};
Node* _parent {nullptr};
std::atomic<int> _state {0};
std::atomic<size_t> _join_counter {0};
std::unique_ptr<Semaphores> _semaphores;
void _precede(Node*);
void _set_up_join_counter();
bool _is_cancelled() const;
bool _is_conditioner() const;
bool _acquire_all(SmallVector<Node*>&);
SmallVector<Node*> _release_all();
};
// ----------------------------------------------------------------------------
// Node Object Pool
// ----------------------------------------------------------------------------
/**
@private
*/
inline ObjectPool<Node> node_pool;
// ----------------------------------------------------------------------------
// Definition for Node::Static
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Static::Static(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Dynamic
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Dynamic::Dynamic(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Condition
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Condition::Condition(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::MultiCondition
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::MultiCondition::MultiCondition(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::cudaFlow
// ----------------------------------------------------------------------------
template <typename C, typename G>
Node::cudaFlow::cudaFlow(C&& c, G&& g) :
work {std::forward<C>(c)},
graph {std::forward<G>(g)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::syclFlow
// ----------------------------------------------------------------------------
template <typename C, typename G>
Node::syclFlow::syclFlow(C&& c, G&& g) :
work {std::forward<C>(c)},
graph {std::forward<G>(g)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Module
// ----------------------------------------------------------------------------
// Constructor
template <typename T>
inline Node::Module::Module(T& obj) : graph{ obj.graph() } {
}
// ----------------------------------------------------------------------------
// Definition for Node::Async
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Async::Async(C&& c, std::shared_ptr<AsyncTopology> tpg) :
work {std::forward<C>(c)},
topology {std::move(tpg)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::SilentAsync
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::SilentAsync::SilentAsync(C&& c) :
work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Runtime
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Runtime::Runtime(C&& c) :
work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node
// ----------------------------------------------------------------------------
// Constructor
template <typename... Args>
Node::Node(Args&&... args): _handle{std::forward<Args>(args)...} {
}
// Destructor
inline Node::~Node() {
// this is to avoid stack overflow
if(_handle.index() == DYNAMIC) {
// using std::get_if instead of std::get makes this compatible
// with older macOS versions
// the result of std::get_if is guaranteed to be non-null
// due to the index check above
auto& subgraph = std::get_if<Dynamic>(&_handle)->subgraph;
std::vector<Node*> nodes;
nodes.reserve(subgraph.size());
std::move(
subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes)
);
subgraph._nodes.clear();
size_t i = 0;
while(i < nodes.size()) {
if(nodes[i]->_handle.index() == DYNAMIC) {
auto& sbg = std::get_if<Dynamic>(&(nodes[i]->_handle))->subgraph;
std::move(
sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes)
);
sbg._nodes.clear();
}
++i;
}
//auto& np = Graph::_node_pool();
for(i=0; i<nodes.size(); ++i) {
node_pool.recycle(nodes[i]);
}
}
}
// Procedure: _precede
inline void Node::_precede(Node* v) {
_successors.push_back(v);
v->_dependents.push_back(this);
}
// Function: num_successors
inline size_t Node::num_successors() const {
return _successors.size();
}
// Function: dependents
inline size_t Node::num_dependents() const {
return _dependents.size();
}
// Function: num_weak_dependents
inline size_t Node::num_weak_dependents() const {
size_t n = 0;
for(size_t i=0; i<_dependents.size(); i++) {
//if(_dependents[i]->_handle.index() == Node::CONDITION) {
if(_dependents[i]->_is_conditioner()) {
n++;
}
}
return n;
}
// Function: num_strong_dependents
inline size_t Node::num_strong_dependents() const {
size_t n = 0;
for(size_t i=0; i<_dependents.size(); i++) {
//if(_dependents[i]->_handle.index() != Node::CONDITION) {
if(!_dependents[i]->_is_conditioner()) {
n++;
}
}
return n;
}
// Function: name
inline const std::string& Node::name() const {
return _name;
}
// Function: _is_conditioner
inline bool Node::_is_conditioner() const {
return _handle.index() == Node::CONDITION ||
_handle.index() == Node::MULTI_CONDITION;
}
// Function: _is_cancelled
inline bool Node::_is_cancelled() const {
if(_handle.index() == Node::ASYNC) {
auto h = std::get_if<Node::Async>(&_handle);
if(h->topology && h->topology->_is_cancelled.load(std::memory_order_relaxed)) {
return true;
}
// async tasks spawned from a subflow do not have a topology
}
return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed);
}
// Procedure: _set_up_join_counter
inline void Node::_set_up_join_counter() {
size_t c = 0;
for(auto p : _dependents) {
//if(p->_handle.index() == Node::CONDITION) {
if(p->_is_conditioner()) {
_state.fetch_or(Node::CONDITIONED, std::memory_order_relaxed);
}
else {
c++;
}
}
_join_counter.store(c, std::memory_order_release);
}
// Function: _acquire_all
inline bool Node::_acquire_all(SmallVector<Node*>& nodes) {
auto& to_acquire = _semaphores->to_acquire;
for(size_t i = 0; i < to_acquire.size(); ++i) {
if(!to_acquire[i]->_try_acquire_or_wait(this)) {
for(size_t j = 1; j <= i; ++j) {
auto r = to_acquire[i-j]->_release();
nodes.insert(std::end(nodes), std::begin(r), std::end(r));
}
return false;
}
}
return true;
}
// Function: _release_all
inline SmallVector<Node*> Node::_release_all() {
auto& to_release = _semaphores->to_release;
SmallVector<Node*> nodes;
for(const auto& sem : to_release) {
auto r = sem->_release();
nodes.insert(std::end(nodes), std::begin(r), std::end(r));
}
return nodes;
}
// ----------------------------------------------------------------------------
// Graph definition
// ----------------------------------------------------------------------------
// Destructor
inline Graph::~Graph() {
_clear();
}
// Move constructor
inline Graph::Graph(Graph&& other) :
_nodes {std::move(other._nodes)} {
}
// Move assignment
inline Graph& Graph::operator = (Graph&& other) {
_clear();
_nodes = std::move(other._nodes);
return *this;
}
// Procedure: clear
inline void Graph::clear() {
_clear();
}
// Procedure: clear
inline void Graph::_clear() {
for(auto node : _nodes) {
node_pool.recycle(node);
}
_nodes.clear();
}
// Procedure: clear_detached
inline void Graph::_clear_detached() {
auto mid = std::partition(_nodes.begin(), _nodes.end(), [] (Node* node) {
return !(node->_state.load(std::memory_order_relaxed) & Node::DETACHED);
});
for(auto itr = mid; itr != _nodes.end(); ++itr) {
node_pool.recycle(*itr);
}
_nodes.resize(std::distance(_nodes.begin(), mid));
}
// Procedure: merge
inline void Graph::_merge(Graph&& g) {
for(auto n : g._nodes) {
_nodes.push_back(n);
}
g._nodes.clear();
}
// Function: erase
inline void Graph::_erase(Node* node) {
if(auto I = std::find(_nodes.begin(), _nodes.end(), node); I != _nodes.end()) {
_nodes.erase(I);
node_pool.recycle(node);
}
}
// Function: size
inline size_t Graph::size() const {
return _nodes.size();
}
// Function: empty
inline bool Graph::empty() const {
return _nodes.empty();
}
// Function: emplace_back
template <typename ...ArgsT>
Node* Graph::_emplace_back(ArgsT&&... args) {
_nodes.push_back(node_pool.animate(std::forward<ArgsT>(args)...));
return _nodes.back();
}
// Function: emplace_back
inline Node* Graph::_emplace_back() {
_nodes.push_back(node_pool.animate());
return _nodes.back();
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/executor.hpp | #pragma once
#include "observer.hpp"
#include "taskflow.hpp"
/**
@file executor.hpp
@brief executor include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Executor Definition
// ----------------------------------------------------------------------------
/** @class Executor
@brief class to create an executor for running a taskflow graph
An executor manages a set of worker threads to run one or multiple taskflows
using an efficient work-stealing scheduling algorithm.
@code{.cpp}
// Declare an executor and a taskflow
tf::Executor executor;
tf::Taskflow taskflow;
// Add three tasks into the taskflow
tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; });
tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; });
tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; });
// Build precedence between tasks
A.precede(B, C);
tf::Future<void> fu = executor.run(taskflow);
fu.wait(); // block until the execution completes
executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait();
executor.run_n(taskflow, 4);
executor.wait_for_all(); // block until all associated executions finish
executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait();
executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; });
@endcode
All the @c run methods are @em thread-safe. You can submit multiple
taskflows at the same time to an executor from different threads.
*/
class Executor {
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
public:
/**
@brief constructs the executor with @c N worker threads
@param N number of workers (default std::thread::hardware_concurrency)
@param wix worker interface class to alter worker (thread) behaviors
The constructor spawns @c N worker threads to run tasks in a
work-stealing loop. The number of workers must be greater than zero
or an exception will be thrown.
By default, the number of worker threads is equal to the maximum
hardware concurrency returned by std::thread::hardware_concurrency.
Users can alter the worker behavior, such as changing thread affinity,
via deriving an instance from tf::WorkerInterface.
*/
explicit Executor(
size_t N = std::thread::hardware_concurrency(),
std::shared_ptr<WorkerInterface> wix = nullptr
);
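// Example (editor's sketch): users can alter worker behavior by deriving
// from tf::WorkerInterface; the hook signatures below follow their use in
// Executor::_spawn, and the class name PinnedWorker is hypothetical.
//
// @code{.cpp}
// class PinnedWorker : public tf::WorkerInterface {
//  public:
//   void scheduler_prologue(tf::Worker& w) override {
//     // e.g., pin this worker's thread to a core before the scheduling loop
//   }
//   void scheduler_epilogue(tf::Worker& w, std::exception_ptr ptr) override {
//     // e.g., examine an exception thrown inside the scheduling loop
//   }
// };
// tf::Executor executor(4, std::make_shared<PinnedWorker>());
// @endcode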
/**
@brief destructs the executor
The destructor calls Executor::wait_for_all to wait for all submitted
taskflows to complete and then notifies all worker threads to stop
and join these threads.
*/
~Executor();
/**
@brief runs a taskflow once
@param taskflow a tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run(Taskflow& taskflow);
/**
@brief runs a moved taskflow once
@param taskflow a moved tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(std::move(taskflow));
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run(Taskflow&& taskflow);
/**
@brief runs a taskflow once and invoke a callback upon completion
@param taskflow a tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow, [](){ std::cout << "done"; });
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run(Taskflow& taskflow, C&& callable);
/**
@brief runs a moved taskflow once and invoke a callback upon completion
@param taskflow a moved tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(
std::move(taskflow), [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run(Taskflow&& taskflow, C&& callable);
/**
@brief runs a taskflow for @c N times
@param taskflow a tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(taskflow, 2); // run taskflow 2 times
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run_n(Taskflow& taskflow, size_t N);
/**
@brief runs a moved taskflow for @c N times
@param taskflow a moved tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_n(
std::move(taskflow), 2 // run the moved taskflow 2 times
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run_n(Taskflow&& taskflow, size_t N);
/**
@brief runs a taskflow for @c N times and then invokes a callback
@param taskflow a tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(
taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invoke
// the lambda to print "done"
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run_n(Taskflow& taskflow, size_t N, C&& callable);
/**
@brief runs a moved taskflow for @c N times and then invokes a callback
@param taskflow a moved tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
// run the moved taskflow 2 times and invoke the lambda to print "done"
std::move(taskflow), 2, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run_n(Taskflow&& taskflow, size_t N, C&& callable);
/**
@brief runs a taskflow multiple times until the predicate becomes true
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred);
/**
@brief runs a moved taskflow and keeps running it
until the predicate becomes true
@param taskflow a moved tf::Taskflow object
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow), [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred);
/**
@brief runs a taskflow multiple times until the predicate becomes true and
then invokes the callback
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred, C&& callable);
/**
@brief runs a moved taskflow and keeps running
it until the predicate becomes true and then invokes the callback
@param taskflow a moved tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow),
[](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred, C&& callable);
/**
@brief runs a target graph and waits until it completes using
an internal worker of this executor
@tparam T target type which has `tf::Graph& T::graph()` defined
@param target the target task graph object
The method runs a target graph which has `tf::Graph& T::graph()` defined
and waits until the execution completes.
Unlike the typical flow of calling `tf::Executor::run` series
plus waiting on the result, this method must be called by an internal
worker of this executor. The caller worker will participate in
the work-stealing loop of the scheduler, thereby avoiding potential
deadlock caused by blocked waiting.
@code{.cpp}
tf::Executor executor(2);
tf::Taskflow taskflow;
std::array<tf::Taskflow, 1000> others;
std::atomic<size_t> counter{0};
for(size_t n=0; n<1000; n++) {
for(size_t i=0; i<1000; i++) {
others[n].emplace([&](){ counter++; });
}
taskflow.emplace([&executor, &tf=others[n]](){
executor.run_and_wait(tf);
//executor.run(tf).wait(); <- blocking the worker without doing anything
// will introduce deadlock
});
}
executor.run(taskflow).wait();
@endcode
The method is thread-safe as long as the target is not concurrently
run by two or more threads.
@attention
You must call tf::Executor::run_and_wait from a worker of the calling executor
or an exception will be thrown.
*/
template <typename T>
void run_and_wait(T& target);
/**
@brief keeps running the work-stealing loop until the predicate becomes true
@tparam P predicate type
@param predicate a boolean predicate to indicate when to stop the loop
The method keeps the caller worker in the work-stealing loop such that it
does not block (e.g., causing deadlock with other blocking workers)
until the stop predicate becomes true.
@code{.cpp}
taskflow.emplace([&](){
std::future<void> fu = std::async([](){
std::this_thread::sleep_for(std::chrono::seconds(100));
});
executor.loop_until([&](){
return fu.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
});
});
@endcode
@attention
You must call tf::Executor::loop_until from a worker of the calling executor
or an exception will be thrown.
*/
template <typename P>
void loop_until(P&& predicate);
/**
@brief waits for all tasks to complete
This member function waits until all submitted tasks
(e.g., taskflows, asynchronous tasks) to finish.
@code{.cpp}
executor.run(taskflow1);
executor.run_n(taskflow2, 10);
executor.run_n(taskflow3, 100);
executor.wait_for_all(); // wait until the above submitted taskflows finish
@endcode
*/
void wait_for_all();
/**
@brief queries the number of worker threads
Each worker represents one unique thread spawned by an executor
upon its construction time.
@code{.cpp}
tf::Executor executor(4);
std::cout << executor.num_workers(); // 4
@endcode
*/
size_t num_workers() const noexcept;
/**
@brief queries the number of running topologies at the time of this call
When a taskflow is submitted to an executor, a topology is created to store
runtime metadata of the running taskflow.
When the execution of the submitted taskflow finishes,
its corresponding topology will be removed from the executor.
@code{.cpp}
executor.run(taskflow);
std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_topologies() const;
/**
@brief queries the number of running taskflows with moved ownership
@code{.cpp}
executor.run(std::move(taskflow));
std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_taskflows() const;
/**
@brief queries the id of the caller thread in this executor
Each worker has a unique id in the range of @c 0 to @c N-1 associated with
its parent executor.
If the caller thread does not belong to the executor, @c -1 is returned.
@code{.cpp}
tf::Executor executor(4); // 4 workers in the executor
executor.this_worker_id(); // -1 (main thread is not a worker)
taskflow.emplace([&](){
std::cout << executor.this_worker_id(); // 0, 1, 2, or 3
});
executor.run(taskflow);
@endcode
*/
int this_worker_id() const;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
Unlike std::async, the return here is a @em tf::Future that holds
an optional object of the result.
If the asynchronous task is cancelled before it runs, the returned
optional holds @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.async([](){
std::cout << "create an asynchronous task and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs a given function asynchronously and gives a name to this task
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
Naming an asynchronous task is primarily used for profiling and visualizing
the task execution timeline.
Unlike std::async, the return here is a tf::Future that holds
an optional object of the result.
If the asynchronous task is cancelled before it runs, the returned
optional holds @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.named_async("name", [](){
std::cout << "create an asynchronous task with a name and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::async but does not return a future object
This member function is more efficient than tf::Executor::async
and its use is encouraged when there is no data to return.
@code{.cpp}
executor.silent_async([](){
std::cout << "create an asynchronous task with no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::named_async but does not return a future object
This member function is more efficient than tf::Executor::named_async
and its use is encouraged when there is no data to return.
@code{.cpp}
executor.named_silent_async("name", [](){
std::cout << "create an asynchronous task with a name and no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief constructs an observer to inspect the activities of worker threads
@tparam Observer observer type derived from tf::ObserverInterface
@tparam ArgsT argument parameter pack
@param args arguments to forward to the constructor of the observer
@return a shared pointer to the created observer
Each executor manages a list of observers with shared ownership with callers.
For each of these observers, the two member functions,
tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit
will be called before and after the execution of a task.
This member function is not thread-safe.
*/
template <typename Observer, typename... ArgsT>
std::shared_ptr<Observer> make_observer(ArgsT&&... args);
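// Example (editor's sketch): a minimal observer; the member signatures
// are assumptions based on tf::ObserverInterface and the set_up call in
// make_observer, and the name MyObserver is hypothetical.
//
// @code{.cpp}
// struct MyObserver : public tf::ObserverInterface {
//   void set_up(size_t num_workers) override {}
//   void on_entry(tf::WorkerView wv, tf::TaskView tv) override {}
//   void on_exit(tf::WorkerView wv, tf::TaskView tv) override {}
// };
// auto obs = executor.make_observer<MyObserver>();
// @endcode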
/**
@brief removes an observer from the executor
This member function is not thread-safe.
*/
template <typename Observer>
void remove_observer(std::shared_ptr<Observer> observer);
/**
@brief queries the number of observers
*/
size_t num_observers() const noexcept;
private:
const size_t _MAX_STEALS;
std::condition_variable _topology_cv;
std::mutex _taskflow_mutex;
std::mutex _topology_mutex;
std::mutex _wsq_mutex;
size_t _num_topologies {0};
std::unordered_map<std::thread::id, size_t> _wids;
std::vector<std::thread> _threads;
std::vector<Worker> _workers;
std::list<Taskflow> _taskflows;
Notifier _notifier;
TaskQueue<Node*> _wsq;
std::atomic<bool> _done {0};
std::shared_ptr<WorkerInterface> _worker_interface;
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
Worker* _this_worker();
bool _wait_for_task(Worker&, Node*&);
void _observer_prologue(Worker&, Node*);
void _observer_epilogue(Worker&, Node*);
void _spawn(size_t);
void _exploit_task(Worker&, Node*&);
void _explore_task(Worker&, Node*&);
void _schedule(Worker&, Node*);
void _schedule(Node*);
void _schedule(Worker&, const SmallVector<Node*>&);
void _schedule(const SmallVector<Node*>&);
void _set_up_topology(Worker*, Topology*);
void _tear_down_topology(Worker&, Topology*);
void _tear_down_async(Node*);
void _tear_down_invoke(Worker&, Node*);
void _cancel_invoke(Worker&, Node*);
void _increment_topology();
void _decrement_topology();
void _decrement_topology_and_notify();
void _invoke(Worker&, Node*);
void _invoke_static_task(Worker&, Node*);
void _invoke_dynamic_task(Worker&, Node*);
void _consume_graph(Worker&, Node*, Graph&);
void _detach_dynamic_task(Worker&, Node*, Graph&);
void _invoke_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_module_task(Worker&, Node*);
void _invoke_async_task(Worker&, Node*);
void _invoke_silent_async_task(Worker&, Node*);
void _invoke_cudaflow_task(Worker&, Node*);
void _invoke_syclflow_task(Worker&, Node*);
void _invoke_runtime_task(Worker&, Node*);
template <typename P>
void _loop_until(Worker&, P&&);
template <typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
void _invoke_cudaflow_task_entry(Node*, C&&);
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
void _invoke_syclflow_task_entry(Node*, C&&, Q&);
};
// Constructor
inline Executor::Executor(size_t N, std::shared_ptr<WorkerInterface> wix) :
_MAX_STEALS {((N+1) << 1)},
_threads {N},
_workers {N},
_notifier {N},
_worker_interface {std::move(wix)} {
if(N == 0) {
TF_THROW("no cpu workers to execute taskflows");
}
_spawn(N);
// instantiate the default observer if requested
if(has_env(TF_ENABLE_PROFILER)) {
TFProfManager::get()._manage(make_observer<TFProfObserver>());
}
}
// Destructor
inline Executor::~Executor() {
// wait for all topologies to complete
wait_for_all();
// shut down the scheduler
_done = true;
_notifier.notify(true);
for(auto& t : _threads){
t.join();
}
}
// Function: num_workers
inline size_t Executor::num_workers() const noexcept {
return _workers.size();
}
// Function: num_topologies
inline size_t Executor::num_topologies() const {
return _num_topologies;
}
// Function: num_taskflows
inline size_t Executor::num_taskflows() const {
return _taskflows.size();
}
// Function: _this_worker
inline Worker* Executor::_this_worker() {
auto itr = _wids.find(std::this_thread::get_id());
return itr == _wids.end() ? nullptr : &_workers[itr->second];
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... args) {
_increment_topology();
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Executor::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Executor::named_silent_async(
const std::string& name, F&& f, ArgsT&&... args
) {
_increment_topology();
Node* node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else {
_schedule(node);
}
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Executor::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: this_worker_id
inline int Executor::this_worker_id() const {
auto i = _wids.find(std::this_thread::get_id());
return i == _wids.end() ? -1 : static_cast<int>(_workers[i->second]._id);
}
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
std::mutex mutex;
std::condition_variable cond;
size_t n=0;
for(size_t id=0; id<N; ++id) {
_workers[id]._id = id;
_workers[id]._vtm = id;
_workers[id]._executor = this;
_workers[id]._waiter = &_notifier._waiters[id];
_threads[id] = std::thread([this] (
Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n
) -> void {
// assign the thread
w._thread = &_threads[w._id];
// enables the mapping
{
std::scoped_lock lock(mutex);
_wids[std::this_thread::get_id()] = w._id;
if(n++; n == num_workers()) {
cond.notify_one();
}
}
Node* t = nullptr;
// before entering the scheduler (work-stealing loop),
// call the user-specified prologue function
if(_worker_interface) {
_worker_interface->scheduler_prologue(w);
}
// must use 1 as condition instead of !done because
// the previous worker may stop while the following workers
// are still preparing to enter the scheduling loop
std::exception_ptr ptr{nullptr};
try {
while(1) {
// execute the tasks.
_exploit_task(w, t);
// wait for tasks
if(_wait_for_task(w, t) == false) {
break;
}
}
}
catch(...) {
ptr = std::current_exception();
}
// call the user-specified epilogue function
if(_worker_interface) {
_worker_interface->scheduler_epilogue(w, ptr);
}
}, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n));
// POSIX-like system can use the following to affine threads to cores
//cpu_set_t cpuset;
//CPU_ZERO(&cpuset);
//CPU_SET(id, &cpuset);
//pthread_setaffinity_np(
// _threads[id].native_handle(), sizeof(cpu_set_t), &cpuset
//);
}
std::unique_lock<std::mutex> lock(mutex);
cond.wait(lock, [&](){ return n==N; });
}
// Function: _loop_until
template <typename P>
inline void Executor::_loop_until(Worker& w, P&& stop_predicate) {
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
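// alternate between exploiting the local queue and exploring (stealing from)
// randomly selected victims until the stop predicate becomes true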
exploit:
while(!stop_predicate()) {
//exploit:
if(auto t = w._wsq.pop(); t) {
_invoke(w, t);
}
else {
size_t num_steals = 0;
explore:
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
_invoke(w, t);
goto exploit;
}
else if(!stop_predicate()) {
if(num_steals++ > _MAX_STEALS) {
std::this_thread::yield();
}
w._vtm = rdvtm(w._rdgen);
goto explore;
}
else {
break;
}
}
}
}
// Function: _explore_task
inline void Executor::_explore_task(Worker& w, Node*& t) {
//assert(_workers[w].wsq.empty());
//assert(!t);
size_t num_steals = 0;
size_t num_yields = 0;
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
// Here, we write do-while to make the worker steal at once
// from the assigned victim.
do {
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
break;
}
if(num_steals++ > _MAX_STEALS) {
std::this_thread::yield();
if(num_yields++ > 100) {
break;
}
}
w._vtm = rdvtm(w._rdgen);
} while(!_done);
}
// Procedure: _exploit_task
inline void Executor::_exploit_task(Worker& w, Node*& t) {
while(t) {
_invoke(w, t);
t = w._wsq.pop();
}
}
// Function: _wait_for_task
inline bool Executor::_wait_for_task(Worker& worker, Node*& t) {
explore_task:
_explore_task(worker, t);
// The last thief who successfully stole a task will wake up
// another thief worker to avoid starvation.
if(t) {
_notifier.notify(false);
return true;
}
// ---- 2PC guard ----
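// two-phase commit: announce the intent to sleep (prepare_wait), then
// re-check all queues and the done flag; commit_wait only if nothing has
// arrived in between, so a concurrent notify cannot be lost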
_notifier.prepare_wait(worker._waiter);
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
worker._vtm = worker._id;
goto explore_task;
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
return false;
}
// We need to use index-based scanning to avoid a data race
// with _spawn which may initialize a worker at the same time.
for(size_t vtm=0; vtm<_workers.size(); vtm++) {
if(!_workers[vtm]._wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
worker._vtm = vtm;
goto explore_task;
}
}
//--_num_thieves;
//_num_thieves.fetch_sub(1, std::memory_order_release);
/*//if(auto vtm = _find_vtm(me); vtm != _workers.size()) {
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
//t = (vtm == me) ? _wsq.steal() : _workers[vtm].wsq.steal();
t = _wsq.steal(); // must steal here
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
else {
worker._vtm = worker._id;
goto explore_task;
}
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
--_num_thieves;
return false;
}
if(_num_thieves.fetch_sub(1) == 1) {
if(_num_actives) {
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
// check all queues again
for(auto& w : _workers) {
if(!w._wsq.empty()) {
worker._vtm = w._id;
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
}
}*/
// Now I really need to relinquish myself to others
_notifier.commit_wait(worker._waiter);
goto explore_task;
}
// Function: make_observer
template<typename Observer, typename... ArgsT>
std::shared_ptr<Observer> Executor::make_observer(ArgsT&&... args) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
// use a local variable to mimic the constructor
auto ptr = std::make_shared<Observer>(std::forward<ArgsT>(args)...);
ptr->set_up(_workers.size());
_observers.emplace(std::static_pointer_cast<ObserverInterface>(ptr));
return ptr;
}
// Procedure: remove_observer
template <typename Observer>
void Executor::remove_observer(std::shared_ptr<Observer> ptr) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
_observers.erase(std::static_pointer_cast<ObserverInterface>(ptr));
}
// Function: num_observers
inline size_t Executor::num_observers() const noexcept {
return _observers.size();
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, Node* node) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid a data race.
auto p = node->_priority;
node->_state.fetch_or(Node::READY, std::memory_order_release);
// caller is a worker to this pool - starting at v3.5 we do not use
// any complicated notification mechanism as the experimental result
// has shown no significant advantage.
if(worker._executor == this) {
worker._wsq.push(node, p);
_notifier.notify(false);
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node, p);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Node* node) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid a data race.
auto p = node->_priority;
node->_state.fetch_or(Node::READY, std::memory_order_release);
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node, p);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, const SmallVector<Node*>& nodes) {
// We need to capture the node count to avoid accessing the nodes
// vector after the parent topology is removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// caller is a worker to this pool - starting at v3.5 we do not use
// any complicated notification mechanism as the experimental result
// has shown no significant advantage.
if(worker._executor == this) {
for(size_t i=0; i<num_nodes; ++i) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid a data race.
auto p = nodes[i]->_priority;
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
worker._wsq.push(nodes[i], p);
_notifier.notify(false);
}
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
auto p = nodes[k]->_priority;
nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release);
_wsq.push(nodes[k], p);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _schedule
inline void Executor::_schedule(const SmallVector<Node*>& nodes) {
// parent topology may be removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// We need to fetch p before the release so that the read
// operation is synchronized properly with other threads to
// avoid a data race.
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
auto p = nodes[k]->_priority;
nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release);
_wsq.push(nodes[k], p);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _invoke
inline void Executor::_invoke(Worker& worker, Node* node) {
// synchronize all outstanding memory operations caused by reordering
while(!(node->_state.load(std::memory_order_acquire) & Node::READY));
begin_invoke:
// no need to do other things if the topology is cancelled
if(node->_is_cancelled()) {
_cancel_invoke(worker, node);
return;
}
// if acquiring semaphore(s) exists, acquire them first
if(node->_semaphores && !node->_semaphores->to_acquire.empty()) {
SmallVector<Node*> nodes;
if(!node->_acquire_all(nodes)) {
_schedule(worker, nodes);
return;
}
node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release);
}
// condition task
//int cond = -1;
SmallVector<int> conds;
// switch is faster than nested if-else due to jump table
switch(node->_handle.index()) {
// static task
case Node::STATIC:{
_invoke_static_task(worker, node);
}
break;
// dynamic task
case Node::DYNAMIC: {
_invoke_dynamic_task(worker, node);
}
break;
// condition task
case Node::CONDITION: {
_invoke_condition_task(worker, node, conds);
}
break;
// multi-condition task
case Node::MULTI_CONDITION: {
_invoke_multi_condition_task(worker, node, conds);
}
break;
// module task
case Node::MODULE: {
_invoke_module_task(worker, node);
}
break;
// async task
case Node::ASYNC: {
_invoke_async_task(worker, node);
_tear_down_async(node);
return;
}
break;
// silent async task
case Node::SILENT_ASYNC: {
_invoke_silent_async_task(worker, node);
_tear_down_async(node);
return;
}
break;
// cudaflow task
case Node::CUDAFLOW: {
_invoke_cudaflow_task(worker, node);
}
break;
// syclflow task
case Node::SYCLFLOW: {
_invoke_syclflow_task(worker, node);
}
break;
// runtime task
case Node::RUNTIME: {
_invoke_runtime_task(worker, node);
}
break;
// monostate (placeholder)
default:
break;
}
// if releasing semaphores exist, release them
if(node->_semaphores && !node->_semaphores->to_release.empty()) {
_schedule(worker, node->_release_all());
}
// Reset the join counter to support the cyclic control flow.
// + We must do this before scheduling the successors to avoid a race
//   condition on _dependents.
// + We must use fetch_add instead of direct assigning
// because the user-space call on "invoke" may explicitly schedule
// this task again (e.g., pipeline) which can access the join_counter.
if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) {
node->_join_counter.fetch_add(node->num_strong_dependents());
}
else {
node->_join_counter.fetch_add(node->num_dependents());
}
// acquire the parent flow counter
auto& j = (node->_parent) ? node->_parent->_join_counter :
node->_topology->_join_counter;
// Here, we want to cache the latest successor with the highest priority
Node* cache {nullptr};
auto max_p = static_cast<unsigned>(TaskPriority::MAX);
// Schedule the successors based on the corresponding task type
switch(node->_handle.index()) {
// condition and multi-condition tasks
case Node::CONDITION:
case Node::MULTI_CONDITION: {
for(auto cond : conds) {
if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) {
auto s = node->_successors[cond];
// zeroing the join counter for invariant
s->_join_counter.store(0, std::memory_order_relaxed);
j.fetch_add(1);
if(s->_priority <= max_p) {
if(cache) {
_schedule(worker, cache);
}
cache = s;
max_p = s->_priority;
}
else {
_schedule(worker, s);
}
}
}
}
break;
// non-condition task
default: {
for(size_t i=0; i<node->_successors.size(); ++i) {
if(auto s = node->_successors[i]; --(s->_join_counter) == 0) {
j.fetch_add(1);
if(s->_priority <= max_p) {
if(cache) {
_schedule(worker, cache);
}
cache = s;
max_p = s->_priority;
}
else {
_schedule(worker, s);
}
}
}
}
break;
}
// tear down the invocation
_tear_down_invoke(worker, node);
// perform tail recursion elimination for the right-most child to reduce
// the number of expensive pop/push operations through the task queue
if(cache) {
node = cache;
//node->_state.fetch_or(Node::READY, std::memory_order_release);
goto begin_invoke;
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1);
}
else {
_decrement_topology_and_notify();
}
node_pool.recycle(node);
}
// Procedure: _tear_down_invoke
inline void Executor::_tear_down_invoke(Worker& worker, Node* node) {
// we must check the parent first before subtracting the join counter,
// or it can introduce a data race
if(node->_parent == nullptr) {
if(node->_topology->_join_counter.fetch_sub(1) == 1) {
_tear_down_topology(worker, node->_topology);
}
}
// joined subflow
else {
node->_parent->_join_counter.fetch_sub(1);
}
}
// Procedure: _cancel_invoke
inline void Executor::_cancel_invoke(Worker& worker, Node* node) {
switch(node->_handle.index()) {
// async task needs to carry out the promise
case Node::ASYNC:
std::get_if<Node::Async>(&(node->_handle))->work(true);
_tear_down_async(node);
break;
// silent async doesn't need to carry out the promise
case Node::SILENT_ASYNC:
_tear_down_async(node);
break;
// tear down topology if the node is the last leaf
default: {
_tear_down_invoke(worker, node);
}
break;
}
}
// Procedure: _observer_prologue
inline void Executor::_observer_prologue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_entry(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _observer_epilogue
inline void Executor::_observer_epilogue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_exit(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _invoke_static_task
inline void Executor::_invoke_static_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::Static>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_dynamic_task
inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) {
_observer_prologue(w, node);
auto handle = std::get_if<Node::Dynamic>(&node->_handle);
handle->subgraph._clear();
Subflow sf(*this, w, node, handle->subgraph);
handle->work(sf);
if(sf._joinable) {
_consume_graph(w, node, handle->subgraph);
}
_observer_epilogue(w, node);
}
// Procedure: _detach_dynamic_task
inline void Executor::_detach_dynamic_task(
Worker& w, Node* p, Graph& g
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_state.store(Node::DETACHED, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_topology = p->_topology;
n->_parent = nullptr;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
{
std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex);
p->_topology->_taskflow._graph._merge(std::move(g));
}
p->_topology->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// Procedure: _consume_graph
inline void Executor::_consume_graph(Worker& w, Node* p, Graph& g) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_topology = p->_topology;
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_loop_until(w, [p] () -> bool { return p->_join_counter == 0; });
}
// Procedure: _invoke_condition_task
inline void Executor::_invoke_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = { std::get_if<Node::Condition>(&node->_handle)->work() };
_observer_epilogue(worker, node);
}
// Procedure: _invoke_multi_condition_task
inline void Executor::_invoke_multi_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = std::get_if<Node::MultiCondition>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_cudaflow_task
inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::cudaFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_syclflow_task
inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::syclFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_module_task
inline void Executor::_invoke_module_task(Worker& w, Node* node) {
_observer_prologue(w, node);
_consume_graph(
w, node, std::get_if<Node::Module>(&node->_handle)->graph
);
_observer_epilogue(w, node);
}
// Procedure: _invoke_async_task
inline void Executor::_invoke_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::Async>(&node->_handle)->work(false);
_observer_epilogue(w, node);
}
// Procedure: _invoke_silent_async_task
inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::SilentAsync>(&node->_handle)->work();
_observer_epilogue(w, node);
}
// Procedure: _invoke_runtime_task
inline void Executor::_invoke_runtime_task(Worker& w, Node* node) {
_observer_prologue(w, node);
Runtime rt(*this, w, node);
std::get_if<Node::Runtime>(&node->_handle)->work(rt);
_observer_epilogue(w, node);
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow& f) {
return run_n(f, 1, [](){});
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow&& f) {
return run_n(std::move(f), 1, [](){});
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow& f, C&& c) {
return run_n(f, 1, std::forward<C>(c));
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow&& f, C&& c) {
return run_n(std::move(f), 1, std::forward<C>(c));
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat) {
return run_n(f, repeat, [](){});
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat) {
return run_n(std::move(f), repeat, [](){});
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat, C&& c) {
return run_until(
f, [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat, C&& c) {
return run_until(
std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
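// Usage sketch (illustrative; the executor and taskflow names are assumptions):
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   taskflow.emplace([](){ std::cout << "hello\n"; });
//   executor.run(taskflow).wait();                                // one run
//   executor.run_n(taskflow, 4, [](){ std::cout << "done\n"; }).wait();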
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow& f, P&& pred) {
return run_until(f, std::forward<P>(pred), [](){});
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) {
return run_until(std::move(f), std::forward<P>(pred), [](){});
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) {
_increment_topology();
// Need to check emptiness under the lock since a dynamic task may
// define detached blocks that modify the taskflow at the same time
bool empty;
{
std::lock_guard<std::mutex> lock(f._mutex);
empty = f.empty();
}
// No need to create a real topology; just return a dummy future
if(empty || p()) {
c();
std::promise<void> promise;
promise.set_value();
_decrement_topology_and_notify();
return tf::Future<void>(promise.get_future(), std::monostate{});
}
// create a topology for this run
auto t = std::make_shared<Topology>(f, std::forward<P>(p), std::forward<C>(c));
// need to create the future before the topology gets torn down quickly
tf::Future<void> future(t->_promise.get_future(), t);
// modifying topology needs to be protected under the lock
{
std::lock_guard<std::mutex> lock(f._mutex);
f._topologies.push(t);
if(f._topologies.size() == 1) {
_set_up_topology(_this_worker(), t.get());
}
}
return future;
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) {
std::list<Taskflow>::iterator itr;
{
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
itr = _taskflows.emplace(_taskflows.end(), std::move(f));
itr->_satellite = itr;
}
return run_until(*itr, std::forward<P>(pred), std::forward<C>(c));
}
// Function: run_and_wait
template <typename T>
void Executor::run_and_wait(T& target) {
auto w = _this_worker();
if(w == nullptr) {
TF_THROW("run_and_wait must be called by a worker of the executor");
}
Node parent; // dummy parent
_consume_graph(*w, &parent, target.graph());
}
// Function: loop_until
template <typename P>
void Executor::loop_until(P&& predicate) {
auto w = _this_worker();
if(w == nullptr) {
TF_THROW("loop_until must be called by a worker of the executor");
}
_loop_until(*w, std::forward<P>(predicate));
}
// Procedure: _increment_topology
inline void Executor::_increment_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
++_num_topologies;
}
// Procedure: _decrement_topology_and_notify
inline void Executor::_decrement_topology_and_notify() {
std::lock_guard<std::mutex> lock(_topology_mutex);
if(--_num_topologies == 0) {
_topology_cv.notify_all();
}
}
// Procedure: _decrement_topology
inline void Executor::_decrement_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
--_num_topologies;
}
// Procedure: wait_for_all
inline void Executor::wait_for_all() {
std::unique_lock<std::mutex> lock(_topology_mutex);
_topology_cv.wait(lock, [&](){ return _num_topologies == 0; });
}
// Function: _set_up_topology
inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) {
// ---- under taskflow lock ----
tpg->_sources.clear();
tpg->_taskflow._graph._clear_detached();
// scan each node in the graph and build up the links
for(auto node : tpg->_taskflow._graph._nodes) {
node->_topology = tpg;
node->_parent = nullptr;
node->_state.store(0, std::memory_order_relaxed);
if(node->num_dependents() == 0) {
tpg->_sources.push_back(node);
}
node->_set_up_join_counter();
}
tpg->_join_counter = tpg->_sources.size();
if(worker) {
_schedule(*worker, tpg->_sources);
}
else {
_schedule(tpg->_sources);
}
}
// Function: _tear_down_topology
inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) {
auto &f = tpg->_taskflow;
//assert(&tpg == &(f._topologies.front()));
// case 1: we still need to run the topology again
if(!tpg->_is_cancelled && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
tpg->_join_counter = tpg->_sources.size();
_schedule(worker, tpg->_sources);
}
// case 2: the final run of this topology
else {
// TODO: if the topology is cancelled, need to release all semaphores
if(tpg->_call != nullptr) {
tpg->_call();
}
// If there is another run (interleaved between lock acquisitions)
if(std::unique_lock<std::mutex> lock(f._mutex); f._topologies.size()>1) {
//assert(tpg->_join_counter == 0);
// Set the promise
tpg->_promise.set_value();
f._topologies.pop();
tpg = f._topologies.front().get();
// decrement the topology but since this is not the last we don't notify
_decrement_topology();
// setting up the topology needs to be under the lock, or it can
// introduce a memory-order error with pop
_set_up_topology(&worker, tpg);
}
else {
//assert(f._topologies.size() == 1);
// Need to back up the promise first here because the taskflow might be
// destroyed soon after calling get
auto p {std::move(tpg->_promise)};
// Back up the lambda capture in case it holds the topology pointer,
// to avoid it being released on pop_front ahead of _mutex.unlock and
// _promise.set_value. It is released safely when leaving the scope.
auto c {std::move(tpg->_call)};
// Get the satellite if any
auto s {f._satellite};
// Now we remove the topology from this taskflow
f._topologies.pop();
//f._mutex.unlock();
lock.unlock();
// We set the promise in the end in case taskflow leaves the scope.
// After set_value, the caller will return from wait
p.set_value();
_decrement_topology_and_notify();
// remove the taskflow if it is managed by the executor
// TODO: in the future, we may need to synchronize on wait
// (which means the following code should be moved before set_value)
if(s) {
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
_taskflows.erase(*s);
}
}
}
}
// ############################################################################
// Forward Declaration: Subflow
// ############################################################################
inline void Subflow::join() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow not joinable");
}
// only the parent worker can join the subflow
_executor._consume_graph(_worker, _parent, _graph);
_joinable = false;
}
inline void Subflow::detach() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow already joined or detached");
}
// only the parent worker can detach the subflow
_executor._detach_dynamic_task(_worker, _parent, _graph);
_joinable = false;
}
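// Usage sketch (illustrative): a dynamic task spawns a subflow and joins it.
//   taskflow.emplace([](tf::Subflow& sf){
//     tf::Task A = sf.emplace([](){});
//     tf::Task B = sf.emplace([](){});
//     A.precede(B);
//     sf.join();    // or sf.detach() to run independently of the parent
//   });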
// Function: named_async
template <typename F, typename... ArgsT>
auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) {
return _named_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: _named_async
template <typename F, typename... ArgsT>
auto Subflow::_named_async(
Worker& w,
const std::string& name,
F&& f,
ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Subflow::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
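// Usage sketch (illustrative): launch asynchronous work inside a subflow.
//   taskflow.emplace([](tf::Subflow& sf){
//     auto fu = sf.async([](){ return 7; });
//     sf.join();              // joining also waits for the async tasks
//     assert(fu.get() == 7);
//   });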
// Function: _named_silent_async
template <typename F, typename... ArgsT>
void Subflow::_named_silent_async(
Worker& w, const std::string& name, F&& f, ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
auto node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) {
_named_silent_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Subflow::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// ############################################################################
// Forward Declaration: Runtime
// ############################################################################
// Procedure: schedule
inline void Runtime::schedule(Task task) {
auto node = task._node;
auto& j = node->_parent ? node->_parent->_join_counter :
node->_topology->_join_counter;
j.fetch_add(1);
_executor._schedule(_worker, node);
}
// Procedure: run_and_wait
template <typename T>
void Runtime::run_and_wait(T&& target) {
// dynamic task (subflow)
if constexpr(is_dynamic_task_v<T>) {
Graph graph;
Subflow sf(_executor, _worker, _parent, graph);
target(sf);
if(sf._joinable) {
_executor._consume_graph(_worker, _parent, graph);
}
}
// graph object
else {
_executor._consume_graph(_worker, _parent, target.graph());
}
}
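// Usage sketch (illustrative): a runtime task consumes another graph in place.
//   tf::Taskflow inner;
//   inner.emplace([](){ std::cout << "inner\n"; });
//   taskflow.emplace([&inner](tf::Runtime& rt){
//     rt.run_and_wait(inner);   // blocks until the inner graph completes
//   });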
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/task.hpp | #pragma once
#include "graph.hpp"
/**
@file task.hpp
@brief task include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Task Types
// ----------------------------------------------------------------------------
/**
@enum TaskType
@brief enumeration of all task types
*/
enum class TaskType : int {
/** @brief placeholder task type */
PLACEHOLDER = 0,
/** @brief cudaFlow task type */
CUDAFLOW,
/** @brief syclFlow task type */
SYCLFLOW,
/** @brief static task type */
STATIC,
/** @brief dynamic (subflow) task type */
DYNAMIC,
/** @brief condition task type */
CONDITION,
/** @brief module task type */
MODULE,
/** @brief asynchronous task type */
ASYNC,
/** @brief runtime task type */
RUNTIME,
/** @brief undefined task type (for internal use only) */
UNDEFINED
};
/**
@private
@brief array of all task types (used for iterating task types)
*/
inline constexpr std::array<TaskType, 9> TASK_TYPES = {
TaskType::PLACEHOLDER,
TaskType::CUDAFLOW,
TaskType::SYCLFLOW,
TaskType::STATIC,
TaskType::DYNAMIC,
TaskType::CONDITION,
TaskType::MODULE,
TaskType::ASYNC,
TaskType::RUNTIME
};
/**
@brief convert a task type to a human-readable string
The name of each task type is the lower-case string of its characters.
@code{.cpp}
TaskType::PLACEHOLDER -> "placeholder"
TaskType::CUDAFLOW -> "cudaflow"
TaskType::SYCLFLOW -> "syclflow"
TaskType::STATIC -> "static"
TaskType::DYNAMIC -> "subflow"
TaskType::CONDITION -> "condition"
TaskType::MODULE -> "module"
TaskType::ASYNC -> "async"
TaskType::RUNTIME -> "runtime"
@endcode
*/
inline const char* to_string(TaskType type) {
const char* val;
switch(type) {
case TaskType::PLACEHOLDER: val = "placeholder"; break;
case TaskType::CUDAFLOW: val = "cudaflow"; break;
case TaskType::SYCLFLOW: val = "syclflow"; break;
case TaskType::STATIC: val = "static"; break;
case TaskType::DYNAMIC: val = "subflow"; break;
case TaskType::CONDITION: val = "condition"; break;
case TaskType::MODULE: val = "module"; break;
case TaskType::ASYNC: val = "async"; break;
case TaskType::RUNTIME: val = "runtime"; break;
default: val = "undefined"; break;
}
return val;
}
// ----------------------------------------------------------------------------
// Task Traits
// ----------------------------------------------------------------------------
/**
@brief determines if a callable is a static task
A static task is a callable object constructible from std::function<void()>.
*/
template <typename C>
constexpr bool is_static_task_v =
std::is_invocable_r_v<void, C> &&
!std::is_invocable_r_v<int, C> &&
!std::is_invocable_r_v<tf::SmallVector<int>, C>;
/**
@brief determines if a callable is a dynamic task
A dynamic task is a callable object constructible from std::function<void(Subflow&)>.
*/
template <typename C>
constexpr bool is_dynamic_task_v = std::is_invocable_r_v<void, C, Subflow&>;
/**
@brief determines if a callable is a condition task
A condition task is a callable object constructible from std::function<int()>.
*/
template <typename C>
constexpr bool is_condition_task_v = std::is_invocable_r_v<int, C>;
/**
@brief determines if a callable is a multi-condition task
A multi-condition task is a callable object constructible from
std::function<tf::SmallVector<int>()>.
*/
template <typename C>
constexpr bool is_multi_condition_task_v =
std::is_invocable_r_v<SmallVector<int>, C>;
/**
@brief determines if a callable is a %cudaFlow task
A cudaFlow task is a callable object constructible from
std::function<void(tf::cudaFlow&)> or std::function<void(tf::cudaFlowCapturer&)>.
*/
template <typename C>
constexpr bool is_cudaflow_task_v = std::is_invocable_r_v<void, C, cudaFlow&> ||
std::is_invocable_r_v<void, C, cudaFlowCapturer&>;
/**
@brief determines if a callable is a %syclFlow task
A syclFlow task is a callable object constructible from
std::function<void(tf::syclFlow&)>.
*/
template <typename C>
constexpr bool is_syclflow_task_v = std::is_invocable_r_v<void, C, syclFlow&>;
/**
@brief determines if a callable is a runtime task
A runtime task is a callable object constructible from
std::function<void(tf::Runtime&)>.
*/
template <typename C>
constexpr bool is_runtime_task_v = std::is_invocable_r_v<void, C, Runtime&>;
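// Illustrative static checks (assumption: plain lambdas named s, c, and d):
//   auto s = [](){};                 // static task
//   auto c = [](){ return 0; };      // condition task
//   auto d = [](tf::Subflow&){};     // dynamic (subflow) task
//   static_assert(tf::is_static_task_v<decltype(s)>, "");
//   static_assert(tf::is_condition_task_v<decltype(c)>, "");
//   static_assert(tf::is_dynamic_task_v<decltype(d)>, "");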
// ----------------------------------------------------------------------------
// Task
// ----------------------------------------------------------------------------
/**
@class Task
@brief class to create a task handle over a node in a taskflow graph
A task is a wrapper over a node in a taskflow graph.
It provides a set of methods for users to access and modify the attributes of
the associated node in the taskflow graph.
A task is a very lightweight object (i.e., only storing a node pointer) that
can be trivially copied around,
and it does not own the lifetime of the associated node.
*/
class Task {
friend class FlowBuilder;
friend class Runtime;
friend class Taskflow;
friend class TaskView;
friend class Executor;
public:
/**
@brief constructs an empty task
*/
Task() = default;
/**
@brief constructs the task with a copy of the other task
*/
Task(const Task& other);
/**
@brief replaces the contents with a copy of the other task
*/
Task& operator = (const Task&);
/**
@brief replaces the contents with a null pointer
*/
Task& operator = (std::nullptr_t);
/**
@brief compares if two tasks are associated with the same graph node
*/
bool operator == (const Task& rhs) const;
/**
@brief compares if two tasks are not associated with the same graph node
*/
bool operator != (const Task& rhs) const;
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors of the task
*/
size_t num_successors() const;
/**
@brief queries the number of predecessors of the task
*/
size_t num_dependents() const;
/**
@brief queries the number of strong dependents of the task
*/
size_t num_strong_dependents() const;
/**
@brief queries the number of weak dependents of the task
*/
size_t num_weak_dependents() const;
/**
@brief assigns a name to the task
@param name a @std_string acceptable string
@return @c *this
*/
Task& name(const std::string& name);
/**
@brief assigns a callable
@tparam C callable type
@param callable a callable to construct one of the static, dynamic, condition,
multi-condition, cudaFlow, and runtime tasks
@return @c *this
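A minimal usage sketch (illustrative; the taskflow and the callable are assumptions):
@code{.cpp}
tf::Taskflow taskflow;
tf::Task task = taskflow.placeholder();
task.work([](){ std::cout << "I run as a static task\n"; });
@endcode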
*/
template <typename C>
Task& work(C&& callable);
/**
@brief creates a module task from a taskflow
@tparam T object type
@param object a custom object that defines @c T::graph() method
@return @c *this
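A minimal usage sketch (illustrative; the taskflows are assumptions):
@code{.cpp}
tf::Taskflow f1, f2;
f1.emplace([](){ std::cout << "f1\n"; });
// f2 runs f1 through a module task
tf::Task module = f2.placeholder().composed_of(f1);
@endcode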
*/
template <typename T>
Task& composed_of(T& object);
/**
@brief adds precedence links from this to other tasks
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
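A minimal usage sketch (illustrative; the tasks are assumptions):
@code{.cpp}
auto [A, B, C] = taskflow.emplace(
  [](){ std::cout << "A\n"; },
  [](){ std::cout << "B\n"; },
  [](){ std::cout << "C\n"; }
);
A.precede(B, C);  // A runs before both B and C
@endcode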
*/
template <typename... Ts>
Task& precede(Ts&&... tasks);
/**
@brief adds precedence links from other tasks to this
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
Task& succeed(Ts&&... tasks);
/**
@brief makes the task release this semaphore
*/
Task& release(Semaphore& semaphore);
/**
@brief makes the task acquire this semaphore
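A minimal usage sketch (illustrative): a semaphore of value 1 serializes tasks.
@code{.cpp}
tf::Semaphore semaphore(1);  // at most one holder at any time
for(int i = 0; i < 4; ++i) {
  taskflow.emplace([i](){ std::cout << i << '\n'; })
          .acquire(semaphore)
          .release(semaphore);
}
@endcode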
*/
Task& acquire(Semaphore& semaphore);
/**
@brief assigns pointer to user data
@param data pointer to user data
The following example shows how to attach user data to a task and
run the task iteratively while changing the data value:
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow("attach data to a task");
int data;
// create a task and attach it the data
auto A = taskflow.placeholder();
A.data(&data).work([A](){
auto d = *static_cast<int*>(A.data());
std::cout << "data is " << d << std::endl;
});
// run the taskflow iteratively with changing data
for(data = 0; data<10; data++){
executor.run(taskflow).wait();
}
@endcode
@return @c *this
*/
Task& data(void* data);
/**
@brief assigns a priority value to the task
A priority value can be one of the following three levels,
tf::TaskPriority::HIGH (numerically equivalent to 0),
tf::TaskPriority::NORMAL (numerically equivalent to 1), and
tf::TaskPriority::LOW (numerically equivalent to 2).
The smaller the priority value, the higher the priority.
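A minimal usage sketch (illustrative; the taskflow is an assumption):
@code{.cpp}
tf::Task task = taskflow.emplace([](){});
task.priority(tf::TaskPriority::HIGH);
assert(task.priority() == tf::TaskPriority::HIGH);
@endcode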
*/
Task& priority(TaskPriority p);
/**
@brief queries the priority value of the task
*/
TaskPriority priority() const;
/**
@brief resets the task handle to null
*/
void reset();
/**
@brief resets the associated work to a placeholder
*/
void reset_work();
/**
@brief queries if the task handle points to a task node
*/
bool empty() const;
/**
@brief queries if the task has a work assigned
*/
bool has_work() const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
/**
@brief obtains a hash value of the underlying node
*/
size_t hash_value() const;
/**
@brief returns the task type
*/
TaskType type() const;
/**
@brief dumps the task through an output stream
*/
void dump(std::ostream& ostream) const;
/**
@brief queries pointer to user data
*/
void* data() const;
private:
Task(Node*);
Node* _node {nullptr};
};
// Constructor
inline Task::Task(Node* node) : _node {node} {
}
// Constructor
inline Task::Task(const Task& rhs) : _node {rhs._node} {
}
// Function: precede
template <typename... Ts>
Task& Task::precede(Ts&&... tasks) {
(_node->_precede(tasks._node), ...);
//_precede(std::forward<Ts>(tasks)...);
return *this;
}
// Function: succeed
template <typename... Ts>
Task& Task::succeed(Ts&&... tasks) {
(tasks._node->_precede(_node), ...);
//_succeed(std::forward<Ts>(tasks)...);
return *this;
}
// Function: composed_of
template <typename T>
Task& Task::composed_of(T& object) {
_node->_handle.emplace<Node::Module>(object);
return *this;
}
// Operator =
inline Task& Task::operator = (const Task& rhs) {
_node = rhs._node;
return *this;
}
// Operator =
inline Task& Task::operator = (std::nullptr_t ptr) {
_node = ptr;
return *this;
}
// Operator ==
inline bool Task::operator == (const Task& rhs) const {
return _node == rhs._node;
}
// Operator !=
inline bool Task::operator != (const Task& rhs) const {
return _node != rhs._node;
}
// Function: name
inline Task& Task::name(const std::string& name) {
_node->_name = name;
return *this;
}
// Function: acquire
inline Task& Task::acquire(Semaphore& s) {
if(!_node->_semaphores) {
_node->_semaphores = std::make_unique<Node::Semaphores>();
}
_node->_semaphores->to_acquire.push_back(&s);
return *this;
}
// Function: release
inline Task& Task::release(Semaphore& s) {
if(!_node->_semaphores) {
//_node->_semaphores.emplace();
_node->_semaphores = std::make_unique<Node::Semaphores>();
}
_node->_semaphores->to_release.push_back(&s);
return *this;
}
// Procedure: reset
inline void Task::reset() {
_node = nullptr;
}
// Procedure: reset_work
inline void Task::reset_work() {
_node->_handle.emplace<std::monostate>();
}
// Function: name
inline const std::string& Task::name() const {
return _node->_name;
}
// Function: num_dependents
inline size_t Task::num_dependents() const {
return _node->num_dependents();
}
// Function: num_strong_dependents
inline size_t Task::num_strong_dependents() const {
return _node->num_strong_dependents();
}
// Function: num_weak_dependents
inline size_t Task::num_weak_dependents() const {
return _node->num_weak_dependents();
}
// Function: num_successors
inline size_t Task::num_successors() const {
return _node->num_successors();
}
// Function: empty
inline bool Task::empty() const {
return _node == nullptr;
}
// Function: has_work
inline bool Task::has_work() const {
return _node ? _node->_handle.index() != 0 : false;
}
// Function: type
inline TaskType Task::type() const {
switch(_node->_handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
case Node::DYNAMIC: return TaskType::DYNAMIC;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
case Node::ASYNC: return TaskType::ASYNC;
case Node::SILENT_ASYNC: return TaskType::ASYNC;
case Node::CUDAFLOW: return TaskType::CUDAFLOW;
case Node::SYCLFLOW: return TaskType::SYCLFLOW;
case Node::RUNTIME: return TaskType::RUNTIME;
default: return TaskType::UNDEFINED;
}
}
// Function: for_each_successor
template <typename V>
void Task::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node->_successors.size(); ++i) {
visitor(Task(_node->_successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void Task::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node->_dependents.size(); ++i) {
visitor(Task(_node->_dependents[i]));
}
}
// Function: hash_value
inline size_t Task::hash_value() const {
return std::hash<Node*>{}(_node);
}
// Procedure: dump
inline void Task::dump(std::ostream& os) const {
os << "task ";
if(name().empty()) os << _node;
else os << name();
os << " [type=" << to_string(type()) << ']';
}
// Function: work
template <typename C>
Task& Task::work(C&& c) {
if constexpr(is_static_task_v<C>) {
_node->_handle.emplace<Node::Static>(std::forward<C>(c));
}
else if constexpr(is_dynamic_task_v<C>) {
_node->_handle.emplace<Node::Dynamic>(std::forward<C>(c));
}
else if constexpr(is_condition_task_v<C>) {
_node->_handle.emplace<Node::Condition>(std::forward<C>(c));
}
else if constexpr(is_multi_condition_task_v<C>) {
_node->_handle.emplace<Node::MultiCondition>(std::forward<C>(c));
}
else if constexpr(is_cudaflow_task_v<C>) {
_node->_handle.emplace<Node::cudaFlow>(std::forward<C>(c));
}
else if constexpr(is_runtime_task_v<C>) {
_node->_handle.emplace<Node::Runtime>(std::forward<C>(c));
}
else {
static_assert(dependent_false_v<C>, "invalid task callable");
}
return *this;
}
// Function: data
inline void* Task::data() const {
return _node->_data;
}
// Function: data
inline Task& Task::data(void* data) {
_node->_data = data;
return *this;
}
// Function: priority
inline Task& Task::priority(TaskPriority p) {
_node->_priority = static_cast<unsigned>(p);
return *this;
}
// Function: priority
inline TaskPriority Task::priority() const {
return static_cast<TaskPriority>(_node->_priority);
}
// ----------------------------------------------------------------------------
// global ostream
// ----------------------------------------------------------------------------
/**
@brief overload of ostream inserter operator for Task
*/
inline std::ostream& operator << (std::ostream& os, const Task& task) {
task.dump(os);
return os;
}
// ----------------------------------------------------------------------------
/**
@class TaskView
@brief class to access task information from the observer interface
*/
class TaskView {
friend class Executor;
public:
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors of the task
*/
size_t num_successors() const;
/**
@brief queries the number of predecessors of the task
*/
size_t num_dependents() const;
/**
@brief queries the number of strong dependents of the task
*/
size_t num_strong_dependents() const;
/**
@brief queries the number of weak dependents of the task
*/
size_t num_weak_dependents() const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
/**
@brief queries the task type
*/
TaskType type() const;
/**
@brief obtains a hash value of the underlying node
*/
size_t hash_value() const;
private:
TaskView(const Node&);
TaskView(const TaskView&) = default;
const Node& _node;
};
// Constructor
inline TaskView::TaskView(const Node& node) : _node {node} {
}
// Function: name
inline const std::string& TaskView::name() const {
return _node._name;
}
// Function: num_dependents
inline size_t TaskView::num_dependents() const {
return _node.num_dependents();
}
// Function: num_strong_dependents
inline size_t TaskView::num_strong_dependents() const {
return _node.num_strong_dependents();
}
// Function: num_weak_dependents
inline size_t TaskView::num_weak_dependents() const {
return _node.num_weak_dependents();
}
// Function: num_successors
inline size_t TaskView::num_successors() const {
return _node.num_successors();
}
// Function: type
inline TaskType TaskView::type() const {
switch(_node._handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
case Node::DYNAMIC: return TaskType::DYNAMIC;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
case Node::ASYNC: return TaskType::ASYNC;
case Node::SILENT_ASYNC: return TaskType::ASYNC;
case Node::CUDAFLOW: return TaskType::CUDAFLOW;
case Node::SYCLFLOW: return TaskType::SYCLFLOW;
case Node::RUNTIME: return TaskType::RUNTIME;
default: return TaskType::UNDEFINED;
}
}
// Function: hash_value
inline size_t TaskView::hash_value() const {
return std::hash<const Node*>{}(&_node);
}
// Function: for_each_successor
template <typename V>
void TaskView::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node._successors.size(); ++i) {
visitor(TaskView(*_node._successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void TaskView::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node._dependents.size(); ++i) {
visitor(TaskView(*_node._dependents[i]));
}
}
} // end of namespace tf. ---------------------------------------------------
namespace std {
/**
@struct hash
@brief hash specialization for std::hash<tf::Task>
*/
template <>
struct hash<tf::Task> {
auto operator() (const tf::Task& task) const noexcept {
return task.hash_value();
}
};
/**
@struct hash
@brief hash specialization for std::hash<tf::TaskView>
*/
template <>
struct hash<tf::TaskView> {
auto operator() (const tf::TaskView& task_view) const noexcept {
return task_view.hash_value();
}
};
} // end of namespace std ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/error.hpp | #pragma once
#include <iostream>
#include <sstream>
#include <exception>
#include "../utility/stream.hpp"
namespace tf {
// Procedure: throw_re
// Throws a std::runtime_error whose message is prefixed by the file name and line.
template <typename... ArgsT>
//void throw_se(const char* fname, const size_t line, Error::Code c, ArgsT&&... args) {
void throw_re(const char* fname, const size_t line, ArgsT&&... args) {
std::ostringstream oss;
oss << "[" << fname << ":" << line << "] ";
//ostreamize(oss, std::forward<ArgsT>(args)...);
(oss << ... << args);
throw std::runtime_error(oss.str());
}
} // ------------------------------------------------------------------------
#define TF_THROW(...) tf::throw_re(__FILE__, __LINE__, __VA_ARGS__);
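// Usage sketch (illustrative): TF_THROW("invalid argument: ", 42);
// throws std::runtime_error with a message prefixed by "[<file>:<line>] ".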
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/async_task.hpp | #pragma once
#include "graph.hpp"
/**
@file async_task.hpp
@brief asynchronous task include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// AsyncTask
// ----------------------------------------------------------------------------
/**
@brief class to create a dependent asynchronous task
A tf::AsyncTask is a lightweight handle that retains @em shared ownership
of a dependent async task created by an executor.
This shared ownership ensures that the async task remains alive when
adding it to the dependency list of another async task,
thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem).
@code{.cpp}
// main thread retains shared ownership of async task A
tf::AsyncTask A = executor.silent_dependent_async([](){});
// task A remains alive (i.e., at least one ref count by the main thread)
// when being added to the dependency list of async task B
tf::AsyncTask B = executor.silent_dependent_async([](){}, A);
@endcode
Currently, tf::AsyncTask is implemented based on the logic of
C++ smart pointer std::shared_ptr and
is considered cheap to copy or move as long as only a handful of objects
own it.
When a worker completes an async task, it will remove the task from the executor,
decrementing the number of shared owners by one.
If that counter reaches zero, the task is destroyed.
*/
class AsyncTask {
friend class Executor;
public:
/**
@brief constructs an empty task handle
*/
AsyncTask() = default;
/**
@brief destroys the managed asynchronous task if this is the last owner
*/
~AsyncTask();
/**
@brief constructs an asynchronous task that shares ownership of @c rhs
*/
AsyncTask(const AsyncTask& rhs);
/**
@brief move-constructs an asynchronous task from @c rhs
*/
AsyncTask(AsyncTask&& rhs);
/**
@brief copy-assigns the asynchronous task from @c rhs
Releases the managed object of @c this and retains a new shared ownership
of @c rhs.
*/
AsyncTask& operator = (const AsyncTask& rhs);
/**
@brief move-assigns the asynchronous task from @c rhs
Releases the managed object of @c this and takes over the ownership of @c rhs.
*/
AsyncTask& operator = (AsyncTask&& rhs);
/**
@brief checks if the asynchronous task stores nothing
*/
bool empty() const;
/**
@brief releases the managed object of @c this
*/
void reset();
/**
@brief obtains a hash value of this asynchronous task
*/
size_t hash_value() const;
/**
@brief returns the number of shared owners that are currently managing
this asynchronous task
*/
size_t use_count() const;
/**
@brief returns the boolean indicating whether the async task is done
*/
bool is_done() const;
private:
explicit AsyncTask(Node*);
Node* _node {nullptr};
void _incref();
void _decref();
};
// Constructor
inline AsyncTask::AsyncTask(Node* ptr) : _node{ptr} {
_incref();
}
// Function: _incref
inline void AsyncTask::_incref() {
if(_node) {
std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_add(
1, std::memory_order_relaxed
);
}
}
// Function: _decref
inline void AsyncTask::_decref() {
if(_node && std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_sub(
1, std::memory_order_acq_rel
) == 1) {
node_pool.recycle(_node);
}
}
// Copy Constructor
inline AsyncTask::AsyncTask(const AsyncTask& rhs) :
_node{rhs._node} {
_incref();
}
// Move Constructor
inline AsyncTask::AsyncTask(AsyncTask&& rhs) :
_node {rhs._node} {
rhs._node = nullptr;
}
// Destructor
inline AsyncTask::~AsyncTask() {
_decref();
}
// Copy assignment
inline AsyncTask& AsyncTask::operator = (const AsyncTask& rhs) {
_decref();
_node = rhs._node;
_incref();
return *this;
}
// Move assignment
inline AsyncTask& AsyncTask::operator = (AsyncTask&& rhs) {
_decref();
_node = rhs._node;
rhs._node = nullptr;
return *this;
}
// Function: empty
inline bool AsyncTask::empty() const {
return _node == nullptr;
}
// Function: reset
inline void AsyncTask::reset() {
_decref();
_node = nullptr;
}
// Function: hash_value
inline size_t AsyncTask::hash_value() const {
return std::hash<Node*>{}(_node);
}
// Function: use_count
inline size_t AsyncTask::use_count() const {
return _node == nullptr ? size_t{0} :
std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.load(
std::memory_order_relaxed
);
}
// Function: is_done
inline bool AsyncTask::is_done() const {
return std::get_if<Node::DependentAsync>(&(_node->_handle))->state.load(
std::memory_order_acquire
) == Node::AsyncState::FINISHED;
}
} // end of namespace tf ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/async.hpp | #pragma once
#include "executor.hpp"
// https://hackmd.io/@sysprog/concurrency-atomics
namespace tf {
// ----------------------------------------------------------------------------
// Async
// ----------------------------------------------------------------------------
// Function: async
template <typename F>
auto Executor::async(const std::string& name, F&& f) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
auto node = node_pool.animate(
name, 0, nullptr, nullptr, 0,
std::in_place_type_t<Node::Async>{},
_make_promised_async(std::move(p), std::forward<F>(f))
);
_schedule_async_task(node);
return fu;
}
// Function: async
template <typename F>
auto Executor::async(F&& f) {
return async("", std::forward<F>(f));
}
// ----------------------------------------------------------------------------
// Silent Async
// ----------------------------------------------------------------------------
// Function: silent_async
template <typename F>
void Executor::silent_async(const std::string& name, F&& f) {
_increment_topology();
auto node = node_pool.animate(
name, 0, nullptr, nullptr, 0,
std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
);
_schedule_async_task(node);
}
// Function: silent_async
template <typename F>
void Executor::silent_async(F&& f) {
silent_async("", std::forward<F>(f));
}
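// Usage sketch (illustrative; the executor name is an assumption):
//   tf::Executor executor;
//   auto fu = executor.async([](){ return 42; });   // std::future<int>
//   executor.silent_async([](){ std::cout << "fire-and-forget\n"; });
//   executor.wait_for_all();
//   assert(fu.get() == 42);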
// ----------------------------------------------------------------------------
// Async Helper Methods
// ----------------------------------------------------------------------------
// Function: _make_promised_async
template <typename R, typename F>
auto Executor::_make_promised_async(std::promise<R>&& p, F&& func) {
return [p=make_moc(std::move(p)), func=std::forward<F>(func)]() mutable {
if constexpr(std::is_same_v<R, void>) {
func();
p.object.set_value();
}
else {
p.object.set_value(func());
}
};
}
// Procedure: _schedule_async_task
inline void Executor::_schedule_async_task(Node* node) {
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
// from runtime
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1, std::memory_order_release);
}
// from executor
else {
_decrement_topology();
}
node_pool.recycle(node);
}
// ----------------------------------------------------------------------------
// Silent Dependent Async
// ----------------------------------------------------------------------------
// Function: silent_dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) {
return silent_dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
}
// Function: silent_dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(
const std::string& name, F&& func, Tasks&&... tasks
){
_increment_topology();
size_t num_dependents = sizeof...(Tasks);
// create a task before scheduling the node to retain a shared ownership first
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
));
if constexpr(sizeof...(Tasks) > 0) {
(_process_async_dependent(task._node, tasks, num_dependents), ...);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return task;
}
// Function: silent_dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) {
return silent_dependent_async("", std::forward<F>(func), first, last);
}
// Function: silent_dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(
const std::string& name, F&& func, I first, I last
) {
_increment_topology();
size_t num_dependents = std::distance(first, last);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
));
for(; first != last; first++){
_process_async_dependent(task._node, *first, num_dependents);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return task;
}
// ----------------------------------------------------------------------------
// Dependent Async
// ----------------------------------------------------------------------------
// Function: dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
auto Executor::dependent_async(F&& func, Tasks&&... tasks) {
return dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
}
// Function: dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
auto Executor::dependent_async(
const std::string& name, F&& func, Tasks&&... tasks
) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
size_t num_dependents = sizeof...(tasks);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{},
_make_promised_async(std::move(p), std::forward<F>(func))
));
if constexpr(sizeof...(Tasks) > 0) {
(_process_async_dependent(task._node, tasks, num_dependents), ...);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return std::make_pair(std::move(task), std::move(fu));
}
// Function: dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
auto Executor::dependent_async(F&& func, I first, I last) {
return dependent_async("", std::forward<F>(func), first, last);
}
// Function: dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
auto Executor::dependent_async(
const std::string& name, F&& func, I first, I last
) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
size_t num_dependents = std::distance(first, last);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{},
_make_promised_async(std::move(p), std::forward<F>(func))
));
for(; first != last; first++) {
_process_async_dependent(task._node, *first, num_dependents);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return std::make_pair(std::move(task), std::move(fu));
}
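// Usage sketch (illustrative): chain dependent async tasks.
//   tf::AsyncTask A = executor.silent_dependent_async([](){});
//   auto [B, fuB]   = executor.dependent_async([](){ return 1; }, A);
//   int v = fuB.get();   // waits for A and then B; v == 1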
// ----------------------------------------------------------------------------
// Dependent Async Helper Functions
// ----------------------------------------------------------------------------
// Procedure: _process_async_dependent
inline void Executor::_process_async_dependent(
Node* node, tf::AsyncTask& task, size_t& num_dependents
) {
auto& state = std::get_if<Node::DependentAsync>(&(task._node->_handle))->state;
add_successor:
auto target = Node::AsyncState::UNFINISHED;
// acquires the lock
if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
task._node->_successors.push_back(node);
state.store(Node::AsyncState::UNFINISHED, std::memory_order_release);
}
// dep's state is FINISHED, which means dep has already finished its callable;
// thus we decrement the node's join counter by 1
else if (target == Node::AsyncState::FINISHED) {
num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
}
// another worker is adding its async task to the successor list of this node
else {
goto add_successor;
}
}
// Procedure: _tear_down_dependent_async
inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) {
auto handle = std::get_if<Node::DependentAsync>(&(node->_handle));
// this async task comes from Executor
auto target = Node::AsyncState::UNFINISHED;
while(!handle->state.compare_exchange_weak(target, Node::AsyncState::FINISHED,
std::memory_order_acq_rel,
std::memory_order_relaxed)) {
target = Node::AsyncState::UNFINISHED;
}
// spawn successors whenever their dependencies are resolved
worker._cache = nullptr;
for(size_t i=0; i<node->_successors.size(); ++i) {
if(auto s = node->_successors[i];
s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1
) {
if(worker._cache) {
_schedule(worker, worker._cache);
}
worker._cache = s;
}
}
// now the executor no longer needs to retain ownership
if(handle->use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
node_pool.recycle(node);
}
_decrement_topology();
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/environment.hpp | #pragma once
#define TF_ENABLE_PROFILER "TF_ENABLE_PROFILER"
namespace tf {
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/observer.hpp | #pragma once
#include "task.hpp"
#include "worker.hpp"
/**
@file observer.hpp
@brief observer include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// timeline data structure
// ----------------------------------------------------------------------------
/**
@brief default time point type of observers
*/
using observer_stamp_t = std::chrono::time_point<std::chrono::steady_clock>;
/**
@private
*/
struct Segment {
std::string name;
TaskType type;
observer_stamp_t beg;
observer_stamp_t end;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(name, type, beg, end);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(name, type, beg, end);
}
Segment() = default;
Segment(
const std::string& n, TaskType t, observer_stamp_t b, observer_stamp_t e
) : name {n}, type {t}, beg {b}, end {e} {
}
auto span() const {
return end-beg;
}
};
/**
@private
*/
struct Timeline {
size_t uid;
observer_stamp_t origin;
std::vector<std::vector<std::vector<Segment>>> segments;
Timeline() = default;
Timeline(const Timeline& rhs) = delete;
Timeline(Timeline&& rhs) = default;
Timeline& operator = (const Timeline& rhs) = delete;
Timeline& operator = (Timeline&& rhs) = default;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(uid, origin, segments);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(uid, origin, segments);
}
};
/**
@private
*/
struct ProfileData {
std::vector<Timeline> timelines;
ProfileData() = default;
ProfileData(const ProfileData& rhs) = delete;
ProfileData(ProfileData&& rhs) = default;
ProfileData& operator = (const ProfileData& rhs) = delete;
ProfileData& operator = (ProfileData&&) = default;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(timelines);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(timelines);
}
};
// ----------------------------------------------------------------------------
// observer interface
// ----------------------------------------------------------------------------
/**
@class: ObserverInterface
@brief class to derive an executor observer
The tf::ObserverInterface class allows users to define custom methods to monitor
the behaviors of an executor. This is particularly useful when you want to
inspect the performance of an executor and visualize when each thread
participates in the execution of a task.
To prevent users from direct access to the internal threads and tasks,
tf::ObserverInterface provides immutable wrappers,
tf::WorkerView and tf::TaskView, over workers and tasks.
Please refer to tf::WorkerView and tf::TaskView for details.
Example usage:
@code{.cpp}
struct MyObserver : public tf::ObserverInterface {
MyObserver(const std::string& name) {
std::cout << "constructing observer " << name << '\n';
}
void set_up(size_t num_workers) override final {
std::cout << "setting up observer with " << num_workers << " workers\n";
}
void on_entry(tf::WorkerView w, tf::TaskView tv) override final {
std::ostringstream oss;
oss << "worker " << w.id() << " ready to run " << tv.name() << '\n';
std::cout << oss.str();
}
void on_exit(tf::WorkerView w, tf::TaskView tv) override final {
std::ostringstream oss;
oss << "worker " << w.id() << " finished running " << tv.name() << '\n';
std::cout << oss.str();
}
};
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>("MyObserver");
// run the taskflow
executor.run(taskflow).wait();
@endcode
*/
class ObserverInterface {
public:
/**
@brief virtual destructor
*/
virtual ~ObserverInterface() = default;
/**
@brief constructor-like method to call when the executor observer is fully created
@param num_workers the number of the worker threads in the executor
*/
virtual void set_up(size_t num_workers) = 0;
/**
@brief method to call before a worker thread executes a closure
@param wv an immutable view of this worker thread
@param task_view a constant wrapper object to the task
*/
virtual void on_entry(WorkerView wv, TaskView task_view) = 0;
/**
@brief method to call after a worker thread has executed a closure
@param wv an immutable view of this worker thread
@param task_view a constant wrapper object to the task
*/
virtual void on_exit(WorkerView wv, TaskView task_view) = 0;
};
// ----------------------------------------------------------------------------
// ChromeObserver definition
// ----------------------------------------------------------------------------
/**
@class ChromeObserver
@brief class to create an observer based on Chrome tracing format
A tf::ChromeObserver inherits tf::ObserverInterface and defines methods to dump
the observed thread activities into a format that can be visualized through
@ChromeTracing.
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<tf::ChromeObserver> observer = executor.make_observer<tf::ChromeObserver>();
// run the taskflow
executor.run(taskflow).wait();
// dump the thread activities to a chrome-tracing format.
observer->dump(std::cout);
@endcode
*/
class ChromeObserver : public ObserverInterface {
friend class Executor;
// data structure to record each task execution
struct Segment {
std::string name;
observer_stamp_t beg;
observer_stamp_t end;
Segment(
const std::string& n,
observer_stamp_t b,
observer_stamp_t e
);
};
// data structure to store the entire execution timeline
struct Timeline {
observer_stamp_t origin;
std::vector<std::vector<Segment>> segments;
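// per-worker stacks of entry timestamps, pushed by on_entry and popped by on_exit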
std::vector<std::stack<observer_stamp_t>> stacks;
};
public:
/**
@brief dumps the timelines into a @ChromeTracing format through
an output stream
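Each observed task is emitted as one complete event (@c "ph":"X") whose
@c tid is the worker id and whose @c ts and @c dur fields are microseconds
relative to the observer origin.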
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the timelines into a @ChromeTracing format
*/
inline std::string dump() const;
/**
@brief clears the timeline data
*/
inline void clear();
/**
@brief queries the number of tasks observed
*/
inline size_t num_tasks() const;
private:
inline void set_up(size_t num_workers) override final;
inline void on_entry(WorkerView w, TaskView task_view) override final;
inline void on_exit(WorkerView w, TaskView task_view) override final;
Timeline _timeline;
};
// constructor
inline ChromeObserver::Segment::Segment(
const std::string& n, observer_stamp_t b, observer_stamp_t e
) :
name {n}, beg {b}, end {e} {
}
// Procedure: set_up
inline void ChromeObserver::set_up(size_t num_workers) {
_timeline.segments.resize(num_workers);
_timeline.stacks.resize(num_workers);
for(size_t w=0; w<num_workers; ++w) {
_timeline.segments[w].reserve(32);
}
_timeline.origin = observer_stamp_t::clock::now();
}
// Procedure: on_entry
inline void ChromeObserver::on_entry(WorkerView wv, TaskView) {
_timeline.stacks[wv.id()].push(observer_stamp_t::clock::now());
}
// Procedure: on_exit
inline void ChromeObserver::on_exit(WorkerView wv, TaskView tv) {
size_t w = wv.id();
assert(!_timeline.stacks[w].empty());
auto beg = _timeline.stacks[w].top();
_timeline.stacks[w].pop();
_timeline.segments[w].emplace_back(
tv.name(), beg, observer_stamp_t::clock::now()
);
}
// Function: clear
inline void ChromeObserver::clear() {
for(size_t w=0; w<_timeline.segments.size(); ++w) {
_timeline.segments[w].clear();
while(!_timeline.stacks[w].empty()) {
_timeline.stacks[w].pop();
}
}
}
// Procedure: dump
inline void ChromeObserver::dump(std::ostream& os) const {
using namespace std::chrono;
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
os << '[';
for(size_t w=first; w<_timeline.segments.size(); w++) {
if(w != first && _timeline.segments[w].size() > 0) {
os << ',';
}
for(size_t i=0; i<_timeline.segments[w].size(); i++) {
os << '{'<< "\"cat\":\"ChromeObserver\",";
// name field
os << "\"name\":\"";
if(_timeline.segments[w][i].name.empty()) {
os << w << '_' << i;
}
else {
os << _timeline.segments[w][i].name;
}
os << "\",";
// segment field
os << "\"ph\":\"X\","
<< "\"pid\":1,"
<< "\"tid\":" << w << ','
<< "\"ts\":" << duration_cast<microseconds>(
_timeline.segments[w][i].beg - _timeline.origin
).count() << ','
<< "\"dur\":" << duration_cast<microseconds>(
_timeline.segments[w][i].end - _timeline.segments[w][i].beg
).count();
if(i != _timeline.segments[w].size() - 1) {
os << "},";
}
else {
os << '}';
}
}
}
os << "]\n";
}
// Function: dump
inline std::string ChromeObserver::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Function: num_tasks
inline size_t ChromeObserver::num_tasks() const {
return std::accumulate(
_timeline.segments.begin(), _timeline.segments.end(), size_t{0},
[](size_t sum, const auto& exe){
return sum + exe.size();
}
);
}
// ----------------------------------------------------------------------------
// TFProfObserver definition
// ----------------------------------------------------------------------------
/**
@class TFProfObserver
@brief class to create an observer based on the built-in taskflow profiler format
A tf::TFProfObserver inherits tf::ObserverInterface and defines methods to dump
the observed thread activities into a format that can be visualized through
@TFProf.
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<tf::TFProfObserver> observer = executor.make_observer<tf::TFProfObserver>();
// run the taskflow
executor.run(taskflow).wait();
// dump the thread activities to Taskflow Profiler format.
observer->dump(std::cout);
@endcode
*/
class TFProfObserver : public ObserverInterface {
friend class Executor;
friend class TFProfManager;
/** @private overall task summary */
struct TaskSummary {
size_t count {0};
size_t total_span {0};
size_t min_span {0};
size_t max_span {0};
float avg_span() const { return total_span * 1.0f / count; }
};
/** @private worker summary at a level */
struct WorkerSummary {
size_t id;
size_t level;
size_t count {0};
size_t total_span {0};
size_t min_span{0};
size_t max_span{0};
std::array<TaskSummary, TASK_TYPES.size()> tsum;
float avg_span() const { return total_span * 1.0f / count; }
//return count < 2 ? 0.0f : total_delay * 1.0f / (count-1);
};
/** @private */
struct Summary {
std::array<TaskSummary, TASK_TYPES.size()> tsum;
std::vector<WorkerSummary> wsum;
void dump_tsum(std::ostream&) const;
void dump_wsum(std::ostream&) const;
void dump(std::ostream&) const;
};
public:
/**
@brief dumps the timelines into a @TFProf format through
an output stream
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the timelines into a JSON string
*/
std::string dump() const;
/**
@brief shows the summary report through an output stream
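A minimal usage sketch (assuming an executor, a taskflow, and an observer
created as in the class-level example above):
@code{.cpp}
executor.run(taskflow).wait();
observer->summary(std::cout); // print the summary report
@endcode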
*/
void summary(std::ostream& ostream) const;
/**
@brief returns the summary report in a string
*/
std::string summary() const;
/**
@brief clears the timeline data
*/
void clear();
/**
@brief queries the number of tasks observed
*/
size_t num_tasks() const;
/**
@brief queries the number of observed workers
*/
size_t num_workers() const;
private:
Timeline _timeline;
std::vector<std::stack<observer_stamp_t>> _stacks;
inline void set_up(size_t num_workers) override final;
inline void on_entry(WorkerView, TaskView) override final;
inline void on_exit(WorkerView, TaskView) override final;
};
// dump the task summary
inline void TFProfObserver::Summary::dump_tsum(std::ostream& os) const {
// task summary
size_t type_w{10}, count_w{5}, time_w{9}, avg_w{8}, min_w{8}, max_w{8};
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
count_w = std::max(count_w, std::to_string(i.count).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
time_w = std::max(time_w, std::to_string(i.total_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
avg_w = std::max(avg_w, std::to_string(i.avg_span()).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
min_w = std::max(min_w, std::to_string(i.min_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
max_w = std::max(max_w, std::to_string(i.max_span).size());
});
os << std::setw(type_w) << "-Task-"
<< std::setw(count_w+2) << "Count"
<< std::setw(time_w+2) << "Time (us)"
<< std::setw(avg_w+2) << "Avg (us)"
<< std::setw(min_w+2) << "Min (us)"
<< std::setw(max_w+2) << "Max (us)"
<< '\n';
for(size_t i=0; i<TASK_TYPES.size(); i++) {
if(tsum[i].count == 0) {
continue;
}
os << std::setw(type_w) << to_string(TASK_TYPES[i])
<< std::setw(count_w+2) << tsum[i].count
<< std::setw(time_w+2) << tsum[i].total_span
<< std::setw(avg_w+2) << std::to_string(tsum[i].avg_span())
<< std::setw(min_w+2) << tsum[i].min_span
<< std::setw(max_w+2) << tsum[i].max_span
<< '\n';
}
}
// dump the worker summary
inline void TFProfObserver::Summary::dump_wsum(std::ostream& os) const {
// worker summary
size_t w_w{10}, t_w{10}, l_w{5}, c_w{5}, d_w{9}, avg_w{8}, min_w{8}, max_w{8};
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
l_w = std::max(l_w, std::to_string(i.level).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
c_w = std::max(c_w, std::to_string(i.count).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
d_w = std::max(d_w, std::to_string(i.total_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
avg_w = std::max(avg_w, std::to_string(i.avg_span()).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
min_w = std::max(min_w, std::to_string(i.min_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
max_w = std::max(max_w, std::to_string(i.max_span).size());
});
os << std::setw(w_w) << "-Worker-"
<< std::setw(l_w+2) << "Level"
<< std::setw(t_w) << "Task"
<< std::setw(c_w+2) << "Count"
<< std::setw(d_w+2) << "Time (us)"
<< std::setw(avg_w+2) << "Avg (us)"
<< std::setw(min_w+2) << "Min (us)"
<< std::setw(max_w+2) << "Max (us)"
<< '\n';
for(const auto& ws : wsum) {
if(ws.count == 0) {
continue;
}
os << std::setw(w_w) << ws.id
<< std::setw(l_w+2) << ws.level;
bool first = true;
for(size_t i=0; i<TASK_TYPES.size(); i++) {
if(ws.tsum[i].count == 0) {
continue;
}
os << (first ? std::setw(t_w) : std::setw(w_w + l_w + 2 + t_w));
first = false;
os << to_string(TASK_TYPES[i])
<< std::setw(c_w+2) << ws.tsum[i].count
<< std::setw(d_w+2) << ws.tsum[i].total_span
<< std::setw(avg_w+2) << std::to_string(ws.tsum[i].avg_span())
<< std::setw(min_w+2) << ws.tsum[i].min_span
<< std::setw(max_w+2) << ws.tsum[i].max_span
<< '\n';
}
// per-worker summary
os << std::setw(w_w + l_w + t_w + c_w + 4) << ws.count
<< std::setw(d_w+2) << ws.total_span
<< std::setw(avg_w+2) << std::to_string(ws.avg_span())
<< std::setw(min_w+2) << ws.min_span
<< std::setw(max_w+2) << ws.max_span
<< '\n';
//for(size_t j=0; j<w_w+l_w+t_w+4; j++) os << ' ';
//for(size_t j=0; j<c_w+d_w+avg_w+min_w+max_w+8; j++) os << '-';
//os <<'\n';
}
}
// dump the summary report through an ostream
inline void TFProfObserver::Summary::dump(std::ostream& os) const {
dump_tsum(os);
os << '\n';
dump_wsum(os);
}
// Procedure: set_up
inline void TFProfObserver::set_up(size_t num_workers) {
_timeline.uid = unique_id<size_t>();
_timeline.origin = observer_stamp_t::clock::now();
_timeline.segments.resize(num_workers);
_stacks.resize(num_workers);
}
// Procedure: on_entry
inline void TFProfObserver::on_entry(WorkerView wv, TaskView) {
_stacks[wv.id()].push(observer_stamp_t::clock::now());
}
// Procedure: on_exit
inline void TFProfObserver::on_exit(WorkerView wv, TaskView tv) {
size_t w = wv.id();
assert(!_stacks[w].empty());
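// nested task executions on the same worker are recorded per level: the
// stack depth left after popping the matching entry time below selects the
// per-level segment list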
if(_stacks[w].size() > _timeline.segments[w].size()) {
_timeline.segments[w].resize(_stacks[w].size());
}
auto beg = _stacks[w].top();
_stacks[w].pop();
_timeline.segments[w][_stacks[w].size()].emplace_back(
tv.name(), tv.type(), beg, observer_stamp_t::clock::now()
);
}
// Function: clear
inline void TFProfObserver::clear() {
for(size_t w=0; w<_timeline.segments.size(); ++w) {
for(size_t l=0; l<_timeline.segments[w].size(); ++l) {
_timeline.segments[w][l].clear();
}
while(!_stacks[w].empty()) {
_stacks[w].pop();
}
}
}
// Procedure: dump
inline void TFProfObserver::dump(std::ostream& os) const {
using namespace std::chrono;
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
// no timeline data to dump
if(first == _timeline.segments.size()) {
os << "{}\n";
return;
}
os << "{\"executor\":\"" << _timeline.uid << "\",\"data\":[";
bool comma = false;
for(size_t w=first; w<_timeline.segments.size(); w++) {
for(size_t l=0; l<_timeline.segments[w].size(); l++) {
if(_timeline.segments[w][l].empty()) {
continue;
}
if(comma) {
os << ',';
}
else {
comma = true;
}
os << "{\"worker\":" << w << ",\"level\":" << l << ",\"data\":[";
for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) {
const auto& s = _timeline.segments[w][l][i];
if(i) os << ',';
// span
os << "{\"span\":["
<< duration_cast<microseconds>(s.beg - _timeline.origin).count()
<< ","
<< duration_cast<microseconds>(s.end - _timeline.origin).count()
<< "],";
// name
os << "\"name\":\"";
if(s.name.empty()) {
os << w << '_' << i;
}
else {
os << s.name;
}
os << "\",";
// e.g., category "type": "Condition Task"
os << "\"type\":\"" << to_string(s.type) << "\"";
os << "}";
}
os << "]}";
}
}
os << "]}\n";
}
// Function: dump
inline std::string TFProfObserver::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Procedure: summary
inline void TFProfObserver::summary(std::ostream& os) const {
using namespace std::chrono;
Summary summary;
std::optional<observer_stamp_t> view_beg, view_end;
// find the first non-empty worker
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
// no timeline data to dump
if(first == _timeline.segments.size()) {
goto end_of_summary;
}
for(size_t w=first; w<_timeline.segments.size(); w++) {
for(size_t l=0; l<_timeline.segments[w].size(); l++) {
if(_timeline.segments[w][l].empty()) {
continue;
}
// worker w at level l
WorkerSummary ws;
ws.id = w;
ws.level = l;
ws.count = _timeline.segments[w][l].size();
// scan all tasks at level l
for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) {
// update the entire span
auto& s = _timeline.segments[w][l][i];
view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg;
view_end = view_end ? std::max(*view_end, s.end) : s.end;
// update the task summary
size_t t = duration_cast<microseconds>(s.end - s.beg).count();
auto& x = summary.tsum[static_cast<int>(s.type)];
x.count += 1;
x.total_span += t;
x.min_span = (x.count == 1) ? t : std::min(t, x.min_span);
x.max_span = (x.count == 1) ? t : std::max(t, x.max_span);
// update the worker summary
ws.total_span += t;
ws.min_span = (i == 0) ? t : std::min(t, ws.min_span);
ws.max_span = (i == 0) ? t : std::max(t, ws.max_span);
auto&y = ws.tsum[static_cast<int>(s.type)];
y.count += 1;
y.total_span += t;
y.min_span = (y.count == 1) ? t : std::min(t, y.min_span);
y.max_span = (y.count == 1) ? t : std::max(t, y.max_span);
// update the delay
//if(i) {
// size_t d = duration_cast<nanoseconds>(
// s.beg - _timeline.segments[w][l][i-1].end
// ).count();
// ws.total_delay += d;
// ws.min_delay = (i == 1) ? d : std::min(ws.min_delay, d);
// ws.max_delay = (i == 1) ? d : std::max(ws.max_delay, d);
//}
}
summary.wsum.push_back(ws);
}
}
end_of_summary:
size_t view = 0;
if(view_beg && view_end) {
view = duration_cast<microseconds>(*view_end - *view_beg).count();
}
os << "==Observer " << _timeline.uid << ": "
<< num_workers() << " workers completed "
<< num_tasks() << " tasks in "
<< view << " us\n";
summary.dump(os);
}
// Procedure: summary
inline std::string TFProfObserver::summary() const {
std::ostringstream oss;
summary(oss);
return oss.str();
}
// Function: num_tasks
inline size_t TFProfObserver::num_tasks() const {
size_t s = 0;
for(size_t w=0; w<_timeline.segments.size(); ++w) {
for(size_t l=0; l<_timeline.segments[w].size(); ++l) {
s += _timeline.segments[w][l].size();
}
}
return s;
}
// Function: num_workers
inline size_t TFProfObserver::num_workers() const {
size_t w = 0;
for(size_t i=0; i<_timeline.segments.size(); ++i) {
w += (!_timeline.segments[i].empty());
}
return w;
}
// ----------------------------------------------------------------------------
// TFProfManager
// ----------------------------------------------------------------------------
/**
@private
*/
class TFProfManager {
friend class Executor;
public:
~TFProfManager();
TFProfManager(const TFProfManager&) = delete;
TFProfManager& operator=(const TFProfManager&) = delete;
static TFProfManager& get();
void dump(std::ostream& ostream) const;
private:
const std::string _fpath;
std::mutex _mutex;
std::vector<std::shared_ptr<TFProfObserver>> _observers;
TFProfManager();
void _manage(std::shared_ptr<TFProfObserver> observer);
};
// constructor
inline TFProfManager::TFProfManager() :
_fpath {get_env(TF_ENABLE_PROFILER)} {
}
// Procedure: manage
inline void TFProfManager::_manage(std::shared_ptr<TFProfObserver> observer) {
std::lock_guard lock(_mutex);
_observers.push_back(std::move(observer));
}
// Procedure: dump
inline void TFProfManager::dump(std::ostream& os) const {
for(size_t i=0; i<_observers.size(); ++i) {
if(i) os << ',';
_observers[i]->dump(os);
}
}
// Destructor
inline TFProfManager::~TFProfManager() {
std::ofstream ofs(_fpath);
if(ofs) {
// .tfp
if(_fpath.rfind(".tfp") != std::string::npos) {
ProfileData data;
data.timelines.reserve(_observers.size());
for(size_t i=0; i<_observers.size(); ++i) {
data.timelines.push_back(std::move(_observers[i]->_timeline));
}
Serializer<std::ofstream> serializer(ofs);
serializer(data);
}
// .json
else { // if(_fpath.rfind(".json") != std::string::npos) {
ofs << "[\n";
for(size_t i=0; i<_observers.size(); ++i) {
if(i) ofs << ',';
_observers[i]->dump(ofs);
}
ofs << "]\n";
}
}
// print a summary report to stderr for each observer
else {
std::ostringstream oss;
for(size_t i=0; i<_observers.size(); ++i) {
_observers[i]->summary(oss);
}
fprintf(stderr, "%s", oss.str().c_str());
}
}
// Function: get
inline TFProfManager& TFProfManager::get() {
static TFProfManager mgr;
return mgr;
}
// ----------------------------------------------------------------------------
// Identifier for Each Built-in Observer
// ----------------------------------------------------------------------------
/** @enum ObserverType
@brief enumeration of all observer types
*/
enum class ObserverType : int {
TFPROF = 0,
CHROME,
UNDEFINED
};
/**
@brief convert an observer type to a human-readable string
*/
inline const char* to_string(ObserverType type) {
switch(type) {
case ObserverType::TFPROF: return "tfprof";
case ObserverType::CHROME: return "chrome";
default: return "undefined";
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/notifier.hpp | // 2019/02/09 - created by Tsung-Wei Huang
// - modified the event count from Eigen
#pragma once
#include <iostream>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <atomic>
#include <memory>
#include <deque>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <algorithm>
#include <numeric>
#include <cassert>
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Dmitry Vyukov <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
namespace tf {
// Notifier allows waiting for arbitrary predicates in non-blocking
// algorithms. Think of a condition variable, but the wait predicate does not
// need to be protected by a mutex. Usage:
// Waiting thread does:
//
// if (predicate)
// return act();
// Notifier::Waiter& w = waiters[my_index];
// ec.prepare_wait(&w);
// if (predicate) {
// ec.cancel_wait(&w);
// return act();
// }
// ec.commit_wait(&w);
//
// Notifying thread does:
//
// predicate = true;
// ec.notify(true);
//
// notify is cheap if there are no waiting threads. prepare_wait/commit_wait are not
// cheap, but they are executed only if the preceding predicate check has
// failed.
//
// Algorithm outline:
// There are two main variables: predicate (managed by user) and _state.
// Operation closely resembles Dekker's mutual exclusion algorithm:
// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
// The waiting thread sets _state then checks the predicate; the notifying
// thread sets the predicate then checks _state. Due to the seq_cst fences
// between these operations it is guaranteed that either the waiter sees the
// predicate change and does not block, or the notifying thread sees the
// _state change and unblocks the waiter, or both. It can never happen that
// both threads miss each other's changes, which would lead to deadlock.
class Notifier {
friend class Executor;
public:
struct Waiter {
std::atomic<Waiter*> next;
std::mutex mu;
std::condition_variable cv;
uint64_t epoch;
unsigned state;
enum {
kNotSignaled,
kWaiting,
kSignaled,
};
};
explicit Notifier(size_t N) : _waiters{N} {
assert(_waiters.size() < (1 << kWaiterBits) - 1);
// Initialize epoch to something close to overflow to test overflow.
_state = kStackMask | (kEpochMask - kEpochInc * _waiters.size() * 2);
}
~Notifier() {
// Ensure there are no waiters.
assert((_state.load() & (kStackMask | kWaiterMask)) == kStackMask);
}
// prepare_wait prepares for waiting.
// After calling this function the thread must re-check the wait predicate
// and call either cancel_wait or commit_wait passing the same Waiter object.
void prepare_wait(Waiter* w) {
w->epoch = _state.fetch_add(kWaiterInc, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
}
// commit_wait commits waiting.
void commit_wait(Waiter* w) {
w->state = Waiter::kNotSignaled;
// Modification epoch of this waiter.
uint64_t epoch =
(w->epoch & kEpochMask) +
(((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
uint64_t state = _state.load(std::memory_order_seq_cst);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
// The preceding waiter has not decided on its fate. Wait until it
// calls either cancel_wait or commit_wait, or is notified.
std::this_thread::yield();
state = _state.load(std::memory_order_seq_cst);
continue;
}
// We've already been notified.
if (int64_t((state & kEpochMask) - epoch) > 0) return;
// Remove this thread from prewait counter and add it to the waiter list.
assert((state & kWaiterMask) != 0);
uint64_t newstate = state - kWaiterInc + kEpochInc;
//newstate = (newstate & ~kStackMask) | (w - &_waiters[0]);
newstate = static_cast<uint64_t>((newstate & ~kStackMask) | static_cast<uint64_t>(w - &_waiters[0]));
if ((state & kStackMask) == kStackMask)
w->next.store(nullptr, std::memory_order_relaxed);
else
w->next.store(&_waiters[state & kStackMask], std::memory_order_relaxed);
if (_state.compare_exchange_weak(state, newstate,
std::memory_order_release))
break;
}
_park(w);
}
// cancel_wait cancels effects of the previous prepare_wait call.
void cancel_wait(Waiter* w) {
uint64_t epoch =
(w->epoch & kEpochMask) +
(((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
uint64_t state = _state.load(std::memory_order_relaxed);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
// The preceding waiter has not decided on its fate. Wait until it
// calls either cancel_wait or commit_wait, or is notified.
std::this_thread::yield();
state = _state.load(std::memory_order_relaxed);
continue;
}
// We've already been notified.
if (int64_t((state & kEpochMask) - epoch) > 0) return;
// Remove this thread from prewait counter.
assert((state & kWaiterMask) != 0);
if (_state.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
std::memory_order_relaxed))
return;
}
}
// notify wakes one or all waiting threads.
// Must be called after changing the associated wait predicate.
void notify(bool all) {
std::atomic_thread_fence(std::memory_order_seq_cst);
uint64_t state = _state.load(std::memory_order_acquire);
for (;;) {
// Easy case: no waiters.
if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
return;
uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
uint64_t newstate;
if (all) {
// Reset prewait counter and empty wait list.
newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
} else if (waiters) {
// There is a thread in pre-wait state, unblock it.
newstate = state + kEpochInc - kWaiterInc;
} else {
// Pop a waiter from list and unpark it.
Waiter* w = &_waiters[state & kStackMask];
Waiter* wnext = w->next.load(std::memory_order_relaxed);
uint64_t next = kStackMask;
//if (wnext != nullptr) next = wnext - &_waiters[0];
if (wnext != nullptr) next = static_cast<uint64_t>(wnext - &_waiters[0]);
// Note: we don't add kEpochInc here. ABA problem on the lock-free stack
// can't happen because a waiter is re-pushed onto the stack only after
// it was in the pre-wait state which inevitably leads to epoch
// increment.
newstate = (state & kEpochMask) + next;
}
if (_state.compare_exchange_weak(state, newstate,
std::memory_order_acquire)) {
if (!all && waiters) return; // unblocked pre-wait thread
if ((state & kStackMask) == kStackMask) return;
Waiter* w = &_waiters[state & kStackMask];
if (!all) w->next.store(nullptr, std::memory_order_relaxed);
_unpark(w);
return;
}
}
}
// notify n workers
void notify_n(size_t n) {
if(n >= _waiters.size()) {
notify(true);
}
else {
for(size_t k=0; k<n; ++k) {
notify(false);
}
}
}
size_t size() const {
return _waiters.size();
}
private:
// _state layout:
// - low kStackBits bits: index of the top waiter on the committed-wait stack
//   (kStackMask when the stack is empty).
// - next kWaiterBits bits: count of waiters in the prewait state.
// - next kEpochBits bits: modification (epoch) counter.
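// With kStackBits = kWaiterBits = 16 and kEpochBits = 32 (as defined below),
// the 64-bit _state packs: bits [0,16) stack index, bits [16,32) prewait
// count, and bits [32,64) epoch counter.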
static const uint64_t kStackBits = 16;
static const uint64_t kStackMask = (1ull << kStackBits) - 1;
static const uint64_t kWaiterBits = 16;
static const uint64_t kWaiterShift = 16;
static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
<< kWaiterShift;
static const uint64_t kWaiterInc = 1ull << kWaiterBits;
static const uint64_t kEpochBits = 32;
static const uint64_t kEpochShift = 32;
static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
static const uint64_t kEpochInc = 1ull << kEpochShift;
std::atomic<uint64_t> _state;
std::vector<Waiter> _waiters;
void _park(Waiter* w) {
std::unique_lock<std::mutex> lock(w->mu);
while (w->state != Waiter::kSignaled) {
w->state = Waiter::kWaiting;
w->cv.wait(lock);
}
}
void _unpark(Waiter* waiters) {
Waiter* next = nullptr;
for (Waiter* w = waiters; w; w = next) {
next = w->next.load(std::memory_order_relaxed);
unsigned state;
{
std::unique_lock<std::mutex> lock(w->mu);
state = w->state;
w->state = Waiter::kSignaled;
}
// Avoid notifying if it wasn't waiting.
if (state == Waiter::kWaiting) w->cv.notify_one();
}
}
};
} // namespace tf ------------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/topology.hpp | #pragma once
namespace tf {
// ----------------------------------------------------------------------------
// class: TopologyBase
class TopologyBase {
friend class Executor;
friend class Node;
template <typename T>
friend class Future;
protected:
std::atomic<bool> _is_cancelled { false };
};
// ----------------------------------------------------------------------------
// class: AsyncTopology
class AsyncTopology : public TopologyBase {
};
// ----------------------------------------------------------------------------
// class: Topology
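// A Topology represents one submitted execution of a taskflow: it keeps the
// source nodes to schedule, the run-until predicate, the completion
// callback, and the promise fulfilled when the execution finishes.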
class Topology : public TopologyBase {
friend class Executor;
friend class Runtime;
public:
template <typename P, typename C>
Topology(Taskflow&, P&&, C&&);
private:
Taskflow& _taskflow;
std::promise<void> _promise;
SmallVector<Node*> _sources;
std::function<bool()> _pred;
std::function<void()> _call;
std::atomic<size_t> _join_counter {0};
};
// Constructor
template <typename P, typename C>
Topology::Topology(Taskflow& tf, P&& p, C&& c):
_taskflow(tf),
_pred {std::forward<P>(p)},
_call {std::forward<C>(c)} {
}
} // end of namespace tf. ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/semaphore.hpp | #pragma once
#include <vector>
#include <mutex>
#include "declarations.hpp"
/**
@file semaphore.hpp
@brief semaphore include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Semaphore
// ----------------------------------------------------------------------------
/**
@class Semaphore
@brief class to create a semaphore object for building a concurrency constraint
A semaphore creates a constraint that limits the maximum concurrency,
i.e., the number of workers, in a set of tasks.
You can let a task acquire/release one or multiple semaphores before/after
executing its work.
A task can acquire and release a semaphore,
or just acquire or just release it.
A tf::Semaphore object starts with an initial count.
As long as that count is above 0, tasks can acquire the semaphore and do
their work.
If the count is 0 or less, a task trying to acquire the semaphore will not run
and instead goes onto the waiting list of that semaphore.
When the semaphore is released by another task,
it reschedules all tasks on that waiting list.
@code{.cpp}
tf::Executor executor(8); // create an executor of 8 workers
tf::Taskflow taskflow;
tf::Semaphore semaphore(1); // create a semaphore with initial count 1
std::vector<tf::Task> tasks {
taskflow.emplace([](){ std::cout << "A" << std::endl; }),
taskflow.emplace([](){ std::cout << "B" << std::endl; }),
taskflow.emplace([](){ std::cout << "C" << std::endl; }),
taskflow.emplace([](){ std::cout << "D" << std::endl; }),
taskflow.emplace([](){ std::cout << "E" << std::endl; })
};
for(auto & task : tasks) { // each task acquires and releases the semaphore
task.acquire(semaphore);
task.release(semaphore);
}
executor.run(taskflow).wait();
@endcode
The above example creates five tasks with no dependencies between them.
Under normal circumstances, the five tasks would be executed concurrently.
However, this example has a semaphore with initial count 1,
and all tasks need to acquire that semaphore before running and release that
semaphore after they are done.
This arrangement limits the number of concurrently running tasks to only one.
*/
class Semaphore {
friend class Node;
public:
/**
@brief constructs a semaphore with the given counter
A semaphore creates a constraint that limits the maximum concurrency,
i.e., the number of workers, in a set of tasks.
@code{.cpp}
tf::Semaphore semaphore(4); // concurrency constraint of 4 workers
@endcode
*/
explicit Semaphore(size_t max_workers);
/**
@brief queries the counter value (not thread-safe during the run)
*/
size_t count() const;
private:
std::mutex _mtx;
size_t _counter;
std::vector<Node*> _waiters;
bool _try_acquire_or_wait(Node*);
std::vector<Node*> _release();
};
inline Semaphore::Semaphore(size_t max_workers) :
_counter(max_workers) {
}
inline bool Semaphore::_try_acquire_or_wait(Node* me) {
std::lock_guard<std::mutex> lock(_mtx);
if(_counter > 0) {
--_counter;
return true;
}
else {
_waiters.push_back(me);
return false;
}
}
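// releases the semaphore and returns the nodes that were waiting on it;
// the caller is responsible for rescheduling the returned nodes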
inline std::vector<Node*> Semaphore::_release() {
std::lock_guard<std::mutex> lock(_mtx);
++_counter;
std::vector<Node*> r{std::move(_waiters)};
return r;
}
inline size_t Semaphore::count() const {
return _counter;
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/taskflow.hpp | #pragma once
#include "flow_builder.hpp"
/**
@file taskflow/core/taskflow.hpp
@brief taskflow include file
*/
namespace tf {
// ----------------------------------------------------------------------------
/**
@class Taskflow
@brief class to create a taskflow object
A %taskflow manages a task dependency graph where each task represents a
callable object (e.g., @std_lambda, @std_function) and an edge represents a
dependency between two tasks. A task is one of the following types:
1. static task : the callable constructible from
@c std::function<void()>
2. dynamic task : the callable constructible from
@c std::function<void(tf::Subflow&)>
3. condition task : the callable constructible from
@c std::function<int()>
4. multi-condition task: the callable constructible from
@c %std::function<tf::SmallVector<int>()>
5. module task : the task constructed from tf::Taskflow::composed_of
6. runtime task : the callable constructible from
@c std::function<void(tf::Runtime&)>
7. %cudaFlow task : the callable constructible from
@c std::function<void(tf::cudaFlow&)> or
@c std::function<void(tf::cudaFlowCapturer&)>
8. %syclFlow task : the callable constructible from
@c std::function<void(tf::syclFlow&)>
Each task is a basic computation unit and is run by one worker thread
from an executor.
The following example creates a simple taskflow graph of four static tasks,
@c A, @c B, @c C, and @c D, where
@c A runs before @c B and @c C and
@c D runs after @c B and @c C.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow("simple");
tf::Task A = taskflow.emplace([](){ std::cout << "TaskA\n"; });
tf::Task B = taskflow.emplace([](){ std::cout << "TaskB\n"; });
tf::Task C = taskflow.emplace([](){ std::cout << "TaskC\n"; });
tf::Task D = taskflow.emplace([](){ std::cout << "TaskD\n"; });
A.precede(B, C); // A runs before B and C
D.succeed(B, C); // D runs after B and C
executor.run(taskflow).wait();
@endcode
The taskflow object itself is NOT thread-safe. You should not
modify the graph while it is running, for example by adding
new tasks, adding new dependencies, or moving the taskflow
to another object.
To minimize the overhead of task creation,
our runtime leverages a global object pool to recycle
tasks in a thread-safe manner.
Please refer to @ref Cookbook to learn more about each task type
and how to submit a taskflow to an executor.
*/
class Taskflow : public FlowBuilder {
friend class Topology;
friend class Executor;
friend class FlowBuilder;
struct Dumper {
size_t id;
std::stack<std::pair<const Node*, const Graph*>> stack;
std::unordered_map<const Graph*, size_t> visited;
};
public:
/**
@brief constructs a taskflow with the given name
@code{.cpp}
tf::Taskflow taskflow("My Taskflow");
std::cout << taskflow.name(); // "My Taskflow"
@endcode
*/
Taskflow(const std::string& name);
/**
@brief constructs a taskflow
*/
Taskflow();
/**
@brief constructs a taskflow from a moved taskflow
Constructing a taskflow @c taskflow1 from a moved taskflow @c taskflow2 will
migrate the graph of @c taskflow2 to @c taskflow1.
After the move, @c taskflow2 will become empty.
@code{.cpp}
tf::Taskflow taskflow1(std::move(taskflow2));
assert(taskflow2.empty());
@endcode
Notice that @c taskflow2 should not be running in an executor
during the move operation, or the behavior is undefined.
*/
Taskflow(Taskflow&& rhs);
/**
@brief move assignment operator
Moving a taskflow @c taskflow2 to another taskflow @c taskflow1 will destroy
the existing graph of @c taskflow1 and assign it the graph of @c taskflow2.
After the move, @c taskflow2 will become empty.
@code{.cpp}
taskflow1 = std::move(taskflow2);
assert(taskflow2.empty());
@endcode
Notice that both @c taskflow1 and @c taskflow2 should not be running
in an executor during the move operation, or the behavior is undefined.
*/
Taskflow& operator = (Taskflow&& rhs);
/**
@brief default destructor
When the destructor is called, all tasks and their associated data
(e.g., captured data) will be destroyed.
It is your responsibility to ensure all submitted execution of this
taskflow have completed before destroying it.
For instance, the following code results in undefined behavior
since the executor may still be running the taskflow while
it is destroyed after the block.
@code{.cpp}
{
tf::Taskflow taskflow;
executor.run(taskflow);
}
@endcode
To fix the problem, we must wait for the execution to complete
before destroying the taskflow.
@code{.cpp}
{
tf::Taskflow taskflow;
executor.run(taskflow).wait();
}
@endcode
*/
~Taskflow() = default;
/**
@brief dumps the taskflow to a DOT format through a std::ostream target
@code{.cpp}
taskflow.dump(std::cout); // dump the graph to the standard output
std::ofstream ofs("output.dot");
taskflow.dump(ofs); // dump the graph to the file output.dot
@endcode
For dynamically spawned tasks, such as module tasks, subflow tasks,
and GPU tasks, you need to run the taskflow first before you can
dump the entire graph.
@code{.cpp}
tf::Task parent = taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){ std::cout << "child\n"; });
});
taskflow.dump(std::cout); // this dumps only the parent tasks
executor.run(taskflow).wait();
taskflow.dump(std::cout); // this dumps both parent and child tasks
@endcode
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the taskflow to a std::string of DOT format
This method is similar to tf::Taskflow::dump(std::ostream& ostream),
but returning a string of the graph in DOT format.
*/
std::string dump() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief queries the emptiness of the taskflow
An empty taskflow has no tasks. That is, tf::Taskflow::num_tasks
returns zero.
*/
bool empty() const;
/**
@brief assigns a name to the taskflow
@code{.cpp}
taskflow.name("assign another name");
@endcode
*/
void name(const std::string&);
/**
@brief queries the name of the taskflow
@code{.cpp}
std::cout << "my name is: " << taskflow.name();
@endcode
*/
const std::string& name() const;
/**
@brief clears the associated task dependency graph
When you clear a taskflow, all tasks and their associated data
(e.g., captured data in task callables) will be destroyed.
The behavior of clearing a running taskflow is undefined.
*/
void clear();
/**
@brief applies a visitor to each task in the taskflow
A visitor is a callable that takes an argument of type tf::Task
and returns nothing. The following example iterates each task in a
taskflow and prints its name:
@code{.cpp}
taskflow.for_each_task([](tf::Task task){
std::cout << task.name() << '\n';
});
@endcode
*/
template <typename V>
void for_each_task(V&& visitor) const;
/**
@brief returns a reference to the underlying graph object
A graph object (of type tf::Graph) is the ultimate storage for the
task dependency graph and should only be used as an opaque
data structure to interact with the executor (e.g., composition).
*/
Graph& graph();
private:
mutable std::mutex _mutex;
std::string _name;
Graph _graph;
std::queue<std::shared_ptr<Topology>> _topologies;
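// position of this taskflow in an executor-managed list when it has been
// moved into an executor (see Executor::run(Taskflow&&)); the executor uses
// it to reclaim the moved taskflow once the execution finishes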
std::optional<std::list<Taskflow>::iterator> _satellite;
void _dump(std::ostream&, const Graph*) const;
void _dump(std::ostream&, const Node*, Dumper&) const;
void _dump(std::ostream&, const Graph*, Dumper&) const;
};
// Constructor
inline Taskflow::Taskflow(const std::string& name) :
FlowBuilder {_graph},
_name {name} {
}
// Constructor
inline Taskflow::Taskflow() : FlowBuilder{_graph} {
}
// Move constructor
inline Taskflow::Taskflow(Taskflow&& rhs) : FlowBuilder{_graph} {
std::scoped_lock<std::mutex> lock(rhs._mutex);
_name = std::move(rhs._name);
_graph = std::move(rhs._graph);
_topologies = std::move(rhs._topologies);
_satellite = rhs._satellite;
rhs._satellite.reset();
}
// Move assignment
inline Taskflow& Taskflow::operator = (Taskflow&& rhs) {
if(this != &rhs) {
std::scoped_lock<std::mutex, std::mutex> lock(_mutex, rhs._mutex);
_name = std::move(rhs._name);
_graph = std::move(rhs._graph);
_topologies = std::move(rhs._topologies);
_satellite = rhs._satellite;
rhs._satellite.reset();
}
return *this;
}
// Procedure:
inline void Taskflow::clear() {
_graph._clear();
}
// Function: num_tasks
inline size_t Taskflow::num_tasks() const {
return _graph.size();
}
// Function: empty
inline bool Taskflow::empty() const {
return _graph.empty();
}
// Function: name
inline void Taskflow::name(const std::string &name) {
_name = name;
}
// Function: name
inline const std::string& Taskflow::name() const {
return _name;
}
// Function: graph
inline Graph& Taskflow::graph() {
return _graph;
}
// Function: for_each_task
template <typename V>
void Taskflow::for_each_task(V&& visitor) const {
for(size_t i=0; i<_graph._nodes.size(); ++i) {
visitor(Task(_graph._nodes[i]));
}
}
// Procedure: dump
inline std::string Taskflow::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Function: dump
inline void Taskflow::dump(std::ostream& os) const {
os << "digraph Taskflow {\n";
_dump(os, &_graph);
os << "}\n";
}
// Procedure: _dump
inline void Taskflow::_dump(std::ostream& os, const Graph* top) const {
Dumper dumper;
dumper.id = 0;
dumper.stack.push({nullptr, top});
dumper.visited[top] = dumper.id++;
while(!dumper.stack.empty()) {
auto [p, f] = dumper.stack.top();
dumper.stack.pop();
os << "subgraph cluster_p" << f << " {\nlabel=\"";
// n-level module
if(p) {
os << 'm' << dumper.visited[f];
}
// top-level taskflow graph
else {
os << "Taskflow: ";
if(_name.empty()) os << 'p' << this;
else os << _name;
}
os << "\";\n";
_dump(os, f, dumper);
os << "}\n";
}
}
// Procedure: _dump
inline void Taskflow::_dump(
std::ostream& os, const Node* node, Dumper& dumper
) const {
os << 'p' << node << "[label=\"";
if(node->_name.empty()) os << 'p' << node;
else os << node->_name;
os << "\" ";
// shape for node
switch(node->_handle.index()) {
case Node::CONDITION:
case Node::MULTI_CONDITION:
os << "shape=diamond color=black fillcolor=aquamarine style=filled";
break;
case Node::RUNTIME:
os << "shape=component";
break;
case Node::CUDAFLOW:
os << " style=\"filled\""
<< " color=\"black\" fillcolor=\"purple\""
<< " fontcolor=\"white\""
<< " shape=\"folder\"";
break;
case Node::SYCLFLOW:
os << " style=\"filled\""
<< " color=\"black\" fillcolor=\"red\""
<< " fontcolor=\"white\""
<< " shape=\"folder\"";
break;
default:
break;
}
os << "];\n";
for(size_t s=0; s<node->_successors.size(); ++s) {
if(node->_is_conditioner()) {
// case edge is dashed
os << 'p' << node << " -> p" << node->_successors[s]
<< " [style=dashed label=\"" << s << "\"];\n";
} else {
os << 'p' << node << " -> p" << node->_successors[s] << ";\n";
}
}
// subflow join node
if(node->_parent && node->_parent->_handle.index() == Node::DYNAMIC &&
node->_successors.size() == 0
) {
os << 'p' << node << " -> p" << node->_parent << ";\n";
}
// node info
switch(node->_handle.index()) {
case Node::DYNAMIC: {
auto& sbg = std::get_if<Node::Dynamic>(&node->_handle)->subgraph;
if(!sbg.empty()) {
os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: ";
if(node->_name.empty()) os << 'p' << node;
else os << node->_name;
os << "\";\n" << "color=blue\n";
_dump(os, &sbg, dumper);
os << "}\n";
}
}
break;
case Node::CUDAFLOW: {
std::get_if<Node::cudaFlow>(&node->_handle)->graph->dump(
os, node, node->_name
);
}
break;
case Node::SYCLFLOW: {
std::get_if<Node::syclFlow>(&node->_handle)->graph->dump(
os, node, node->_name
);
}
break;
default:
break;
}
}
// Procedure: _dump
inline void Taskflow::_dump(
std::ostream& os, const Graph* graph, Dumper& dumper
) const {
for(const auto& n : graph->_nodes) {
// regular task
if(n->_handle.index() != Node::MODULE) {
_dump(os, n, dumper);
}
// module task
else {
//auto module = &(std::get_if<Node::Module>(&n->_handle)->module);
auto module = &(std::get_if<Node::Module>(&n->_handle)->graph);
os << 'p' << n << "[shape=box3d, color=blue, label=\"";
if(n->_name.empty()) os << 'p' << n;
else os << n->_name;
if(dumper.visited.find(module) == dumper.visited.end()) {
dumper.visited[module] = dumper.id++;
dumper.stack.push({n, module});
}
os << " [m" << dumper.visited[module] << "]\"];\n";
for(const auto s : n->_successors) {
os << 'p' << n << "->" << 'p' << s << ";\n";
}
}
}
}
// ----------------------------------------------------------------------------
// class definition: Future
// ----------------------------------------------------------------------------
/**
@class Future
@brief class to access the result of an execution
tf::Future is a derived class from std::future that will eventually hold the
execution result of a submitted taskflow (tf::Executor::run)
or an asynchronous task (tf::Executor::async, tf::Executor::silent_async).
In addition to the base methods inherited from std::future,
you can call tf::Future::cancel to cancel the execution of the running taskflow
associated with this future object.
The following example cancels a submission of a taskflow that contains
1000 tasks each running one second.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
for(int i=0; i<1000; i++) {
taskflow.emplace([](){
std::this_thread::sleep_for(std::chrono::seconds(1));
});
}
// submit the taskflow
tf::Future fu = executor.run(taskflow);
// request to cancel the submitted execution above
fu.cancel();
// wait until the cancellation finishes
fu.get();
@endcode
*/
template <typename T>
class Future : public std::future<T> {
friend class Executor;
friend class Subflow;
using handle_t = std::variant<
std::monostate, std::weak_ptr<Topology>, std::weak_ptr<AsyncTopology>
>;
// variant index
constexpr static auto ASYNC = get_index_v<std::weak_ptr<AsyncTopology>, handle_t>;
constexpr static auto TASKFLOW = get_index_v<std::weak_ptr<Topology>, handle_t>;
public:
/**
@brief default constructor
*/
Future() = default;
/**
@brief disabled copy constructor
*/
Future(const Future&) = delete;
/**
@brief default move constructor
*/
Future(Future&&) = default;
/**
@brief disabled copy assignment
*/
Future& operator = (const Future&) = delete;
/**
@brief default move assignment
*/
Future& operator = (Future&&) = default;
/**
@brief cancels the execution of the running taskflow associated with
this future object
@return @c true if the execution can be cancelled or
@c false if the execution has already completed
When you request a cancellation, the executor will stop scheduling
any tasks onwards. Tasks that are already running will continue to finish
(non-preemptive).
You can call tf::Future::wait to wait for the cancellation to complete.
*/
bool cancel();
private:
handle_t _handle;
template <typename P>
Future(std::future<T>&&, P&&);
};
template <typename T>
template <typename P>
Future<T>::Future(std::future<T>&& fu, P&& p) :
std::future<T> {std::move(fu)},
_handle {std::forward<P>(p)} {
}
// Function: cancel
template <typename T>
bool Future<T>::cancel() {
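// the handle weakly refers to either a taskflow topology or an async
// topology (or holds std::monostate); cancellation succeeds only while
// the referenced topology is still alive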
return std::visit([](auto&& arg){
using P = std::decay_t<decltype(arg)>;
if constexpr(std::is_same_v<P, std::monostate>) {
return false;
}
else {
auto ptr = arg.lock();
if(ptr) {
ptr->_is_cancelled.store(true, std::memory_order_relaxed);
return true;
}
return false;
}
}, _handle);
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/declarations.hpp | #pragma once
namespace tf {
// ----------------------------------------------------------------------------
// taskflow
// ----------------------------------------------------------------------------
class AsyncTopology;
class Node;
class Graph;
class FlowBuilder;
class Semaphore;
class Subflow;
class Runtime;
class Task;
class TaskView;
class Taskflow;
class Topology;
class TopologyBase;
class Executor;
class Worker;
class WorkerView;
class ObserverInterface;
class ChromeObserver;
class TFProfObserver;
class TFProfManager;
template <typename T>
class Future;
template <typename...Fs>
class Pipeline;
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
class cudaNode;
class cudaGraph;
class cudaTask;
class cudaFlow;
class cudaFlowCapturer;
class cudaFlowCapturerBase;
class cudaCapturingBase;
class cudaLinearCapturing;
class cudaSequentialCapturing;
class cudaRoundRobinCapturing;
// ----------------------------------------------------------------------------
// syclFlow
// ----------------------------------------------------------------------------
class syclNode;
class syclGraph;
class syclTask;
class syclFlow;
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/executor-module-opt.hpp | #pragma once
#include "observer.hpp"
#include "taskflow.hpp"
/**
@file executor-module-opt.hpp
@brief executor include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Executor Definition
// ----------------------------------------------------------------------------
/** @class Executor
@brief class to create an executor for running a taskflow graph
An executor manages a set of worker threads to run one or multiple taskflows
using an efficient work-stealing scheduling algorithm.
@code{.cpp}
// Declare an executor and a taskflow
tf::Executor executor;
tf::Taskflow taskflow;
// Add three tasks into the taskflow
tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; });
tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; });
tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; });
// Build precedence between tasks
A.precede(B, C);
tf::Future<void> fu = executor.run(taskflow);
fu.wait(); // block until the execution completes
executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait();
executor.run_n(taskflow, 4);
executor.wait_for_all(); // block until all associated executions finish
executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait();
executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; });
@endcode
All the @c run methods are @em thread-safe. You can submit multiple
taskflows at the same time to an executor from different threads.
*/
class Executor {
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
public:
/**
@brief constructs the executor with @c N worker threads
The constructor spawns @c N worker threads to run tasks in a
work-stealing loop. The number of workers must be greater than zero
or an exception will be thrown.
By default, the number of worker threads is equal to the maximum
hardware concurrency returned by std::thread::hardware_concurrency.
*/
explicit Executor(size_t N = std::thread::hardware_concurrency());
/**
@brief destructs the executor
The destructor calls Executor::wait_for_all to wait for all submitted
taskflows to complete and then notifies all worker threads to stop
and join these threads.
*/
~Executor();
/**
@brief runs a taskflow once
@param taskflow a tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run(Taskflow& taskflow);
/**
@brief runs a moved taskflow once
@param taskflow a moved tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(std::move(taskflow));
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run(Taskflow&& taskflow);
/**
@brief runs a taskflow once and invokes a callback upon completion
@param taskflow a tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow, [](){ std::cout << "done"; });
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run(Taskflow& taskflow, C&& callable);
/**
@brief runs a moved taskflow once and invokes a callback upon completion
@param taskflow a moved tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(
std::move(taskflow), [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run(Taskflow&& taskflow, C&& callable);
/**
@brief runs a taskflow for @c N times
@param taskflow a tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(taskflow, 2); // run taskflow 2 times
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run_n(Taskflow& taskflow, size_t N);
/**
@brief runs a moved taskflow for @c N times
@param taskflow a moved tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_n(
std::move(taskflow), 2 // run the moved taskflow 2 times
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run_n(Taskflow&& taskflow, size_t N);
/**
@brief runs a taskflow for @c N times and then invokes a callback
@param taskflow a tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
  taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invokes
// the lambda to print "done"
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run_n(Taskflow& taskflow, size_t N, C&& callable);
/**
@brief runs a moved taskflow @c N times and then invokes a callback
@param taskflow a moved tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
// run the moved taskflow 2 times and invoke the lambda to print "done"
std::move(taskflow), 2, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run_n(Taskflow&& taskflow, size_t N, C&& callable);
/**
@brief runs a taskflow multiple times until the predicate becomes true
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
  taskflow, [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred);
/**
@brief runs a moved taskflow and keeps running it
until the predicate becomes true
@param taskflow a moved tf::Taskflow object
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
  std::move(taskflow), [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred);
/**
@brief runs a taskflow multiple times until the predicate becomes true and
then invokes the callback
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
  taskflow, [](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred, C&& callable);
/**
@brief runs a moved taskflow and keeps running
it until the predicate becomes true and then invokes the callback
@param taskflow a moved tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
  std::move(taskflow),
  [](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred, C&& callable);
/**
@brief waits for all tasks to complete
This member function blocks until all submitted tasks
(e.g., taskflows, asynchronous tasks) have finished.
@code{.cpp}
executor.run(taskflow1);
executor.run_n(taskflow2, 10);
executor.run_n(taskflow3, 100);
executor.wait_for_all(); // wait until the above submitted taskflows finish
@endcode
*/
void wait_for_all();
/**
@brief queries the number of worker threads
Each worker represents one unique thread spawned by an executor
at its construction time.
@code{.cpp}
tf::Executor executor(4);
std::cout << executor.num_workers(); // 4
@endcode
*/
size_t num_workers() const noexcept;
/**
@brief queries the number of running topologies at the time of this call
When a taskflow is submitted to an executor, a topology is created to store
runtime metadata of the running taskflow.
When the execution of the submitted taskflow finishes,
its corresponding topology will be removed from the executor.
@code{.cpp}
executor.run(taskflow);
std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_topologies() const;
/**
@brief queries the number of running taskflows with moved ownership
@code{.cpp}
executor.run(std::move(taskflow));
std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_taskflows() const;
/**
@brief queries the id of the caller thread in this executor
Each worker has a unique id in the range of @c 0 to @c N-1 associated with
its parent executor.
If the caller thread does not belong to the executor, @c -1 is returned.
@code{.cpp}
tf::Executor executor(4); // 4 workers in the executor
executor.this_worker_id(); // -1 (main thread is not a worker)
taskflow.emplace([&](){
std::cout << executor.this_worker_id(); // 0, 1, 2, or 3
});
executor.run(taskflow);
@endcode
*/
int this_worker_id() const;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that holds the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
Unlike std::async, the returned @em tf::Future holds
an optional object to the result.
If the asynchronous task is cancelled before it runs, the future holds
a @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.async([](){
std::cout << "create an asynchronous task and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs a given function asynchronously and gives a name to this task
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that holds the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
Naming an asynchronous task is primarily used for profiling and visualizing
the task execution timeline.
Unlike std::async, the returned tf::Future holds
an optional object to the result.
If the asynchronous task is cancelled before it runs, the future holds
a @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.named_async("name", [](){
std::cout << "create an asynchronous task with a name and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::async but does not return a future object
This member function is more efficient than tf::Executor::async
and is encouraged when the task does not return any data.
@code{.cpp}
executor.silent_async([](){
std::cout << "create an asynchronous task with no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::named_async but does not return a future object
This member function is more efficient than tf::Executor::named_async
and is encouraged when the task does not return any data.
@code{.cpp}
executor.named_silent_async("name", [](){
std::cout << "create an asynchronous task with a name and no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief constructs an observer to inspect the activities of worker threads
@tparam Observer observer type derived from tf::ObserverInterface
@tparam ArgsT argument parameter pack
@param args arguments to forward to the constructor of the observer
@return a shared pointer to the created observer
Each executor manages a list of observers with shared ownership with callers.
For each of these observers, the two member functions,
tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit
will be called before and after the execution of a task.
This member function is not thread-safe.
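A minimal sketch, assuming a hypothetical @c MyObserver type derived from
tf::ObserverInterface (the executor calls @c set_up once with the worker
count, and @c on_entry / @c on_exit around each task):
@code{.cpp}
struct MyObserver : public tf::ObserverInterface {
  void set_up(size_t num_workers) override {
    std::cout << "observing " << num_workers << " workers\n";
  }
  void on_entry(tf::WorkerView wv, tf::TaskView tv) override {
    std::cout << "worker " << wv.id() << " is about to run a task\n";
  }
  void on_exit(tf::WorkerView wv, tf::TaskView tv) override {
    std::cout << "worker " << wv.id() << " finished running a task\n";
  }
};
std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>();
@endcode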
*/
template <typename Observer, typename... ArgsT>
std::shared_ptr<Observer> make_observer(ArgsT&&... args);
/**
@brief removes an observer from the executor
This member function is not thread-safe.
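A minimal usage sketch, assuming the hypothetical @c MyObserver type from
the example above:
@code{.cpp}
std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>();
// ... run taskflows ...
executor.remove_observer(observer);
@endcode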
*/
template <typename Observer>
void remove_observer(std::shared_ptr<Observer> observer);
/**
@brief queries the number of observers
*/
size_t num_observers() const noexcept;
private:
std::condition_variable _topology_cv;
std::mutex _taskflow_mutex;
std::mutex _topology_mutex;
std::mutex _wsq_mutex;
size_t _num_topologies {0};
std::unordered_map<std::thread::id, size_t> _wids;
std::vector<Worker> _workers;
std::vector<std::thread> _threads;
std::list<Taskflow> _taskflows;
Notifier _notifier;
TaskQueue<Node*> _wsq;
std::atomic<size_t> _num_actives {0};
std::atomic<size_t> _num_thieves {0};
std::atomic<bool> _done {false};
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
Worker* _this_worker();
bool _wait_for_task(Worker&, Node*&);
void _observer_prologue(Worker&, Node*);
void _observer_epilogue(Worker&, Node*);
void _spawn(size_t);
void _worker_loop(Worker&);
void _exploit_task(Worker&, Node*&);
void _explore_task(Worker&, Node*&);
void _consume_task(Worker&, Node*);
void _schedule(Worker&, Node*);
void _schedule(Node*);
void _schedule(Worker&, const SmallVector<Node*>&);
void _schedule(const SmallVector<Node*>&);
void _set_up_topology(Worker*, Topology*);
void _tear_down_topology(Worker&, Topology*);
void _tear_down_async(Node*);
void _tear_down_invoke(Worker&, Node*);
void _cancel_invoke(Worker&, Node*);
void _increment_topology();
void _decrement_topology();
void _decrement_topology_and_notify();
void _invoke(Worker&, Node*);
void _invoke_static_task(Worker&, Node*);
void _invoke_dynamic_task(Worker&, Node*);
void _invoke_dynamic_task_external(Worker&, Node*, Graph&, bool);
void _invoke_dynamic_task_internal(Worker&, Node*, Graph&);
void _invoke_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_module_task(Worker&, Node*, bool&);
void _invoke_module_task_internal(Worker&, Node*, Graph&, bool&);
void _invoke_async_task(Worker&, Node*);
void _invoke_silent_async_task(Worker&, Node*);
void _invoke_cudaflow_task(Worker&, Node*);
void _invoke_syclflow_task(Worker&, Node*);
void _invoke_runtime_task(Worker&, Node*);
template <typename C,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
void _invoke_cudaflow_task_entry(Node*, C&&);
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
void _invoke_syclflow_task_entry(Node*, C&&, Q&);
};
// Constructor
inline Executor::Executor(size_t N) :
_workers {N},
_notifier {N} {
if(N == 0) {
TF_THROW("no cpu workers to execute taskflows");
}
_spawn(N);
// instantiate the default observer if requested
if(has_env(TF_ENABLE_PROFILER)) {
TFProfManager::get()._manage(make_observer<TFProfObserver>());
}
}
// Destructor
inline Executor::~Executor() {
// wait for all topologies to complete
wait_for_all();
// shut down the scheduler
_done = true;
_notifier.notify(true);
for(auto& t : _threads){
t.join();
}
}
// Function: num_workers
inline size_t Executor::num_workers() const noexcept {
return _workers.size();
}
// Function: num_topologies
inline size_t Executor::num_topologies() const {
return _num_topologies;
}
// Function: num_taskflows
inline size_t Executor::num_taskflows() const {
return _taskflows.size();
}
// Function: _this_worker
inline Worker* Executor::_this_worker() {
auto itr = _wids.find(std::this_thread::get_id());
return itr == _wids.end() ? nullptr : &_workers[itr->second];
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... args) {
_increment_topology();
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Executor::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Executor::named_silent_async(
const std::string& name, F&& f, ArgsT&&... args
) {
_increment_topology();
Node* node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else {
_schedule(node);
}
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Executor::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: this_worker_id
inline int Executor::this_worker_id() const {
auto i = _wids.find(std::this_thread::get_id());
return i == _wids.end() ? -1 : static_cast<int>(_workers[i->second]._id);
}
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
std::mutex mutex;
std::condition_variable cond;
size_t n=0;
for(size_t id=0; id<N; ++id) {
_workers[id]._id = id;
_workers[id]._vtm = id;
_workers[id]._executor = this;
_workers[id]._waiter = &_notifier._waiters[id];
_threads.emplace_back([this] (
Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n
) -> void {
// enables the mapping
{
std::scoped_lock lock(mutex);
_wids[std::this_thread::get_id()] = w._id;
if(n++; n == num_workers()) {
cond.notify_one();
}
}
//this_worker().worker = &w;
Node* t = nullptr;
// must use 1 as condition instead of !done
while(1) {
// execute the tasks.
_exploit_task(w, t);
// wait for tasks
if(_wait_for_task(w, t) == false) {
break;
}
}
}, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n));
}
std::unique_lock<std::mutex> lock(mutex);
cond.wait(lock, [&](){ return n==N; });
}
// Function: _consume_task
inline void Executor::_consume_task(Worker& w, Node* p) {
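  // Keep this worker busy until the subtree rooted at p completes, i.e.,
  // its join counter reaches zero: drain the local queue first, then steal
  // from the shared queue or the victim worker's queue.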
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
while(p->_join_counter != 0) {
exploit:
if(auto t = w._wsq.pop(); t) {
_invoke(w, t);
}
else {
size_t num_steals = 0;
//size_t num_pauses = 0;
size_t max_steals = ((_workers.size() + 1) << 1);
explore:
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
_invoke(w, t);
goto exploit;
}
else if(p->_join_counter != 0){
if(num_steals++ > max_steals) {
std::this_thread::yield();
}
//std::this_thread::yield();
w._vtm = rdvtm(w._rdgen);
goto explore;
}
else {
break;
}
}
}
}
// Function: _explore_task
inline void Executor::_explore_task(Worker& w, Node*& t) {
//assert(_workers[w].wsq.empty());
//assert(!t);
size_t num_steals = 0;
size_t num_yields = 0;
size_t max_steals = ((_workers.size() + 1) << 1);
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
do {
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
break;
}
if(num_steals++ > max_steals) {
std::this_thread::yield();
if(num_yields++ > 100) {
break;
}
}
w._vtm = rdvtm(w._rdgen);
} while(!_done);
}
// Procedure: _exploit_task
inline void Executor::_exploit_task(Worker& w, Node*& t) {
if(t) {
if(_num_actives.fetch_add(1) == 0 && _num_thieves == 0) {
_notifier.notify(false);
}
while(t) {
_invoke(w, t);
t = w._wsq.pop();
}
--_num_actives;
}
}
// Function: _wait_for_task
inline bool Executor::_wait_for_task(Worker& worker, Node*& t) {
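  // Two-phase wait: explore (steal) for a task first; if none is found,
  // prepare a waiter on the notifier and re-check the shared queue and the
  // done flag before committing to sleep, so wake-ups are not lost.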
wait_for_task:
//assert(!t);
++_num_thieves;
explore_task:
_explore_task(worker, t);
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
_notifier.prepare_wait(worker._waiter);
//if(auto vtm = _find_vtm(me); vtm != _workers.size()) {
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
//t = (vtm == me) ? _wsq.steal() : _workers[vtm].wsq.steal();
t = _wsq.steal(); // must steal here
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
else {
worker._vtm = worker._id;
goto explore_task;
}
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
--_num_thieves;
return false;
}
if(_num_thieves.fetch_sub(1) == 1) {
if(_num_actives) {
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
// check all queues again
for(auto& w : _workers) {
if(!w._wsq.empty()) {
worker._vtm = w._id;
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
}
}
// Now I really need to relinquish myself to others
_notifier.commit_wait(worker._waiter);
return true;
}
// Function: make_observer
template<typename Observer, typename... ArgsT>
std::shared_ptr<Observer> Executor::make_observer(ArgsT&&... args) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
// use a local variable to mimic the constructor
auto ptr = std::make_shared<Observer>(std::forward<ArgsT>(args)...);
ptr->set_up(_workers.size());
_observers.emplace(std::static_pointer_cast<ObserverInterface>(ptr));
return ptr;
}
// Procedure: remove_observer
template <typename Observer>
void Executor::remove_observer(std::shared_ptr<Observer> ptr) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
_observers.erase(std::static_pointer_cast<ObserverInterface>(ptr));
}
// Function: num_observers
inline size_t Executor::num_observers() const noexcept {
return _observers.size();
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, Node* node) {
node->_state.fetch_or(Node::READY, std::memory_order_release);
// caller is a worker to this pool
if(worker._executor == this) {
worker._wsq.push(node);
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Node* node) {
node->_state.fetch_or(Node::READY, std::memory_order_release);
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(
Worker& worker, const SmallVector<Node*>& nodes
) {
// We need to cache the node count to avoid accessing the nodes
// vector while the parent topology is removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// make the node ready
for(size_t i=0; i<num_nodes; ++i) {
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
}
if(worker._executor == this) {
for(size_t i=0; i<num_nodes; ++i) {
worker._wsq.push(nodes[i]);
}
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
_wsq.push(nodes[k]);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _schedule
inline void Executor::_schedule(const SmallVector<Node*>& nodes) {
// parent topology may be removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// make the node ready
for(size_t i=0; i<num_nodes; ++i) {
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
_wsq.push(nodes[k]);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _invoke
inline void Executor::_invoke(Worker& worker, Node* node) {
int state;
SmallVector<int> conds;
// synchronize all outstanding memory operations caused by reordering
do {
state = node->_state.load(std::memory_order_acquire);
} while(! (state & Node::READY));
// unwind stack for deferred node
if(state & Node::DEFERRED) {
node->_state.fetch_and(~Node::DEFERRED, std::memory_order_relaxed);
goto invoke_epilogue;
}
//while(!(node->_state.load(std::memory_order_acquire) & Node::READY));
invoke_prologue:
// no need to do other things if the topology is cancelled
if(node->_is_cancelled()) {
_cancel_invoke(worker, node);
return;
}
// if acquiring semaphore(s) exists, acquire them first
if(node->_semaphores && !node->_semaphores->to_acquire.empty()) {
SmallVector<Node*> nodes;
if(!node->_acquire_all(nodes)) {
_schedule(worker, nodes);
return;
}
node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release);
}
// condition task
//int cond = -1;
//SmallVector<int> conds = { -1 };
// switch is faster than nested if-else due to jump table
switch(node->_handle.index()) {
// static task
case Node::STATIC:{
_invoke_static_task(worker, node);
}
break;
// dynamic task
case Node::DYNAMIC: {
_invoke_dynamic_task(worker, node);
}
break;
// condition task
case Node::CONDITION: {
_invoke_condition_task(worker, node, conds);
}
break;
// multi-condition task
case Node::MULTI_CONDITION: {
_invoke_multi_condition_task(worker, node, conds);
}
break;
// module task
case Node::MODULE: {
bool deferred = false;
_invoke_module_task(worker, node, deferred);
if(deferred) {
return;
}
}
break;
// async task
case Node::ASYNC: {
_invoke_async_task(worker, node);
_tear_down_async(node);
return;
}
break;
// silent async task
case Node::SILENT_ASYNC: {
_invoke_silent_async_task(worker, node);
_tear_down_async(node);
return;
}
break;
// cudaflow task
case Node::CUDAFLOW: {
_invoke_cudaflow_task(worker, node);
}
break;
// syclflow task
case Node::SYCLFLOW: {
_invoke_syclflow_task(worker, node);
}
break;
// runtime task
case Node::RUNTIME: {
_invoke_runtime_task(worker, node);
}
break;
// monostate (placeholder)
default:
break;
}
invoke_epilogue:
// if releasing semaphores exist, release them
if(node->_semaphores && !node->_semaphores->to_release.empty()) {
_schedule(worker, node->_release_all());
}
// We MUST recover the dependency since the graph may have cycles.
// This must be done before scheduling the successors, otherwise this might cause
// race condition on the _dependents
if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) {
node->_join_counter = node->num_strong_dependents();
}
else {
node->_join_counter = node->num_dependents();
}
// acquire the parent flow counter
auto& j = (node->_parent) ? node->_parent->_join_counter :
node->_topology->_join_counter;
Node* cache {nullptr};
// At this point, the node storage might be destructed (to be verified)
// case 1: non-condition task
switch(node->_handle.index()) {
// condition and multi-condition tasks
case Node::CONDITION:
case Node::MULTI_CONDITION: {
for(auto cond : conds) {
if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) {
auto s = node->_successors[cond];
// zeroing the join counter to preserve the invariant
s->_join_counter.store(0, std::memory_order_relaxed);
j.fetch_add(1);
if(cache) {
_schedule(worker, cache);
}
cache = s;
}
}
}
break;
// non-condition task
default: {
for(size_t i=0; i<node->_successors.size(); ++i) {
if(--(node->_successors[i]->_join_counter) == 0) {
j.fetch_add(1);
if(cache) {
_schedule(worker, cache);
}
cache = node->_successors[i];
}
}
}
break;
}
// tear_down the invoke
_tear_down_invoke(worker, node);
// perform tail recursion elimination for the right-most child to reduce
// the number of expensive pop/push operations through the task queue
if(cache) {
node = cache;
//node->_state.fetch_or(Node::READY, std::memory_order_release);
goto invoke_prologue;
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1);
}
else {
_decrement_topology_and_notify();
}
node_pool.recycle(node);
}
// Proecdure: _tear_down_invoke
inline void Executor::_tear_down_invoke(Worker& worker, Node* node) {
// we must check the parent first before subtracting the join counter,
// or it can introduce a data race
if(auto parent = node->_parent; parent == nullptr) {
if(node->_topology->_join_counter.fetch_sub(1) == 1) {
_tear_down_topology(worker, node->_topology);
}
}
else {
// prefetch the deferred status, as subtracting the join counter can
// immediately cause the other worker to release the subflow
auto deferred = parent->_state.load(std::memory_order_relaxed) & Node::DEFERRED;
if(parent->_join_counter.fetch_sub(1) == 1 && deferred) {
_schedule(worker, parent);
}
}
}
// Procedure: _cancel_invoke
inline void Executor::_cancel_invoke(Worker& worker, Node* node) {
switch(node->_handle.index()) {
// async task needs to carry out the promise
case Node::ASYNC:
std::get_if<Node::Async>(&(node->_handle))->work(true);
_tear_down_async(node);
break;
// silent async doesn't need to carry out the promise
case Node::SILENT_ASYNC:
_tear_down_async(node);
break;
// tear down topology if the node is the last leaf
default: {
_tear_down_invoke(worker, node);
}
break;
}
}
// Procedure: _observer_prologue
inline void Executor::_observer_prologue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_entry(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _observer_epilogue
inline void Executor::_observer_epilogue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_exit(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _invoke_static_task
inline void Executor::_invoke_static_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::Static>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_dynamic_task
inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) {
_observer_prologue(w, node);
auto handle = std::get_if<Node::Dynamic>(&node->_handle);
handle->subgraph._clear();
Subflow sf(*this, w, node, handle->subgraph);
handle->work(sf);
if(sf._joinable) {
_invoke_dynamic_task_internal(w, node, handle->subgraph);
}
_observer_epilogue(w, node);
}
// Procedure: _invoke_dynamic_task_external
inline void Executor::_invoke_dynamic_task_external(
Worker& w, Node* p, Graph& g, bool detach
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
if(detach) {
n->_parent = nullptr;
n->_state.fetch_or(Node::DETACHED, std::memory_order_relaxed);
}
else {
n->_parent = p;
}
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
// detach here
if(detach) {
{
std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex);
p->_topology->_taskflow._graph._merge(std::move(g));
}
p->_topology->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// join here
else {
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_consume_task(w, p);
}
}
// Procedure: _invoke_dynamic_task_internal
inline void Executor::_invoke_dynamic_task_internal(
Worker& w, Node* p, Graph& g
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_consume_task(w, p);
}
// Procedure: _invoke_module_task_internal
inline void Executor::_invoke_module_task_internal(
Worker& w, Node* p, Graph& g, bool& deferred
) {
// graph is empty and has no async tasks
if(g.empty()) {
return;
}
// set deferred
deferred = true;
p->_state.fetch_or(Node::DEFERRED, std::memory_order_relaxed);
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// Procedure: _invoke_condition_task
inline void Executor::_invoke_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = { std::get_if<Node::Condition>(&node->_handle)->work() };
_observer_epilogue(worker, node);
}
// Procedure: _invoke_multi_condition_task
inline void Executor::_invoke_multi_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = std::get_if<Node::MultiCondition>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_cudaflow_task
inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::cudaFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_syclflow_task
inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::syclFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_module_task
inline void Executor::_invoke_module_task(Worker& w, Node* node, bool& deferred) {
_observer_prologue(w, node);
_invoke_module_task_internal(
w, node, std::get_if<Node::Module>(&node->_handle)->graph, deferred
);
_observer_epilogue(w, node);
}
// Procedure: _invoke_async_task
inline void Executor::_invoke_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::Async>(&node->_handle)->work(false);
_observer_epilogue(w, node);
}
// Procedure: _invoke_silent_async_task
inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::SilentAsync>(&node->_handle)->work();
_observer_epilogue(w, node);
}
// Procedure: _invoke_runtime_task
inline void Executor::_invoke_runtime_task(Worker& w, Node* node) {
_observer_prologue(w, node);
Runtime rt(*this, w, node);
std::get_if<Node::Runtime>(&node->_handle)->work(rt);
_observer_epilogue(w, node);
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow& f) {
return run_n(f, 1, [](){});
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow&& f) {
return run_n(std::move(f), 1, [](){});
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow& f, C&& c) {
return run_n(f, 1, std::forward<C>(c));
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow&& f, C&& c) {
return run_n(std::move(f), 1, std::forward<C>(c));
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat) {
return run_n(f, repeat, [](){});
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat) {
return run_n(std::move(f), repeat, [](){});
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat, C&& c) {
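  // the predicate is evaluated once before each run, so it returns true
  // (and stops the run loop) after exactly `repeat` executions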
return run_until(
f, [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat, C&& c) {
return run_until(
std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow& f, P&& pred) {
return run_until(f, std::forward<P>(pred), [](){});
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) {
return run_until(std::move(f), std::forward<P>(pred), [](){});
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) {
_increment_topology();
// Need to check emptiness under the lock since a dynamic task may
// define detached blocks that modify the taskflow at the same time
bool empty;
{
std::lock_guard<std::mutex> lock(f._mutex);
empty = f.empty();
}
// No need to create a real topology; just return a dummy future
if(empty || p()) {
c();
std::promise<void> promise;
promise.set_value();
_decrement_topology_and_notify();
return tf::Future<void>(promise.get_future(), std::monostate{});
}
// create a topology for this run
auto t = std::make_shared<Topology>(f, std::forward<P>(p), std::forward<C>(c));
// need to create the future before the topology gets torn down quickly
tf::Future<void> future(t->_promise.get_future(), t);
// modifying topology needs to be protected under the lock
{
std::lock_guard<std::mutex> lock(f._mutex);
f._topologies.push(t);
if(f._topologies.size() == 1) {
_set_up_topology(_this_worker(), t.get());
}
}
return future;
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) {
std::list<Taskflow>::iterator itr;
{
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
itr = _taskflows.emplace(_taskflows.end(), std::move(f));
itr->_satellite = itr;
}
return run_until(*itr, std::forward<P>(pred), std::forward<C>(c));
}
// Procedure: _increment_topology
inline void Executor::_increment_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
++_num_topologies;
}
// Procedure: _decrement_topology_and_notify
inline void Executor::_decrement_topology_and_notify() {
std::lock_guard<std::mutex> lock(_topology_mutex);
if(--_num_topologies == 0) {
_topology_cv.notify_all();
}
}
// Procedure: _decrement_topology
inline void Executor::_decrement_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
--_num_topologies;
}
// Procedure: wait_for_all
inline void Executor::wait_for_all() {
std::unique_lock<std::mutex> lock(_topology_mutex);
_topology_cv.wait(lock, [&](){ return _num_topologies == 0; });
}
// Function: _set_up_topology
inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) {
// ---- under taskflow lock ----
tpg->_sources.clear();
tpg->_taskflow._graph._clear_detached();
// scan each node in the graph and build up the links
for(auto node : tpg->_taskflow._graph._nodes) {
node->_topology = tpg;
node->_state.store(0, std::memory_order_relaxed);
if(node->num_dependents() == 0) {
tpg->_sources.push_back(node);
}
node->_set_up_join_counter();
}
tpg->_join_counter = tpg->_sources.size();
if(worker) {
_schedule(*worker, tpg->_sources);
}
else {
_schedule(tpg->_sources);
}
}
// Function: _tear_down_topology
inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) {
auto &f = tpg->_taskflow;
//assert(&tpg == &(f._topologies.front()));
// case 1: we still need to run the topology again
if(!tpg->_is_cancelled && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
tpg->_join_counter = tpg->_sources.size();
_schedule(worker, tpg->_sources);
}
// case 2: the final run of this topology
else {
// TODO: if the topology is cancelled, need to release all semaphores
if(tpg->_call != nullptr) {
tpg->_call();
}
// If there is another pending run (runs can interleave with this lock)
if(std::unique_lock<std::mutex> lock(f._mutex); f._topologies.size()>1) {
//assert(tpg->_join_counter == 0);
// Set the promise
tpg->_promise.set_value();
f._topologies.pop();
tpg = f._topologies.front().get();
// decrement the topology count; since this is not the last one, we don't notify
_decrement_topology();
// set up topology needs to be under the lock or it can
// introduce memory order error with pop
_set_up_topology(&worker, tpg);
}
else {
//assert(f._topologies.size() == 1);
// Need to back up the promise first here because the taskflow might be
// destroyed soon after calling get
auto p {std::move(tpg->_promise)};
// Back up the lambda capture in case it holds the topology pointer,
// to avoid releasing it on pop ahead of _mutex.unlock and
// _promise.set_value. It is released safely when leaving this scope.
auto c {std::move(tpg->_call)};
// Get the satellite if any
auto s {f._satellite};
// Now we remove the topology from this taskflow
f._topologies.pop();
//f._mutex.unlock();
lock.unlock();
// We set the promise in the end in case taskflow leaves the scope.
// After set_value, the caller will return from wait
p.set_value();
_decrement_topology_and_notify();
// remove the taskflow if it is managed by the executor
// TODO: in the future, we may need to synchronize on wait
// (which means the following code should be moved before set_value)
if(s) {
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
_taskflows.erase(*s);
}
}
}
}
// ############################################################################
// Forward Declaration: Subflow
// ############################################################################
inline void Subflow::join() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow not joinable");
}
// only the parent worker can join the subflow
_executor._invoke_dynamic_task_external(_worker, _parent, _graph, false);
_joinable = false;
}
inline void Subflow::detach() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow already joined or detached");
}
// only the parent worker can detach the subflow
_executor._invoke_dynamic_task_external(_worker, _parent, _graph, true);
_joinable = false;
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) {
return _named_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: _named_async
template <typename F, typename... ArgsT>
auto Subflow::_named_async(
Worker& w,
const std::string& name,
F&& f,
ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Subflow::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: _named_silent_async
template <typename F, typename... ArgsT>
void Subflow::_named_silent_async(
Worker& w, const std::string& name, F&& f, ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
auto node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) {
_named_silent_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Subflow::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// ############################################################################
// Forward Declaration: Runtime
// ############################################################################
// Procedure: schedule
inline void Runtime::schedule(Task task) {
auto node = task._node;
auto& j = node->_parent ? node->_parent->_join_counter :
node->_topology->_join_counter;
j.fetch_add(1);
_executor._schedule(_worker, node);
}
// Procedure: run
template <typename C>
void Runtime::run(C&& callable) {
// dynamic task (subflow)
if constexpr(is_dynamic_task_v<C>) {
Graph graph;
Subflow sf(_executor, _worker, _parent, graph);
callable(sf);
if(sf._joinable) {
_executor._invoke_dynamic_task_internal(_worker, _parent, graph);
}
}
else {
static_assert(dependent_false_v<C>, "unsupported task callable to run");
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/worker.hpp | #pragma once
#include "declarations.hpp"
#include "tsq.hpp"
#include "notifier.hpp"
/**
@file worker.hpp
@brief worker include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Class Definition: Worker
// ----------------------------------------------------------------------------
/**
@class Worker
@brief class to create a worker in an executor
The class is primarily used by the executor to perform the work-stealing algorithm.
Users can access a worker object and alter its property
(e.g., changing the thread affinity in a POSIX-like system)
using tf::WorkerInterface.
*/
class Worker {
friend class Executor;
friend class WorkerView;
public:
/**
@brief queries the worker id associated with its parent executor
A worker id is an unsigned integer in the range <tt>[0, N)</tt>,
where @c N is the number of workers spawned at the construction
time of the executor.
*/
inline size_t id() const { return _id; }
/**
@brief acquires a pointer access to the underlying thread
*/
inline std::thread* thread() const { return _thread; }
/**
@brief queries the size of the queue (i.e., number of enqueued tasks to
run) associated with the worker
*/
inline size_t queue_size() const { return _wsq.size(); }
/**
@brief queries the current capacity of the queue
*/
inline size_t queue_capacity() const { return static_cast<size_t>(_wsq.capacity()); }
private:
size_t _id;
size_t _vtm;
Executor* _executor;
std::thread* _thread;
Notifier::Waiter* _waiter;
std::default_random_engine _rdgen { std::random_device{}() };
TaskQueue<Node*> _wsq;
};
// ----------------------------------------------------------------------------
// Class Definition: PerThreadWorker
// ----------------------------------------------------------------------------
/**
@private
*/
//struct PerThreadWorker {
//
// Worker* worker;
//
// PerThreadWorker() : worker {nullptr} {}
//
// PerThreadWorker(const PerThreadWorker&) = delete;
// PerThreadWorker(PerThreadWorker&&) = delete;
//
// PerThreadWorker& operator = (const PerThreadWorker&) = delete;
// PerThreadWorker& operator = (PerThreadWorker&&) = delete;
//};
/**
@private
*/
//inline PerThreadWorker& this_worker() {
// thread_local PerThreadWorker worker;
// return worker;
//}
// ----------------------------------------------------------------------------
// Class Definition: WorkerView
// ----------------------------------------------------------------------------
/**
@class WorkerView
@brief class to create an immutable view of a worker in an executor
An executor keeps a set of internal worker threads to run tasks.
A worker view provides users an immutable interface to observe
when a worker runs a task, and the view object is only accessible
from an observer derived from tf::ObserverInterface.
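A minimal sketch, assuming a hypothetical @c MyObserver type derived from
tf::ObserverInterface:
@code{.cpp}
void MyObserver::on_entry(tf::WorkerView wv, tf::TaskView tv) {
  std::cout << "worker " << wv.id() << " has "
            << wv.queue_size() << " queued tasks\n";
}
@endcode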
*/
class WorkerView {
friend class Executor;
public:
/**
@brief queries the worker id associated with its parent executor
A worker id is an unsigned integer in the range <tt>[0, N)</tt>,
where @c N is the number of workers spawned at the construction
time of the executor.
*/
size_t id() const;
/**
@brief queries the size of the queue (i.e., number of pending tasks to
run) associated with the worker
*/
size_t queue_size() const;
/**
@brief queries the current capacity of the queue
*/
size_t queue_capacity() const;
private:
WorkerView(const Worker&);
WorkerView(const WorkerView&) = default;
const Worker& _worker;
};
// Constructor
inline WorkerView::WorkerView(const Worker& w) : _worker{w} {
}
// function: id
inline size_t WorkerView::id() const {
return _worker._id;
}
// Function: queue_size
inline size_t WorkerView::queue_size() const {
return _worker._wsq.size();
}
// Function: queue_capacity
inline size_t WorkerView::queue_capacity() const {
return static_cast<size_t>(_worker._wsq.capacity());
}
// ----------------------------------------------------------------------------
// Class Definition: WorkerInterface
// ----------------------------------------------------------------------------
/**
@class WorkerInterface
@brief class to configure worker behavior in an executor
The tf::WorkerInterface class lets users interact with the executor
to customize the worker behavior,
such as calling custom methods before and after a worker enters and leaves
the loop.
When you create an executor, it spawns a set of workers to run tasks.
The interaction between the executor and its spawned workers looks like
the following:
@code{.cpp}
for(size_t n=0; n<num_workers; n++) {
  create_thread([](Worker& worker){
    // pre-processing executor-specific worker information
    // ...
    // enter the scheduling loop
    // Here, WorkerInterface::scheduler_prologue is invoked, if any
    while(1) {
      perform_work_stealing_algorithm();
      if(stop) {
        break;
      }
    }
    // leaves the scheduling loop and joins this worker thread
    // Here, WorkerInterface::scheduler_epilogue is invoked, if any
  });
}
@endcode
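A minimal sketch of a custom worker interface; the pinning call below is a
hypothetical placeholder, since actual thread-affinity control is
platform-specific:
@code{.cpp}
struct PinnedWorker : public tf::WorkerInterface {
  void scheduler_prologue(tf::Worker& worker) override {
    // hypothetical helper: pin this worker thread to CPU core worker.id()
    // pin_this_thread_to_core(worker.id());
    std::cout << "worker " << worker.id() << " enters the scheduling loop\n";
  }
  void scheduler_epilogue(tf::Worker& worker, std::exception_ptr) override {
    std::cout << "worker " << worker.id() << " leaves the scheduling loop\n";
  }
};
@endcode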
@note
Methods defined in tf::WorkerInterface are not thread-safe and may be
invoked by multiple workers concurrently.
*/
class WorkerInterface {
public:
/**
@brief default destructor
*/
virtual ~WorkerInterface() = default;
/**
@brief method to call before a worker enters the scheduling loop
@param worker a reference to the worker
The method is called by the constructor of an executor.
*/
virtual void scheduler_prologue(Worker& worker) = 0;
/**
@brief method to call after a worker leaves the scheduling loop
@param worker a reference to the worker
@param ptr a pointer to the exception thrown by the scheduling loop
The method is called by the worker thread after it leaves the scheduling loop.
*/
virtual void scheduler_epilogue(Worker& worker, std::exception_ptr ptr) = 0;
};
/**
@fn make_worker_interface
@brief helper function to create an instance derived from tf::WorkerInterface
@param args arguments to forward to the constructor of @c T
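A minimal usage sketch, assuming the hypothetical @c PinnedWorker type from
the example above:
@code{.cpp}
auto wi = tf::make_worker_interface<PinnedWorker>();
@endcode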
*/
template <typename T, typename... ArgsT>
std::shared_ptr<T> make_worker_interface(ArgsT&&... args) {
static_assert(
std::is_base_of_v<WorkerInterface, T>,
"T must be derived from WorkerInterface"
);
return std::make_shared<T>(std::forward<ArgsT>(args)...);
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/core/flow_builder.hpp | #pragma once
#include "task.hpp"
/**
@file flow_builder.hpp
@brief flow builder include file
*/
namespace tf {
/**
@class FlowBuilder
@brief class to build a task dependency graph
The class provides essential methods to construct a task dependency graph
from which tf::Taskflow and tf::Subflow are derived.
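A minimal sketch of building a two-task dependency graph through
tf::Taskflow, which derives from this class:
@code{.cpp}
tf::Taskflow taskflow;  // tf::Taskflow derives from tf::FlowBuilder
tf::Task A = taskflow.emplace([](){ std::cout << "A\n"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B\n"; });
A.precede(B);  // A runs before B
@endcode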
*/
class FlowBuilder {
friend class Executor;
public:
/**
@brief constructs a flow builder with a graph
*/
FlowBuilder(Graph& graph);
/**
@brief creates a static task
@tparam C callable type constructible from std::function<void()>
@param callable callable to construct a static task
@return a tf::Task handle
The following example creates a static task.
@code{.cpp}
tf::Task static_task = taskflow.emplace([](){});
@endcode
Please refer to @ref StaticTasking for details.
*/
template <typename C,
std::enable_if_t<is_static_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a dynamic task
@tparam C callable type constructible from std::function<void(tf::Subflow&)>
@param callable callable to construct a dynamic task
@return a tf::Task handle
The following example creates a dynamic task (tf::Subflow)
that spawns two static tasks.
@code{.cpp}
tf::Task dynamic_task = taskflow.emplace([](tf::Subflow& sf){
tf::Task static_task1 = sf.emplace([](){});
tf::Task static_task2 = sf.emplace([](){});
});
@endcode
Please refer to @ref DynamicTasking for details.
*/
template <typename C,
std::enable_if_t<is_dynamic_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a condition task
@tparam C callable type constructible from std::function<int()>
@param callable callable to construct a condition task
@return a tf::Task handle
The following example creates an if-else block using one condition task
and three static tasks.
@code{.cpp}
tf::Taskflow taskflow;
auto [init, cond, yes, no] = taskflow.emplace(
[] () { },
[] () { return 0; },
[] () { std::cout << "yes\n"; },
[] () { std::cout << "no\n"; }
);
// executes yes if cond returns 0, or no if cond returns 1
cond.precede(yes, no);
cond.succeed(init);
@endcode
Please refer to @ref ConditionalTasking for details.
*/
template <typename C,
std::enable_if_t<is_condition_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a multi-condition task
@tparam C callable type constructible from
std::function<tf::SmallVector<int>()>
@param callable callable to construct a multi-condition task
@return a tf::Task handle
The following example creates a multi-condition task that selectively
jumps to two successor tasks.
@code{.cpp}
tf::Taskflow taskflow;
auto [init, cond, branch1, branch2, branch3] = taskflow.emplace(
[] () { },
[] () { return tf::SmallVector{0, 2}; },
[] () { std::cout << "branch1\n"; },
[] () { std::cout << "branch2\n"; },
[] () { std::cout << "branch3\n"; }
);
// executes branch1 and branch3 when cond returns 0 and 2
cond.precede(branch1, branch2, branch3);
cond.succeed(init);
@endcode
Please refer to @ref ConditionalTasking for details.
*/
template <typename C,
std::enable_if_t<is_multi_condition_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates multiple tasks from a list of callable objects
@tparam C callable types
@param callables one or multiple callable objects constructible from each task category
@return a tf::Task handle
The method returns a tuple of tasks each corresponding to the given
callable target. You can use structured binding to get the return tasks
one by one.
The following example creates four static tasks and assign them to
@c A, @c B, @c C, and @c D using structured binding.
@code{.cpp}
auto [A, B, C, D] = taskflow.emplace(
[] () { std::cout << "A"; },
[] () { std::cout << "B"; },
[] () { std::cout << "C"; },
[] () { std::cout << "D"; }
);
@endcode
*/
template <typename... C, std::enable_if_t<(sizeof...(C)>1), void>* = nullptr>
auto emplace(C&&... callables);
/**
@brief removes a task from a taskflow
@param task task to remove
Removes a task and its input and output dependencies from the graph
associated with the flow builder.
If the task does not belong to the graph, nothing will happen.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
A.precede(B, C, D);
// erase A from the taskflow and its dependencies to B, C, and D
taskflow.erase(A);
@endcode
*/
void erase(Task task);
/**
@brief creates a module task for the target object
@tparam T target object type
@param object a custom object that defines the method @c T::graph()
@return a tf::Task handle
The example below demonstrates a taskflow composition using
the @c composed_of method.
@code{.cpp}
tf::Taskflow t1, t2;
t1.emplace([](){ std::cout << "t1"; });
// t2 is partially composed of t1
tf::Task comp = t2.composed_of(t1);
tf::Task init = t2.emplace([](){ std::cout << "t2"; });
init.precede(comp);
@endcode
The taskflow object @c t2 is composed of another taskflow object @c t1,
preceded by another static task @c init.
When taskflow @c t2 is submitted to an executor,
@c init will run first and then @c comp, which spawns its definition
in taskflow @c t1.
The target @c object being composed must define the method
<tt>T::graph()</tt> that returns a reference to a graph object of
type tf::Graph such that it can interact with the executor.
For example:
@code{.cpp}
// custom struct
struct MyObj {
  tf::Graph _graph;
  MyObj() {
    tf::FlowBuilder builder(_graph);
    tf::Task task = builder.emplace([](){
      std::cout << "a task\n"; // static task
    });
  }
  tf::Graph& graph() { return _graph; }
};
MyObj obj;
tf::Task comp = taskflow.composed_of(obj);
@endcode
Please refer to @ref ComposableTasking for details.
*/
template <typename T>
Task composed_of(T& object);
/**
@brief creates a placeholder task
@return a tf::Task handle
A placeholder task maps to a node in the taskflow graph, but
it does not have any callable work assigned yet.
A placeholder task is different from an empty task handle that
does not point to any node in a graph.
@code{.cpp}
// create a placeholder task with no callable target assigned
tf::Task placeholder = taskflow.placeholder();
assert(placeholder.empty() == false && placeholder.has_work() == false);
// create an empty task handle
tf::Task task;
assert(task.empty() == true);
// assign the task handle to the placeholder task
task = placeholder;
assert(task.empty() == false && task.has_work() == false);
@endcode
*/
Task placeholder();
/**
@brief creates a %cudaFlow task on the caller's GPU device context
@tparam C callable type constructible from @c std::function<void(tf::cudaFlow&)>
@return a tf::Task handle
This method is equivalent to calling tf::FlowBuilder::emplace_on(callable, d)
where @c d is the caller's device context.
The following example creates a %cudaFlow of two kernel tasks, @c task1 and
@c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace([&](tf::cudaFlow& cf){
// create two kernel tasks
tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);
// kernel1 runs before kernel2
task1.precede(task2);
});
@endcode
Please refer to @ref GPUTaskingcudaFlow and @ref GPUTaskingcudaFlowCapturer
for details.
*/
template <typename C,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a %cudaFlow task on the given device
@tparam C callable type constructible from std::function<void(tf::cudaFlow&)>
@tparam D device type, either @c int or @c std::ref<int> (stateful)
@return a tf::Task handle
The following example creates a %cudaFlow of two kernel tasks, @c task1 and
@c task2 on GPU @c 2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace_on([&](tf::cudaFlow& cf){
// create two kernel tasks
tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);
// kernel1 runs before kernel2
task1.precede(task2);
}, 2);
@endcode
*/
template <typename C, typename D,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
Task emplace_on(C&& callable, D&& device);
/**
@brief creates a %syclFlow task on the default queue
@tparam C callable type constructible from std::function<void(tf::syclFlow&)>
@param callable a callable that takes a referenced tf::syclFlow object
@return a tf::Task handle
The following example creates a %syclFlow on the default queue to submit
two kernel tasks, @c task1 and @c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace([&](tf::syclFlow& cf){
// create two single-thread kernel tasks
tf::syclTask task1 = cf.single_task([](){});
tf::syclTask task2 = cf.single_task([](){});
// task1 runs before task2
task1.precede(task2);
});
@endcode
*/
template <typename C, std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr>
Task emplace(C&& callable);
/**
@brief creates a %syclFlow task on the given queue
@tparam C callable type constructible from std::function<void(tf::syclFlow&)>
@tparam Q queue type
@param callable a callable that takes a referenced tf::syclFlow object
@param queue a queue of type sycl::queue
@return a tf::Task handle
The following example creates a %syclFlow on the given queue to submit
two kernel tasks, @c task1 and @c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace_on([&](tf::syclFlow& cf){
// create two single-thread kernel tasks
tf::syclTask task1 = cf.single_task([](){});
tf::syclTask task2 = cf.single_task([](){});
// task1 runs before task2
task1.precede(task2);
}, queue);
@endcode
*/
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
Task emplace_on(C&& callable, Q&& queue);
/**
@brief creates a runtime task
@tparam C callable type constructible from std::function<void(tf::Runtime&)>
@param callable callable to construct a runtime task
@return a tf::Task handle
The following example creates a runtime task that enables in-task
control over the running executor.
@code{.cpp}
tf::Task runtime_task = taskflow.emplace([](tf::Runtime& rt){
auto& executor = rt.executor();
std::cout << executor.num_workers() << '\n';
});
@endcode
Please refer to @ref RuntimeTasking for details.
*/
template <typename C,
std::enable_if_t<is_runtime_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief adds adjacent dependency links to a linear list of tasks
@param tasks a vector of tasks
This member function creates linear dependencies over a vector of tasks.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
std::vector<tf::Task> tasks {A, B, C, D};
taskflow.linearize(tasks); // A->B->C->D
@endcode
*/
void linearize(std::vector<Task>& tasks);
/**
@brief adds adjacent dependency links to a linear list of tasks
@param tasks an initializer list of tasks
This member function creates linear dependencies over a list of tasks.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
taskflow.linearize({A, B, C, D}); // A->B->C->D
@endcode
*/
void linearize(std::initializer_list<Task> tasks);
// ------------------------------------------------------------------------
// parallel iterations
// ------------------------------------------------------------------------
/**
@brief constructs an STL-styled parallel-for task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable a callable object to apply to the dereferenced iterator
@return a tf::Task handle
The task spawns a subflow that applies the callable object to each object
obtained by dereferencing every iterator in the range <tt>[first, last)</tt>.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
callable(*itr);
}
@endcode
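For example, the following sketch (assuming a std::vector<int> named
@c data) doubles every element in parallel:
@code{.cpp}
tf::Task task = taskflow.for_each(data.begin(), data.end(),
  [](int& v){ v *= 2; }   // applied to each dereferenced iterator
);
@endcode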
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of
the dereferenced iterator type.
Please refer to @ref ParallelIterations for details.
*/
template <typename B, typename E, typename C>
Task for_each(B first, E last, C callable);
/**
@brief constructs an index-based parallel-for task
@tparam B beginning index type (must be integral)
@tparam E ending index type (must be integral)
@tparam S step type (must be integral)
@tparam C callable type
@param first index of the beginning (inclusive)
@param last index of the end (exclusive)
@param step step size
@param callable a callable object to apply to each valid index
@return a tf::Task handle
The task spawns a subflow that applies the callable object to each index
in the range <tt>[first, last)</tt> with the step size.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
// case 1: step size is positive
for(auto i=first; i<last; i+=step) {
callable(i);
}
// case 2: step size is negative
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
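For example, the following sketch prints the even indices
0, 2, 4, ..., 98 (in unspecified order):
@code{.cpp}
tf::Task task = taskflow.for_each_index(0, 100, 2,
  [](int i){ std::cout << i << '\n'; }
);
@endcode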
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of the integral index type.
Please refer to @ref ParallelIterations for details.
*/
template <typename B, typename E, typename S, typename C>
Task for_each_index(B first, E last, S step, C callable);
// ------------------------------------------------------------------------
// transform
// ------------------------------------------------------------------------
/**
@brief constructs a parallel-transform task
@tparam B beginning input iterator type
@tparam E ending input iterator type
@tparam O output iterator type
@tparam C callable type
@param first1 iterator to the beginning of the first range
@param last1 iterator to the end of the first range
@param d_first iterator to the beginning of the output range
@param c an unary callable to apply to dereferenced input elements
@return a tf::Task handle
The task spawns a subflow that applies the callable object to an
input range and stores the result in another output range.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
while (first1 != last1) {
*d_first++ = c(*first1++);
}
@endcode
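For example, the following sketch (assuming two equally sized
std::vector<int> objects @c src and @c dst) negates each input element:
@code{.cpp}
tf::Task task = taskflow.transform(src.begin(), src.end(), dst.begin(),
  [](int v){ return -v; }
);
@endcode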
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of the dereferenced
iterator type.
*/
template <typename B, typename E, typename O, typename C>
Task transform(B first1, E last1, O d_first, C c);
/**
@brief constructs a parallel-transform task
@tparam B1 beginning input iterator type for the first input range
@tparam E1 ending input iterator type for the first input range
@tparam B2 beginning input iterator type for the second input range
@tparam O output iterator type
@tparam C callable type
@param first1 iterator to the beginning of the first input range
@param last1 iterator to the end of the first input range
@param first2 iterator to the beginning of the second input range
@param d_first iterator to the beginning of the output range
@param c a binary operator to apply to dereferenced input elements
@return a tf::Task handle
The task spawns a subflow that applies the callable object to two
input ranges and stores the result in another output range.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
while (first1 != last1) {
*d_first++ = c(*first1++, *first2++);
}
@endcode
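For example, the following sketch (assuming three equally sized
std::vector<int> objects @c a, @c b, and @c c) computes an element-wise sum:
@code{.cpp}
tf::Task task = taskflow.transform(a.begin(), a.end(), b.begin(), c.begin(),
  [](int x, int y){ return x + y; }
);
@endcode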
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take two arguments of dereferenced elements
from the two input ranges.
*/
template <typename B1, typename E1, typename B2, typename O, typename C>
Task transform(B1 first1, E1 last1, B2 first2, O d_first, C c);
// ------------------------------------------------------------------------
// reduction
// ------------------------------------------------------------------------
/**
@brief constructs an STL-styled parallel-reduce task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam T result type
@tparam O binary reducer type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param init initial value of the reduction and the storage for the reduced result
@param bop binary operator that will be applied
@return a tf::Task handle
The task spawns a subflow to perform parallel reduction over @c init
and the elements in the range <tt>[first, last)</tt>.
The reduced result is stored in @c init.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
init = bop(init, *itr);
}
@endcode
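For example, the following sketch (assuming a std::vector<int> named
@c data that outlives the execution) sums all elements into @c sum:
@code{.cpp}
int sum = 0;
tf::Task task = taskflow.reduce(data.begin(), data.end(), sum,
  [](int a, int b){ return a + b; }   // binary reducer
);
@endcode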
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelReduction for details.
*/
template <typename B, typename E, typename T, typename O>
Task reduce(B first, E last, T& init, O bop);
// ------------------------------------------------------------------------
// transform and reduction
// ------------------------------------------------------------------------
/**
@brief constructs an STL-styled parallel transform-reduce task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam T result type
@tparam BOP binary reducer type
@tparam UOP unary transformation type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param init initial value of the reduction and the storage for the reduced result
@param bop binary operator that will be applied in unspecified order to the results of @c uop
@param uop unary operator that will be applied to transform each element in the range to the result type
@return a tf::Task handle
The task spawns a subflow to perform parallel reduction over @c init and
the transformed elements in the range <tt>[first, last)</tt>.
The reduced result is stored in @c init.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
init = bop(init, uop(*itr));
}
@endcode
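For example, the following sketch (assuming a std::vector<std::string>
named @c words) accumulates the total number of characters into @c count:
@code{.cpp}
size_t count = 0;
tf::Task task = taskflow.transform_reduce(words.begin(), words.end(), count,
  [](size_t a, size_t b){ return a + b; },        // binary reducer
  [](const std::string& s){ return s.size(); }    // unary transform
);
@endcode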
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelReduction for details.
*/
template <typename B, typename E, typename T, typename BOP, typename UOP>
Task transform_reduce(B first, E last, T& init, BOP bop, UOP uop);
// ------------------------------------------------------------------------
// sort
// ------------------------------------------------------------------------
/**
@brief constructs a dynamic task to perform STL-styled parallel sort
@tparam B beginning iterator type (random-accessible)
@tparam E ending iterator type (random-accessible)
@tparam C comparator type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param cmp comparison function object
The task spawns a subflow to sort the elements in the range
<tt>[first, last)</tt> in parallel.
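For example, the following sketch (assuming a std::vector<int> named
@c data) sorts the elements in descending order:
@code{.cpp}
tf::Task task = taskflow.sort(data.begin(), data.end(),
  [](int a, int b){ return a > b; }   // descending comparator
);
@endcode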
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelSort for details.
*/
template <typename B, typename E, typename C>
Task sort(B first, E last, C cmp);
/**
@brief constructs a dynamic task to perform STL-styled parallel sort using
the @c std::less<T> comparator, where @c T is the element type
@tparam B beginning iterator type (random-accessible)
@tparam E ending iterator type (random-accessible)
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
The task spawns a subflow to sort the elements in the range
<tt>[first, last)</tt> in parallel, using the @c std::less<T> comparator,
where @c T is the dereferenced iterator type.
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelSort for details.
*/
template <typename B, typename E>
Task sort(B first, E last);
protected:
/**
@brief associated graph object
*/
Graph& _graph;
private:
template <typename L>
void _linearize(L&);
};
// Constructor
inline FlowBuilder::FlowBuilder(Graph& graph) :
_graph {graph} {
}
// Function: emplace
template <typename C, std::enable_if_t<is_static_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Static>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_dynamic_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Dynamic>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Condition>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_multi_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::MultiCondition>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_runtime_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Runtime>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename... C, std::enable_if_t<(sizeof...(C)>1), void>*>
auto FlowBuilder::emplace(C&&... cs) {
return std::make_tuple(emplace(std::forward<C>(cs))...);
}
// Function: erase
inline void FlowBuilder::erase(Task task) {
if (!task._node) {
return;
}
task.for_each_dependent([&] (Task dependent) {
auto& S = dependent._node->_successors;
if(auto I = std::find(S.begin(), S.end(), task._node); I != S.end()) {
S.erase(I);
}
});
task.for_each_successor([&] (Task dependent) {
auto& D = dependent._node->_dependents;
if(auto I = std::find(D.begin(), D.end(), task._node); I != D.end()) {
D.erase(I);
}
});
_graph._erase(task._node);
}
// Function: composed_of
template <typename T>
Task FlowBuilder::composed_of(T& object) {
auto node = _graph._emplace_back(
std::in_place_type_t<Node::Module>{}, object
);
return Task(node);
}
// Function: placeholder
inline Task FlowBuilder::placeholder() {
auto node = _graph._emplace_back();
return Task(node);
}
// Procedure: _linearize
template <typename L>
void FlowBuilder::_linearize(L& keys) {
auto itr = keys.begin();
auto end = keys.end();
if(itr == end) {
return;
}
auto nxt = itr;
for(++nxt; nxt != end; ++nxt, ++itr) {
itr->_node->_precede(nxt->_node);
}
}
// Procedure: linearize
inline void FlowBuilder::linearize(std::vector<Task>& keys) {
_linearize(keys);
}
// Procedure: linearize
inline void FlowBuilder::linearize(std::initializer_list<Task> keys) {
_linearize(keys);
}
// ----------------------------------------------------------------------------
/**
@class Subflow
@brief class to construct a subflow graph from the execution of a dynamic task
By default, a subflow automatically @em joins its parent node.
You may explicitly join or detach a subflow by calling tf::Subflow::join
or tf::Subflow::detach, respectively.
The following example creates a taskflow graph that spawns a subflow from
the execution of task @c B, and the subflow contains three tasks, @c B1,
@c B2, and @c B3, where @c B3 runs after @c B1 and @c B2.
@code{.cpp}
// create three static tasks
tf::Task A = taskflow.emplace([](){}).name("A");
tf::Task C = taskflow.emplace([](){}).name("C");
tf::Task D = taskflow.emplace([](){}).name("D");
// create a subflow graph (dynamic tasking)
tf::Task B = taskflow.emplace([] (tf::Subflow& subflow) {
tf::Task B1 = subflow.emplace([](){}).name("B1");
tf::Task B2 = subflow.emplace([](){}).name("B2");
tf::Task B3 = subflow.emplace([](){}).name("B3");
B1.precede(B3);
B2.precede(B3);
}).name("B");
A.precede(B); // B runs after A
A.precede(C); // C runs after A
B.precede(D); // D runs after B
C.precede(D); // D runs after C
@endcode
*/
class Subflow : public FlowBuilder {
friend class Executor;
friend class FlowBuilder;
friend class Runtime;
public:
/**
@brief enables the subflow to join its parent task
Performs an immediate action to join the subflow. Once the subflow is joined,
it is considered finished and you may not modify the subflow anymore.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
sf.join(); // join the subflow of one task
});
@endcode
Only the worker that spawns this subflow can join it.
*/
void join();
/**
@brief enables the subflow to detach from its parent task
Performs an immediate action to detach the subflow. Once the subflow is detached,
it is considered finished and you may not modify the subflow anymore.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
sf.detach();
});
@endcode
Only the worker that spawns this subflow can detach it.
*/
void detach();
/**
@brief resets the subflow to a joinable state
@param clear_graph specifies whether to clear the associated graph (default @c true)
Clears the underlying task graph depending on the
given variable @c clear_graph (default @c true) and then
updates the subflow to a joinable state.
*/
void reset(bool clear_graph = true);
/**
@brief queries if the subflow is joinable
This member function queries if the subflow is joinable.
When a subflow is joined or detached, it becomes not joinable.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
std::cout << sf.joinable() << '\n'; // true
sf.join();
std::cout << sf.joinable() << '\n'; // false
});
@endcode
*/
bool joinable() const noexcept;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
The difference from tf::Executor::async is that the created asynchronous task
pertains to the subflow.
When the subflow joins, all asynchronous tasks created from the subflow
are guaranteed to finish before the join.
For example:
@code{.cpp}
std::atomic<int> counter(0);
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.async([&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This method is thread-safe and can be called by multiple tasks in the
subflow at the same time.
@attention
You cannot create asynchronous tasks from a detached subflow.
Doing this results in undefined behavior.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs the given function asynchronously and assigns the task a name
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
The difference from tf::Executor::async is that the created asynchronous task
pertains to the subflow.
When the subflow joins, all asynchronous tasks created from the subflow
are guaranteed to finish before the join.
For example:
@code{.cpp}
std::atomic<int> counter(0);
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.async("name", [&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This method is thread-safe and can be called by multiple tasks in the
subflow at the same time.
@attention
You cannot create named asynchronous tasks from a detached subflow.
Doing this results in undefined behavior.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Subflow::async but does not return a future object
This member function is more efficient than tf::Subflow::async
and is encouraged when you do not need the return value.
@code{.cpp}
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.silent_async([&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Subflow::named_async but does not return a future object
This member function is more efficient than tf::Subflow::named_async
and is encouraged when you do not need the return value.
@code{.cpp}
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.named_silent_async("name", [&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief returns the executor that runs this subflow
*/
inline Executor& executor();
private:
Executor& _executor;
Worker& _worker;
Node* _parent;
bool _joinable {true};
Subflow(Executor&, Worker&, Node*, Graph&);
template <typename F, typename... ArgsT>
auto _named_async(Worker& w, const std::string& name, F&& f, ArgsT&&... args);
template <typename F, typename... ArgsT>
void _named_silent_async(Worker& w, const std::string& name, F&& f, ArgsT&&... args);
};
// Constructor
inline Subflow::Subflow(
Executor& executor, Worker& worker, Node* parent, Graph& graph
) :
FlowBuilder {graph},
_executor {executor},
_worker {worker},
_parent {parent} {
// assert(_parent != nullptr);
}
// Function: joinable
inline bool Subflow::joinable() const noexcept {
return _joinable;
}
// Function: executor
inline Executor& Subflow::executor() {
return _executor;
}
// Procedure: reset
inline void Subflow::reset(bool clear_graph) {
if(clear_graph) {
_graph._clear();
}
_joinable = true;
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/traits.hpp | #pragma once
#include <type_traits>
#include <iterator>
#include <iostream>
#include <fstream>
#include <mutex>
#include <stack>
#include <queue>
#include <vector>
#include <algorithm>
#include <memory>
#include <atomic>
#include <thread>
#include <future>
#include <functional>
#include <unordered_map>
#include <unordered_set>
#include <sstream>
#include <list>
#include <numeric>
#include <random>
#include <iomanip>
#include <cassert>
#include <cmath>
#include <array>
#include <string>
#include <variant>
#include <optional>
#include "os.hpp"
namespace tf {
//-----------------------------------------------------------------------------
// Traits
//-----------------------------------------------------------------------------
//// Struct: dependent_false
//template <typename... T>
//struct dependent_false {
// static constexpr bool value = false;
//};
//
//template <typename... T>
//constexpr auto dependent_false_v = dependent_false<T...>::value;
template<typename> inline constexpr bool dependent_false_v = false;
// ----------------------------------------------------------------------------
// is_pod
//-----------------------------------------------------------------------------
template <typename T>
struct is_pod {
static const bool value = std::is_trivial_v<T> &&
std::is_standard_layout_v<T>;
};
template <typename T>
constexpr bool is_pod_v = is_pod<T>::value;
//-----------------------------------------------------------------------------
// NoInit
//-----------------------------------------------------------------------------
template <typename T>
struct NoInit {
//static_assert(is_pod_v<T>, "NoInit only supports POD type");
// constructor without initialization
NoInit () noexcept {}
// implicit conversion T -> NoInit<T>
constexpr NoInit (T value) noexcept : v{value} {}
// implicit conversion NoInit<T> -> T
constexpr operator T () const noexcept { return v; }
T v;
};
//-----------------------------------------------------------------------------
// Move-On-Copy
//-----------------------------------------------------------------------------
// Struct: MoveOnCopyWrapper
template <typename T>
struct MoC {
MoC(T&& rhs) : object(std::move(rhs)) {}
MoC(const MoC& other) : object(std::move(other.object)) {}
T& get() { return object; }
mutable T object;
};
template <typename T>
auto make_moc(T&& m) {
return MoC<T>(std::forward<T>(m));
}
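// Example (illustrative): wrap a move-only object so it can live inside a
// copy-requiring wrapper such as std::function; copying the MoC moves the
// underlying object.
//
//   std::promise<int> p;
//   std::function<void()> f = [moc = make_moc(std::move(p))] () mutable {
//     moc.get().set_value(1);
//   };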
//-----------------------------------------------------------------------------
// Visitors.
//-----------------------------------------------------------------------------
//// Overloadded.
//template <typename... Ts>
//struct Visitors : Ts... {
// using Ts::operator()... ;
//};
//
//template <typename... Ts>
//Visitors(Ts...) -> Visitors<Ts...>;
// ----------------------------------------------------------------------------
// std::variant
// ----------------------------------------------------------------------------
template <typename T, typename>
struct get_index;
template <size_t I, typename... Ts>
struct get_index_impl {};
template <size_t I, typename T, typename... Ts>
struct get_index_impl<I, T, T, Ts...> : std::integral_constant<size_t, I>{};
template <size_t I, typename T, typename U, typename... Ts>
struct get_index_impl<I, T, U, Ts...> : get_index_impl<I+1, T, Ts...>{};
template <typename T, typename... Ts>
struct get_index<T, std::variant<Ts...>> : get_index_impl<0, T, Ts...>{};
template <typename T, typename... Ts>
constexpr auto get_index_v = get_index<T, Ts...>::value;
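// Example (illustrative): the zero-based position of a type in a variant.
//
//   static_assert(get_index_v<float, std::variant<int, float, double>> == 1);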
// ----------------------------------------------------------------------------
// unwrap_reference
// ----------------------------------------------------------------------------
template <class T>
struct unwrap_reference { using type = T; };
template <class U>
struct unwrap_reference<std::reference_wrapper<U>> { using type = U&; };
template<class T>
using unwrap_reference_t = typename unwrap_reference<T>::type;
template< class T >
struct unwrap_ref_decay : unwrap_reference<std::decay_t<T>> {};
template<class T>
using unwrap_ref_decay_t = typename unwrap_ref_decay<T>::type;
// ----------------------------------------------------------------------------
// stateful iterators
// ----------------------------------------------------------------------------
// STL-styled iterator
template <typename B, typename E>
struct stateful_iterator {
using TB = std::decay_t<unwrap_ref_decay_t<B>>;
using TE = std::decay_t<unwrap_ref_decay_t<E>>;
static_assert(std::is_same_v<TB, TE>, "decayed iterator types must match");
using type = TB;
};
template <typename B, typename E>
using stateful_iterator_t = typename stateful_iterator<B, E>::type;
// raw integral index
template <typename B, typename E, typename S>
struct stateful_index {
using TB = std::decay_t<unwrap_ref_decay_t<B>>;
using TE = std::decay_t<unwrap_ref_decay_t<E>>;
using TS = std::decay_t<unwrap_ref_decay_t<S>>;
static_assert(
std::is_integral_v<TB>, "decayed beg index must be an integral type"
);
static_assert(
std::is_integral_v<TE>, "decayed end index must be an integral type"
);
static_assert(
std::is_integral_v<TS>, "decayed step must be an integral type"
);
static_assert(
std::is_same_v<TB, TE> && std::is_same_v<TE, TS>,
"decayed index and step types must match"
);
using type = TB;
};
template <typename B, typename E, typename S>
using stateful_index_t = typename stateful_index<B, E, S>::type;
// ----------------------------------------------------------------------------
// visit a tuple with a functor at runtime
// ----------------------------------------------------------------------------
template <typename Func, typename Tuple, size_t N = 0>
void visit_tuple(Func func, Tuple& tup, size_t idx) {
if (N == idx) {
std::invoke(func, std::get<N>(tup));
return;
}
if constexpr (N + 1 < std::tuple_size_v<Tuple>) {
return visit_tuple<Func, Tuple, N + 1>(func, tup, idx);
}
}
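// Example (illustrative): invoke a generic functor on the idx-th element,
// where idx is only known at runtime.
//
//   std::tuple<int, std::string> tup {1, "two"};
//   visit_tuple([](const auto& v){ std::cout << v << '\n'; }, tup, 1);  // prints "two"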
// ----------------------------------------------------------------------------
// unroll loop
// ----------------------------------------------------------------------------
// Template unrolled looping construct.
template<auto beg, auto end, auto step, bool valid = (beg < end)>
struct Unroll {
template<typename F>
static void eval(F f) {
f(beg);
Unroll<beg + step, end, step>::eval(f);
}
};
template<auto beg, auto end, auto step>
struct Unroll<beg, end, step, false> {
template<typename F>
static void eval(F) { }
};
template<auto beg, auto end, auto step, typename F>
void unroll(F f) {
Unroll<beg, end, step>::eval(f);
}
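// Example (illustrative): unroll<0, 4, 1>(f) expands to the calls
// f(0), f(1), f(2), f(3) at compile time, with no runtime loop.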
// ----------------------------------------------------------------------------
// make types of variant unique
// ----------------------------------------------------------------------------
template <typename T, typename... Ts>
struct filter_duplicates { using type = T; };
template <template <typename...> class C, typename... Ts, typename U, typename... Us>
struct filter_duplicates<C<Ts...>, U, Us...>
: std::conditional_t<(std::is_same_v<U, Ts> || ...)
, filter_duplicates<C<Ts...>, Us...>
, filter_duplicates<C<Ts..., U>, Us...>> {};
template <typename T>
struct unique_variant;
template <typename... Ts>
struct unique_variant<std::variant<Ts...>> : filter_duplicates<std::variant<>, Ts...> {};
template <typename T>
using unique_variant_t = typename unique_variant<T>::type;
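// Example (illustrative): duplicate alternatives are removed left to right.
//
//   static_assert(std::is_same_v<
//     unique_variant_t<std::variant<int, int, float, int>>,
//     std::variant<int, float>
//   >);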
} // end of namespace tf. ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/singleton.hpp | #pragma once
namespace tf {
/** @class Singleton
@brief class template to create a thread-safe singleton object
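A minimal usage sketch (@c Config is a hypothetical user-defined type
with an accessible default constructor):
@code{.cpp}
auto& config = tf::Singleton<Config>::get();  // same instance on every call
@endcode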
*/
template <typename T>
class Singleton {
public:
/**
@brief get a reference to the singleton object
*/
inline static T& get() {
static T instance;
return instance;
}
private:
Singleton() = default;
~Singleton() = default;
Singleton(const Singleton&)= delete;
Singleton& operator=(const Singleton&)= delete;
};
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/object_pool.hpp | // 2020/03/13 - modified by Tsung-Wei Huang
// - fixed bug in aligning memory
//
// 2020/02/02 - modified by Tsung-Wei Huang
// - new implementation motivated by Hoard
//
// 2019/07/10 - modified by Tsung-Wei Huang
// - replace raw pointer with smart pointer
//
// 2019/06/13 - created by Tsung-Wei Huang
// - implemented an object pool class
#pragma once
#include <thread>
#include <atomic>
#include <mutex>
#include <vector>
#include <cassert>
#include <cstddef>
namespace tf {
#define TF_ENABLE_POOLABLE_ON_THIS \
template <typename T, size_t S> friend class ObjectPool; \
void* _object_pool_block
// Class: ObjectPool
//
// The class implements an efficient thread-safe object pool motivated
// by the Hoard memory allocator algorithm.
// Unlike a general-purpose memory allocator, the object pool allocates
// only one object at a time.
//
// Internally, we use the following variables to maintain blocks and heaps:
// X: size in bytes of an item slot
// M: number of items per block
// F: emptiness threshold
// B: number of bins per local heap (bin[B-1] is the full list)
// W: number of items per bin
// K: shrink constant
//
// Example scenario 1:
// M = 30
// F = 4
// W = (30+4-1)/4 = 8
//
// b0: 0, 1, 2, 3, 4, 5, 6, 7
// b1: 8, 9, 10, 11, 12, 13, 14, 15
// b2: 16, 17, 18, 19, 20, 21, 22, 23
// b3: 24, 25, 26, 27, 28, 29
// b4: 30 (anything equal to M)
//
// Example scenario 2:
// M = 32
// F = 4
// W = (32+4-1)/4 = 8
// b0: 0, 1, 2, 3, 4, 5, 6, 7
// b1: 8, 9, 10, 11, 12, 13, 14, 15
// b2: 16, 17, 18, 19, 20, 21, 22, 23
// b3: 24, 25, 26, 27, 28, 29, 30, 31
// b4: 32 (anything equal to M)
//
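// Example (illustrative): pool objects of a user-defined type Foo, which
// must declare TF_ENABLE_POOLABLE_ON_THIS in its body so that animate can
// record the owning block inside each object.
//
//   struct Foo {
//     TF_ENABLE_POOLABLE_ON_THIS;
//     int value;
//     Foo(int v) : value{v} {}
//   };
//
//   tf::ObjectPool<Foo> pool;
//   Foo* foo = pool.animate(123);  // placement-constructs Foo{123} in the pool
//   pool.recycle(foo);             // destroys it and returns the slot
//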
template <typename T, size_t S = 65536>
class ObjectPool {
// the data column must be large enough to hold a freelist pointer
constexpr static size_t X = (std::max)(sizeof(T*), sizeof(T));
//constexpr static size_t X = sizeof(long double) + std::max(sizeof(T*), sizeof(T));
//constexpr static size_t M = (S - offsetof(Block, data)) / X;
constexpr static size_t M = S / X;
constexpr static size_t F = 4;
constexpr static size_t B = F + 1;
constexpr static size_t W = (M + F - 1) / F;
constexpr static size_t K = 4;
static_assert(
S && (!(S & (S-1))), "block size S must be a power of two"
);
static_assert(
M >= 128, "block size S must be large enough to pool at least 128 objects"
);
struct Blocklist {
Blocklist* prev;
Blocklist* next;
};
struct GlobalHeap {
std::mutex mutex;
Blocklist list;
};
struct LocalHeap {
std::mutex mutex;
Blocklist lists[B];
size_t u {0};
size_t a {0};
};
struct Block {
std::atomic<LocalHeap*> heap;
Blocklist list_node;
size_t i;
size_t u;
T* top;
// long double padding;
char data[S];
};
public:
/**
@brief constructs an object pool from a number of anticipated threads
*/
explicit ObjectPool(unsigned = std::thread::hardware_concurrency());
/**
@brief destructs the object pool
*/
~ObjectPool();
/**
@brief acquires a pointer to an object constructed from a given argument list
*/
template <typename... ArgsT>
T* animate(ArgsT&&... args);
/**
@brief recycles an object pointed to by @c ptr and destroys it
*/
void recycle(T* ptr);
size_t num_bins_per_local_heap() const;
size_t num_objects_per_bin() const;
size_t num_objects_per_block() const;
size_t num_available_objects() const;
size_t num_allocated_objects() const;
size_t capacity() const;
size_t num_local_heaps() const;
size_t num_global_heaps() const;
size_t num_heaps() const;
float emptiness_threshold() const;
private:
const size_t _lheap_mask;
GlobalHeap _gheap;
std::vector<LocalHeap> _lheaps;
LocalHeap& _this_heap();
constexpr unsigned _next_pow2(unsigned n) const;
template <class P, class Q>
constexpr size_t _offset_in_class(const Q P::*member) const;
template <class P, class Q>
constexpr P* _parent_class_of(Q*, const Q P::*member);
template <class P, class Q>
constexpr P* _parent_class_of(const Q*, const Q P::*member) const;
constexpr Block* _block_of(Blocklist*);
constexpr Block* _block_of(const Blocklist*) const;
size_t _bin(size_t) const;
T* _allocate(Block*);
void _deallocate(Block*, T*);
void _blocklist_init_head(Blocklist*);
void _blocklist_add_impl(Blocklist*, Blocklist*, Blocklist*);
void _blocklist_push_front(Blocklist*, Blocklist*);
void _blocklist_push_back(Blocklist*, Blocklist*);
void _blocklist_del_impl(Blocklist*, Blocklist*);
void _blocklist_del(Blocklist*);
void _blocklist_replace(Blocklist*, Blocklist*);
void _blocklist_move_front(Blocklist*, Blocklist*);
void _blocklist_move_back(Blocklist*, Blocklist*);
bool _blocklist_is_first(const Blocklist*, const Blocklist*);
bool _blocklist_is_last(const Blocklist*, const Blocklist*);
bool _blocklist_is_empty(const Blocklist*);
bool _blocklist_is_singular(const Blocklist*);
template <typename C>
void _for_each_block_safe(Blocklist*, C&&);
template <typename C>
void _for_each_block(Blocklist*, C&&);
};
// ----------------------------------------------------------------------------
// ObjectPool definition
// ----------------------------------------------------------------------------
// Constructor
template <typename T, size_t S>
ObjectPool<T, S>::ObjectPool(unsigned t) :
//_heap_mask {(_next_pow2(t) << 1) - 1u},
//_heap_mask { _next_pow2(t<<1) - 1u },
//_heap_mask {(t << 1) - 1},
_lheap_mask { _next_pow2((t+1) << 1) - 1 },
_lheaps { _lheap_mask + 1 } {
_blocklist_init_head(&_gheap.list);
for(auto& h : _lheaps) {
for(size_t i=0; i<B; ++i) {
_blocklist_init_head(&h.lists[i]);
}
}
}
// Destructor
template <typename T, size_t S>
ObjectPool<T, S>::~ObjectPool() {
// clear local heaps
for(auto& h : _lheaps) {
for(size_t i=0; i<B; ++i) {
_for_each_block_safe(&h.lists[i], [] (Block* b) {
//std::free(b);
delete b;
});
}
}
// clear global heap
_for_each_block_safe(&_gheap.list, [] (Block* b) {
//std::free(b);
delete b;
});
}
// Function: num_bins_per_local_heap
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_bins_per_local_heap() const {
return B;
}
// Function: num_objects_per_bin
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_objects_per_bin() const {
return W;
}
// Function: num_objects_per_block
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_objects_per_block() const {
return M;
}
// Function: emptiness_threshold
template <typename T, size_t S>
float ObjectPool<T, S>::emptiness_threshold() const {
return 1.0f/F;
}
// Function: num_global_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_global_heaps() const {
return 1;
}
// Function: num_local_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_local_heaps() const {
return _lheaps.size();
}
// Function: num_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_heaps() const {
return _lheaps.size() + 1;
}
// Function: capacity
template <typename T, size_t S>
size_t ObjectPool<T, S>::capacity() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += M;
};
// local heap
for(auto& h : _lheaps) {
n += h.a;
}
return n;
}
// Function: num_available_objects
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_available_objects() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += (M - _block_of(p)->u);
};
// local heap
for(auto& h : _lheaps) {
n += (h.a - h.u);
}
return n;
}
// Function: num_allocated_objects
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_allocated_objects() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += _block_of(p)->u;
};
// local heap
for(auto& h : _lheaps) {
n += h.u;
}
return n;
}
// Function: _bin
template <typename T, size_t S>
size_t ObjectPool<T, S>::_bin(size_t u) const {
return u == M ? F : u/W;
}
// Function: _offset_in_class
template <typename T, size_t S>
template <class P, class Q>
constexpr size_t ObjectPool<T, S>::_offset_in_class(
const Q P::*member) const {
return (size_t) &( reinterpret_cast<P*>(0)->*member);
}
// C macro: parent_class_of(list_pointer, Block, list)
// C++: parent_class_of(list_pointer, &Block::list)
template <typename T, size_t S>
template <class P, class Q>
constexpr P* ObjectPool<T, S>::_parent_class_of(
Q* ptr, const Q P::*member
) {
return (P*)( (char*)ptr - _offset_in_class(member));
}
// Function: _parent_class_of
template <typename T, size_t S>
template <class P, class Q>
constexpr P* ObjectPool<T, S>::_parent_class_of(
const Q* ptr, const Q P::*member
) const {
return (P*)( (char*)ptr - _offset_in_class(member));
}
// Function: _block_of
template <typename T, size_t S>
constexpr typename ObjectPool<T, S>::Block*
ObjectPool<T, S>::_block_of(Blocklist* list) {
return _parent_class_of(list, &Block::list_node);
}
// Function: _block_of
template <typename T, size_t S>
constexpr typename ObjectPool<T, S>::Block*
ObjectPool<T, S>::_block_of(const Blocklist* list) const {
return _parent_class_of(list, &Block::list_node);
}
// Procedure: initialize a list head
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_init_head(Blocklist *list) {
list->next = list;
list->prev = list;
}
// Procedure: _blocklist_add_impl
// Insert a new entry between two known consecutive entries.
//
// This is only for internal list manipulation where we know
// the prev/next entries already!
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_add_impl(
Blocklist *curr, Blocklist *prev, Blocklist *next
) {
next->prev = curr;
curr->next = next;
curr->prev = prev;
prev->next = curr;
}
// list_push_front - add a new entry
// @curr: curr entry to be added
// @head: list head to add it after
//
// Insert a new entry after the specified head.
// This is good for implementing stacks.
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_push_front(
Blocklist *curr, Blocklist *head
) {
_blocklist_add_impl(curr, head, head->next);
}
// list_add_tail - add a new entry
// @curr: curr entry to be added
// @head: list head to add it before
//
// Insert a new entry before the specified head.
// This is useful for implementing queues.
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_push_back(
Blocklist *curr, Blocklist *head
) {
_blocklist_add_impl(curr, head->prev, head);
}
// Delete a list entry by making the prev/next entries
// point to each other.
//
// This is only for internal list manipulation where we know
// the prev/next entries already!
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_del_impl(
Blocklist * prev, Blocklist * next
) {
next->prev = prev;
prev->next = next;
}
// _blocklist_del - deletes entry from list.
// @entry: the element to delete from the list.
// Note: list_empty() on entry does not return true after this, the entry is
// in an undefined state.
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_del(Blocklist *entry) {
_blocklist_del_impl(entry->prev, entry->next);
entry->next = nullptr;
entry->prev = nullptr;
}
// list_replace - replace old entry by new one
// @old : the element to be replaced
// @curr : the new element to insert
//
// If @old was empty, it will be overwritten.
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_replace(
Blocklist *old, Blocklist *curr
) {
curr->next = old->next;
curr->next->prev = curr;
curr->prev = old->prev;
curr->prev->next = curr;
}
// list_move - delete from one list and add as another's head
// @list: the entry to move
// @head: the head that will precede our entry
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_move_front(
Blocklist *list, Blocklist *head
) {
_blocklist_del_impl(list->prev, list->next);
_blocklist_push_front(list, head);
}
// list_move_tail - delete from one list and add as another's tail
// @list: the entry to move
// @head: the head that will follow our entry
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_move_back(
Blocklist *list, Blocklist *head
) {
_blocklist_del_impl(list->prev, list->next);
_blocklist_push_back(list, head);
}
// list_is_first - tests whether @list is the first entry in list @head
// @list: the entry to test
// @head: the head of the list
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_first(
const Blocklist *list, const Blocklist *head
) {
return list->prev == head;
}
// list_is_last - tests whether @list is the last entry in list @head
// @list: the entry to test
// @head: the head of the list
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_last(
const Blocklist *list, const Blocklist *head
) {
return list->next == head;
}
// list_empty - tests whether a list is empty
// @head: the list to test.
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_empty(const Blocklist *head) {
return head->next == head;
}
// list_is_singular - tests whether a list has just one entry.
// @head: the list to test.
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_singular(
const Blocklist *head
) {
return !_blocklist_is_empty(head) && (head->next == head->prev);
}
// Procedure: _for_each_block
template <typename T, size_t S>
template <typename C>
void ObjectPool<T, S>::_for_each_block(Blocklist* head, C&& c) {
Blocklist* p;
for(p=head->next; p!=head; p=p->next) {
c(_block_of(p));
}
}
// Procedure: _for_each_block_safe
// Iterate each item of a list - safe to free
template <typename T, size_t S>
template <typename C>
void ObjectPool<T, S>::_for_each_block_safe(Blocklist* head, C&& c) {
Blocklist* p;
Blocklist* t;
for(p=head->next, t=p->next; p!=head; p=t, t=p->next) {
c(_block_of(p));
}
}
// Function: _allocate
// allocate a spot from the block
template <typename T, size_t S>
T* ObjectPool<T, S>::_allocate(Block* s) {
if(s->top == nullptr) {
return reinterpret_cast<T*>(s->data + s->i++ * X);
}
else {
T* retval = s->top;
s->top = *(reinterpret_cast<T**>(s->top));
return retval;
}
}
// Procedure: _deallocate
template <typename T, size_t S>
void ObjectPool<T, S>::_deallocate(Block* s, T* ptr) {
*(reinterpret_cast<T**>(ptr)) = s->top;
s->top = ptr;
}
// Function: animate
template <typename T, size_t S>
template <typename... ArgsT>
T* ObjectPool<T, S>::animate(ArgsT&&... args) {
//std::cout << "construct a new item\n";
// my logically mapped heap
LocalHeap& h = _this_heap();
Block* s {nullptr};
h.mutex.lock();
// scan the list of superblocks from the most full to the least full
int f = static_cast<int>(F-1);
for(; f>=0; f--) {
if(!_blocklist_is_empty(&h.lists[f])) {
s = _block_of(h.lists[f].next);
break;
}
}
// no superblock found
if(f == -1) {
// check heap 0 for a superblock
_gheap.mutex.lock();
if(!_blocklist_is_empty(&_gheap.list)) {
s = _block_of(_gheap.list.next);
//printf("get a superblock from global heap %lu\n", s->u);
assert(s->u < M && s->heap == nullptr);
f = static_cast<int>(_bin(s->u + 1));
_blocklist_move_front(&s->list_node, &h.lists[f]);
s->heap = &h; // must be within the global heap lock
_gheap.mutex.unlock();
h.u = h.u + s->u;
h.a = h.a + M;
}
// create a new block
else {
//printf("create a new superblock\n");
_gheap.mutex.unlock();
f = 0;
//s = static_cast<Block*>(std::malloc(sizeof(Block)));
s = new Block();
if(s == nullptr) {
throw std::bad_alloc();
}
s->heap = &h;
s->i = 0;
s->u = 0;
s->top = nullptr;
_blocklist_push_front(&s->list_node, &h.lists[f]);
h.a = h.a + M;
}
}
// the superblock must have at least one space
//assert(s->u < M);
//printf("%lu %lu %lu\n", h.u, h.a, s->u);
//assert(h.u < h.a);
h.u = h.u + 1;
s->u = s->u + 1;
// take one item from the superblock
T* mem = _allocate(s);
int b = static_cast<int>(_bin(s->u));
if(b != f) {
//printf("move superblock from list[%d] to list[%d]\n", f, b);
_blocklist_move_front(&s->list_node, &h.lists[b]);
}
//std::cout << "s.i " << s->i << '\n'
// << "s.u " << s->u << '\n'
// << "h.u " << h.u << '\n'
// << "h.a " << h.a << '\n';
h.mutex.unlock();
//printf("allocate %p (s=%p)\n", mem, s);
new (mem) T(std::forward<ArgsT>(args)...);
mem->_object_pool_block = s;
return mem;
}
// Function: recycle
template <typename T, size_t S>
void ObjectPool<T, S>::recycle(T* mem) {
//Block* s = *reinterpret_cast<Block**>(
// reinterpret_cast<char*>(mem) - sizeof(Block**)
//);
//Block* s= *(reinterpret_cast<Block**>(mem) - O); // (mem) - 1
Block* s = static_cast<Block*>(mem->_object_pool_block);
mem->~T();
//printf("deallocate %p (s=%p) M=%lu W=%lu X=%lu\n", mem, s, M, W, X);
// here we need a loop because when we lock the heap,
// other threads may have removed the superblock to another heap
bool sync = false;
do {
LocalHeap* h = s->heap.load(std::memory_order_relaxed);
// the block is in global heap
if(h == nullptr) {
std::lock_guard<std::mutex> glock(_gheap.mutex);
if(s->heap == h) {
sync = true;
_deallocate(s, mem);
s->u = s->u - 1;
}
}
else {
std::lock_guard<std::mutex> llock(h->mutex);
if(s->heap == h) {
sync = true;
// deallocate the item from the superblock
size_t f = _bin(s->u);
_deallocate(s, mem);
s->u = s->u - 1;
h->u = h->u - 1;
size_t b = _bin(s->u);
if(b != f) {
//printf("move superblock from list[%d] to list[%d]\n", f, b);
_blocklist_move_front(&s->list_node, &h->lists[b]);
}
// transfer a mostly-empty superblock to global heap
if((h->u + K*M < h->a) && (h->u < ((F-1) * h->a / F))) {
for(size_t i=0; i<F; i++) {
if(!_blocklist_is_empty(&h->lists[i])) {
Block* x = _block_of(h->lists[i].next);
//printf("transfer a block (x.u=%lu/x.i=%lu) to the global heap\n", x->u, x->i);
assert(h->u > x->u && h->a > M);
h->u = h->u - x->u;
h->a = h->a - M;
x->heap = nullptr;
std::lock_guard<std::mutex> glock(_gheap.mutex);
_blocklist_move_front(&x->list_node, &_gheap.list);
break;
}
}
}
}
}
} while(!sync);
//std::cout << "s.i " << s->i << '\n'
// << "s.u " << s->u << '\n';
}
// Function: _this_heap
template <typename T, size_t S>
typename ObjectPool<T, S>::LocalHeap&
ObjectPool<T, S>::_this_heap() {
// here we don't use thread local since object pool might be
// created and destroyed multiple times
//thread_local auto hv = std::hash<std::thread::id>()(std::this_thread::get_id());
//return _lheaps[hv & _lheap_mask];
return _lheaps[
std::hash<std::thread::id>()(std::this_thread::get_id()) & _lheap_mask
];
}
// Function: _next_pow2
template <typename T, size_t S>
constexpr unsigned ObjectPool<T, S>::_next_pow2(unsigned n) const {
if(n == 0) return 1;
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
} // end namespace tf --------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/macros.hpp | #pragma once
#if defined(_MSC_VER)
#define TF_FORCE_INLINE __forceinline
#elif defined(__GNUC__) && __GNUC__ > 3
#define TF_FORCE_INLINE __attribute__((__always_inline__)) inline
#else
#define TF_FORCE_INLINE inline
#endif
#if defined(_MSC_VER)
#define TF_NO_INLINE __declspec(noinline)
#elif defined(__GNUC__) && __GNUC__ > 3
#define TF_NO_INLINE __attribute__((__noinline__))
#else
#define TF_NO_INLINE
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/stream.hpp | #pragma once
#include <iostream>
#include <string>
namespace tf {
// Procedure: ostreamize
template <typename T>
void ostreamize(std::ostream& os, T&& token) {
os << std::forward<T>(token);
}
// Procedure: ostreamize
template <typename T, typename... Rest>
void ostreamize(std::ostream& os, T&& token, Rest&&... rest) {
os << std::forward<T>(token);
ostreamize(os, std::forward<Rest>(rest)...);
}
// Function: stringify
template <typename... ArgsT>
std::string stringify(ArgsT&&... args) {
std::ostringstream oss;
ostreamize(oss, std::forward<ArgsT>(args)...);
return oss.str();
}
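// Example (illustrative): stringify("pi=", 3.14) returns the
// std::string "pi=3.14".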
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/uuid.hpp | #pragma once
#include <iostream>
#include <string>
#include <cstring>
#include <limits>
#include <random>
#include <chrono>
namespace tf {
// Class: UUID
//
// A universally unique identifier (UUID) is an identifier standard used in software
// construction. A UUID is simply a 128-bit value. The meaning of each bit is defined
// by any of several variants.
// For human-readable display, many systems use a canonical format using hexadecimal
// text with inserted hyphen characters.
//
// For example: 123e4567-e89b-12d3-a456-426655440000
//
// The intent of UUIDs is to enable distributed systems to uniquely identify information
// without significant central coordination.
//
// Copyright 2006 Andy Tompkins.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
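// Example (illustrative): default construction yields a random version-4
// UUID, printable in the canonical hyphenated form.
//
//   tf::UUID uuid;
//   std::cout << uuid.to_string() << '\n';  // e.g. 123e4567-e89b-42d3-a456-426655440000
//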
struct UUID {
using value_type = uint8_t;
using reference = uint8_t&;
using const_reference = const uint8_t&;
using iterator = uint8_t*;
using const_iterator = const uint8_t*;
using size_type = size_t;
using difference_type = ptrdiff_t;
inline UUID();
UUID(const UUID&) = default;
UUID(UUID&&) = default;
UUID& operator = (const UUID&) = default;
UUID& operator = (UUID&&) = default;
inline static size_type size();
inline iterator begin();
inline const_iterator begin() const;
inline iterator end();
inline const_iterator end() const;
inline bool is_nil() const;
inline void swap(UUID& rhs);
inline size_t hash_value() const;
inline bool operator == (const UUID&) const;
inline bool operator < (const UUID&) const;
inline bool operator > (const UUID&) const;
inline bool operator != (const UUID&) const;
inline bool operator >= (const UUID&) const;
inline bool operator <= (const UUID&) const;
uint8_t data[16] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
inline std::string to_string() const;
};
// Constructor
inline UUID::UUID() {
static thread_local std::default_random_engine engine {
std::random_device{}()
};
std::uniform_int_distribution<unsigned long> distribution(
(std::numeric_limits<unsigned long>::min)(),
(std::numeric_limits<unsigned long>::max)()
);
int i = 0;
auto random_value = distribution(engine);
for (auto it=begin(); it!=end(); ++it, ++i) {
if (i == sizeof(unsigned long)) {
random_value = distribution(engine);
i = 0;
}
*it = static_cast<UUID::value_type>((random_value >> (i*8)) & 0xFF);
}
// set variant: must be 0b10xxxxxx
*(begin()+8) &= 0xBF;
*(begin()+8) |= 0x80;
// set version: must be 0b0100xxxx
*(begin()+6) &= 0x4F; //0b01001111
*(begin()+6) |= 0x40; //0b01000000
}
// Function: size
inline typename UUID::size_type UUID::size() {
return 16;
}
// Function: begin
inline typename UUID::iterator UUID::begin() {
return data;
}
// Function: begin
inline typename UUID::const_iterator UUID::begin() const {
return data;
}
// Function: end
inline typename UUID::iterator UUID::end() {
return data+size();
}
// Function: end
inline typename UUID::const_iterator UUID::end() const {
return data+size();
}
// Function: is_nil
inline bool UUID::is_nil() const {
for (std::size_t i = 0; i < sizeof(this->data); ++i) {
if (this->data[i] != 0U) {
return false;
}
}
return true;
}
// Procedure: swap
inline void UUID::swap(UUID& rhs) {
UUID tmp = *this;
*this = rhs;
rhs = tmp;
}
// Function: hash_value
inline size_t UUID::hash_value() const {
size_t seed = 0;
for(auto i=begin(); i != end(); ++i) {
seed ^= static_cast<size_t>(*i) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
return seed;
}
// Operator: ==
inline bool UUID::operator == (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) == 0;
}
// Operator: !=
inline bool UUID::operator != (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) != 0;
}
// Operator: <
inline bool UUID::operator < (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) < 0;
}
// Operator: >
inline bool UUID::operator > (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) > 0;
}
// Operator: <=
inline bool UUID::operator <= (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) <= 0;
}
// Operator: >=
inline bool UUID::operator >= (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) >= 0;
}
// Function: to_string
inline std::string UUID::to_string() const {
auto to_char = [](size_t i) {
if (i <= 9) return static_cast<char>('0' + i);
return static_cast<char>('a' + (i-10));
};
std::string result;
result.reserve(36);
std::size_t i=0;
for (auto it = begin(); it!=end(); ++it, ++i) {
const size_t hi = ((*it) >> 4) & 0x0F;
result += to_char(hi);
const size_t lo = (*it) & 0x0F;
result += to_char(lo);
if (i == 3 || i == 5 || i == 7 || i == 9) {
result += '-';
}
}
return result;
}
// Procedure: swap
inline void swap(UUID& lhs, UUID& rhs) {
lhs.swap(rhs);
}
// ostream
inline std::ostream& operator << (std::ostream& os, const UUID& rhs) {
os << rhs.to_string();
return os;
}
} // End of namespace tf. ----------------------------------------------------
//-----------------------------------------------------------------------------
namespace std {
// Partial specialization: hash<tf::UUID>
template <>
struct hash<tf::UUID> {
size_t operator()(const tf::UUID& rhs) const { return rhs.hash_value(); }
};
} // End of namespace std. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/math.hpp | #pragma once
#include <atomic>
namespace tf {
// rounds the given 64-bit unsigned integer up to the nearest power of 2
template <typename T, std::enable_if_t<
(std::is_unsigned_v<std::decay_t<T>> && sizeof(T) == 8) , void
>* = nullptr>
constexpr T next_pow2(T x) {
if(x == 0) return 1;
x--;
x |= x>>1;
x |= x>>2;
x |= x>>4;
x |= x>>8;
x |= x>>16;
x |= x>>32;
x++;
return x;
}
// rounds the given 32-bit unsigned integer up to the nearest power of 2
template <typename T, std::enable_if_t<
(std::is_unsigned_v<std::decay_t<T>> && sizeof(T) == 4), void
>* = nullptr>
constexpr T next_pow2(T x) {
if(x == 0) return 1;
x--;
x |= x>>1;
x |= x>>2;
x |= x>>4;
x |= x>>8;
x |= x>>16;
x++;
return x;
}
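// Illustrative sanity checks (added, not in the original header); they assume
// the usual 32-bit unsigned int. next_pow2 rounds up to the smallest power of
// two that is >= x, mapping 0 to 1.
static_assert(next_pow2(0u) == 1u);
static_assert(next_pow2(5u) == 8u);
static_assert(next_pow2(64u) == 64u);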
// checks if the given number is a power of 2
template <typename T, std::enable_if_t<
std::is_integral_v<std::decay_t<T>>, void>* = nullptr
>
constexpr bool is_pow2(const T& x) {
return x && (!(x&(x-1)));
}
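// Illustrative checks (added): note that 0 is not a power of two.
static_assert(is_pow2(64) && !is_pow2(6) && !is_pow2(0));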
//// finds the ceil of x divided by b
//template <typename T, std::enable_if_t<
// std::is_integral_v<std::decay_t<T>>, void>* = nullptr
//>
//constexpr T ceil(const T& x, const T& y) {
// //return (x + y - 1) / y;
// return (x-1) / y + 1;
//}
/**
@brief returns floor(log2(n)), assumes n > 0
*/
template<typename T>
constexpr int log2(T n) {
int log = 0;
while (n >>= 1) {
++log;
}
return log;
}
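// Illustrative checks (added): log2 returns floor(log2(n)) for n > 0.
static_assert(log2(1) == 0);
static_assert(log2(8) == 3);
static_assert(log2(9) == 3);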
/**
@brief finds the median of three numbers of dereferenced iterators using
the given comparator
*/
template <typename RandItr, typename C>
RandItr median_of_three(RandItr l, RandItr m, RandItr r, C cmp) {
return cmp(*l, *m) ? (cmp(*m, *r) ? m : (cmp(*l, *r) ? r : l ))
: (cmp(*r, *m) ? m : (cmp(*r, *l) ? r : l ));
}
/**
@brief finds the pseudo median of a range of items using nine evenly
spread samples
*/
template <typename RandItr, typename C>
RandItr pseudo_median_of_nine(RandItr beg, RandItr end, C cmp) {
size_t N = std::distance(beg, end);
size_t offset = N >> 3;
return median_of_three(
median_of_three(beg, beg+offset, beg+(offset*2), cmp),
median_of_three(beg+(offset*3), beg+(offset*4), beg+(offset*5), cmp),
median_of_three(beg+(offset*6), beg+(offset*7), end-1, cmp),
cmp
);
}
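// Example (illustrative, assuming an int array and std::less):
//   int v[3] = {3, 5, 4};
//   auto m = median_of_three(v, v + 1, v + 2, std::less<int>{});  // *m == 4
// pseudo_median_of_nine samples nine elements of [beg, end) the same way;
// it is typically used to pick a robust quicksort pivot in O(1) time.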
/**
@brief sorts two elements of dereferenced iterators using the given
comparison function
*/
template<typename Iter, typename Compare>
void sort2(Iter a, Iter b, Compare comp) {
if (comp(*b, *a)) std::iter_swap(a, b);
}
/**
@brief sorts three elements of dereferenced iterators using the given
comparison function
*/
template<typename Iter, typename Compare>
void sort3(Iter a, Iter b, Iter c, Compare comp) {
sort2(a, b, comp);
sort2(b, c, comp);
sort2(a, b, comp);
}
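// Example (illustrative): sort3 is a three-element sorting network.
//   int a = 3, b = 1, c = 2;
//   sort3(&a, &b, &c, std::less<int>{});  // now a == 1, b == 2, c == 3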
/**
@brief generates a program-wide unique id of the given type (thread-safe)
*/
template <typename T, std::enable_if_t<std::is_integral_v<T>, void>* = nullptr>
T unique_id() {
static std::atomic<T> counter{0};
return counter.fetch_add(1, std::memory_order_relaxed);
}
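// Example (illustrative): ids are issued monotonically per instantiated type,
// so in a fresh single-threaded run the calls below yield 0 and 1.
//   auto a = unique_id<uint64_t>();  // 0
//   auto b = unique_id<uint64_t>();  // 1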
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/iterator.hpp | #pragma once
#include <cstddef>
#include <type_traits>
namespace tf {
template <typename T>
constexpr std::enable_if_t<std::is_integral<std::decay_t<T>>::value, size_t>
distance(T beg, T end, T step) {
return (end - beg + step + (step > 0 ? -1 : 1)) / step;
}
template <typename T>
constexpr std::enable_if_t<std::is_integral<std::decay_t<T>>::value, bool>
is_range_invalid(T beg, T end, T step) {
return ((step == 0 && beg != end) ||
(beg < end && step <= 0) ||
(beg > end && step >= 0));
}
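// Illustrative checks (added, not in the original header): distance counts the
// iterations of a strided integer range; is_range_invalid rejects ranges that
// cannot terminate.
static_assert(distance(0, 10, 2) == 5);   // 0, 2, 4, 6, 8
static_assert(distance(10, 0, -3) == 4);  // 10, 7, 4, 1
static_assert(is_range_invalid(0, 10, 0) && is_range_invalid(0, 10, -1));
static_assert(!is_range_invalid(0, 10, 1));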
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/serializer.hpp | #pragma once
#include <type_traits>
#include <iterator>
#include <iostream>
#include <fstream>
#include <stack>
#include <queue>
#include <vector>
#include <algorithm>
#include <memory>
#include <functional>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <sstream>
#include <list>
#include <forward_list>
#include <numeric>
#include <iomanip>
#include <cassert>
#include <cmath>
#include <array>
#include <string>
#include <variant>
#include <optional>
namespace tf {
// ----------------------------------------------------------------------------
// Supported C++ STL type
// ----------------------------------------------------------------------------
// std::basic_string
template <typename T>
struct is_std_basic_string : std::false_type {};
template <typename... ArgsT>
struct is_std_basic_string <std::basic_string<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_basic_string_v = is_std_basic_string<T>::value;
// std::array
template <typename T>
struct is_std_array : std::false_type {};
template <typename T, size_t N>
struct is_std_array <std::array<T, N>> : std::true_type {};
template <typename T>
constexpr bool is_std_array_v = is_std_array<T>::value;
// std::vector
template <typename T>
struct is_std_vector : std::false_type {};
template <typename... ArgsT>
struct is_std_vector <std::vector<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_vector_v = is_std_vector<T>::value;
// std::deque
template <typename T>
struct is_std_deque : std::false_type {};
template <typename... ArgsT>
struct is_std_deque <std::deque<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_deque_v = is_std_deque<T>::value;
// std::list
template <typename T>
struct is_std_list : std::false_type {};
template <typename... ArgsT>
struct is_std_list <std::list<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_list_v = is_std_list<T>::value;
// std::forward_list
template <typename T>
struct is_std_forward_list : std::false_type {};
template <typename... ArgsT>
struct is_std_forward_list <std::forward_list<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_forward_list_v = is_std_forward_list<T>::value;
// std::map
template <typename T>
struct is_std_map : std::false_type {};
template <typename... ArgsT>
struct is_std_map <std::map<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_map_v = is_std_map<T>::value;
// std::unordered_map
template <typename T>
struct is_std_unordered_map : std::false_type {};
template <typename... ArgsT>
struct is_std_unordered_map <std::unordered_map<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unordered_map_v = is_std_unordered_map<T>::value;
// std::set
template <typename T>
struct is_std_set : std::false_type {};
template <typename... ArgsT>
struct is_std_set <std::set<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_set_v = is_std_set<T>::value;
// std::unordered_set
template <typename T>
struct is_std_unordered_set : std::false_type {};
template <typename... ArgsT>
struct is_std_unordered_set <std::unordered_set<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unordered_set_v = is_std_unordered_set<T>::value;
// std::variant
template <typename T>
struct is_std_variant : std::false_type {};
template <typename... ArgsT>
struct is_std_variant <std::variant<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_variant_v = is_std_variant<T>::value;
// std::optional
template <typename T>
struct is_std_optional : std::false_type {};
template <typename... ArgsT>
struct is_std_optional <std::optional<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_optional_v = is_std_optional<T>::value;
// std::unique_ptr
template <typename T>
struct is_std_unique_ptr : std::false_type {};
template <typename... ArgsT>
struct is_std_unique_ptr <std::unique_ptr<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unique_ptr_v = is_std_unique_ptr<T>::value;
// std::shared_ptr
template <typename T>
struct is_std_shared_ptr : std::false_type {};
template <typename... ArgsT>
struct is_std_shared_ptr <std::shared_ptr<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_shared_ptr_v = is_std_shared_ptr<T>::value;
// std::duration
template <typename T> struct is_std_duration : std::false_type {};
template <typename... ArgsT>
struct is_std_duration<std::chrono::duration<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_duration_v = is_std_duration<T>::value;
// std::time_point
template <typename T>
struct is_std_time_point : std::false_type {};
template <typename... ArgsT>
struct is_std_time_point<std::chrono::time_point<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_time_point_v = is_std_time_point<T>::value;
// std::tuple
template <typename T>
struct is_std_tuple : std::false_type {};
template <typename... ArgsT>
struct is_std_tuple<std::tuple<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_tuple_v = is_std_tuple<T>::value;
//-----------------------------------------------------------------------------
// Type extraction.
//-----------------------------------------------------------------------------
// ExtractType: forward declaration
template <size_t, typename>
struct ExtractType;
// ExtractType_t: alias interface
template <size_t idx, typename C>
using ExtractType_t = typename ExtractType<idx, C>::type;
// ExtractType: base
template <template <typename...> typename C, typename T, typename... RestT>
struct ExtractType <0, C<T, RestT...>> {
using type = T;
};
// ExtractType: base
template <typename T>
struct ExtractType <0, T> {
using type = T;
};
// ExtractType: recursive definition.
template <size_t idx, template <typename...> typename C, typename T, typename... RestT>
struct ExtractType <idx, C<T, RestT...>> : ExtractType<idx-1, C<RestT...>> {
};
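// Illustrative check (added): ExtractType_t pulls the idx-th type parameter
// out of a class template instantiation.
static_assert(
  std::is_same_v<ExtractType_t<1, std::variant<int, double, char>>, double>
);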
// ----------------------------------------------------------------------------
// Size Wrapper
// ----------------------------------------------------------------------------
// Struct: SizeTag
// Class that wraps a given size item which can be customized.
template <typename T>
class SizeTag {
public:
using type = std::conditional_t<std::is_lvalue_reference_v<T>, T, std::decay_t<T>>;
SizeTag(T&& item) : _item(std::forward<T>(item)) {}
SizeTag& operator = (const SizeTag&) = delete;
inline const T& get() const {return _item;}
template <typename ArchiverT>
auto save(ArchiverT & ar) const { return ar(_item); }
template <typename ArchiverT>
auto load(ArchiverT & ar) { return ar(_item); }
private:
type _item;
};
// Function: make_size_tag
template <typename T>
SizeTag<T> make_size_tag(T&& t) {
return { std::forward<T>(t) };
}
// ----------------------------------------------------------------------------
// Size Wrapper
// ----------------------------------------------------------------------------
// Class: MapItem
template <typename KeyT, typename ValueT>
class MapItem {
public:
using KeyType = std::conditional_t <std::is_lvalue_reference_v<KeyT>, KeyT, std::decay_t<KeyT>>;
using ValueType = std::conditional_t <std::is_lvalue_reference_v<ValueT>, ValueT, std::decay_t<ValueT>>;
MapItem(KeyT&& k, ValueT&& v) : _key(std::forward<KeyT>(k)), _value(std::forward<ValueT>(v)) {}
MapItem& operator = (const MapItem&) = delete;
inline const KeyT& key() const { return _key; }
inline const ValueT& value() const { return _value; }
template <typename ArchiverT>
auto save(ArchiverT & ar) const { return ar(_key, _value); }
template <typename ArchiverT>
auto load(ArchiverT & ar) { return ar(_key, _value); }
private:
KeyType _key;
ValueType _value;
};
// Function: make_kv_pair
template <typename KeyT, typename ValueT>
MapItem<KeyT, ValueT> make_kv_pair(KeyT&& k, ValueT&& v) {
return { std::forward<KeyT>(k), std::forward<ValueT>(v) };
}
// ----------------------------------------------------------------------------
// Serializer Definition
// ----------------------------------------------------------------------------
template <typename T>
constexpr auto is_default_serializable_v = (
std::is_arithmetic_v<T> ||
std::is_enum_v<T> ||
is_std_basic_string_v<T> ||
is_std_vector_v<T> ||
is_std_deque_v<T> ||
is_std_list_v<T> ||
is_std_forward_list_v<T> ||
is_std_map_v<T> ||
is_std_unordered_map_v<T> ||
is_std_set_v<T> ||
is_std_unordered_set_v<T> ||
is_std_duration_v<T> ||
is_std_time_point_v<T> ||
is_std_variant_v<T> ||
is_std_optional_v<T> ||
is_std_tuple_v<T> ||
is_std_array_v<T>
);
// Class: Serializer
template <typename Stream, typename SizeType = std::streamsize>
class Serializer {
public:
Serializer(Stream& stream);
template <typename... T>
SizeType operator()(T&&... items);
private:
Stream& _stream;
template <typename T,
std::enable_if_t<!is_default_serializable_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_forward_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_map_v<std::decay_t<T>> ||
is_std_unordered_map_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_set_v<std::decay_t<T>> ||
is_std_unordered_set_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
};
// Constructor
template <typename Stream, typename SizeType>
Serializer<Stream, SizeType>::Serializer(Stream& stream) : _stream(stream) {
}
// Operator ()
template <typename Stream, typename SizeType>
template <typename... T>
SizeType Serializer<Stream, SizeType>::operator() (T&&... items) {
return (_save(std::forward<T>(items)) + ...);
}
// arithmetic data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
_stream.write(reinterpret_cast<const char*>(std::addressof(t)), sizeof(t));
return sizeof(t);
}
// std::basic_string
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
auto sz = _save(make_size_tag(t.size()));
_stream.write(
reinterpret_cast<const char*>(t.data()),
t.size()*sizeof(typename U::value_type)
);
return sz + t.size()*sizeof(typename U::value_type);
}
// std::vector
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
auto sz = _save(make_size_tag(t.size()));
if constexpr (std::is_arithmetic_v<typename U::value_type>) {
_stream.write(
reinterpret_cast<const char*>(t.data()),
t.size() * sizeof(typename U::value_type)
);
sz += t.size() * sizeof(typename U::value_type);
} else {
for(auto&& item : t) {
sz += _save(item);
}
}
return sz;
}
// std::list and std::deque
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// std::forward_list
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_forward_list_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(std::distance(t.begin(), t.end())));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// std::map and std::unordered_map
template <typename Stream, typename SizeType>
template <typename T, std::enable_if_t<
is_std_map_v<std::decay_t<T>> ||
is_std_unordered_map_v<std::decay_t<T>>,
void
>*>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& [k, v] : t) {
sz += _save(make_kv_pair(k, v));
}
return sz;
}
// std::set and std::unordered_set
template <typename Stream, typename SizeType>
template <typename T, std::enable_if_t<
is_std_set_v<std::decay_t<T>> ||
is_std_unordered_set_v<std::decay_t<T>>,
void
>*>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// enum data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
return _save(static_cast<std::underlying_type_t<U>>(t));
}
// duration data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.count());
}
// time point data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.time_since_epoch());
}
// optional data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
if(bool flag = t.has_value(); flag) {
return _save(flag) + _save(*t);
}
else {
return _save(flag);
}
}
// variant type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.index()) +
std::visit([&] (auto&& arg){ return _save(arg);}, t);
}
// tuple type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return std::apply(
[&] (auto&&... args) {
return (_save(std::forward<decltype(args)>(args)) + ... + 0);
},
std::forward<T>(t)
);
}
// array
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
static_assert(std::tuple_size<U>::value > 0, "Array size can't be zero");
SizeType sz;
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
_stream.write(reinterpret_cast<const char*>(t.data()), sizeof(t));
sz = sizeof(t);
}
else {
sz = 0;
for(auto&& item : t) {
sz += _save(item);
}
}
return sz;
}
// custom save method
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<!is_default_serializable_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return t.save(*this);
}
// ----------------------------------------------------------------------------
// DeSerializer Definition
// ----------------------------------------------------------------------------
template <typename T>
constexpr auto is_default_deserializable_v =
std::is_arithmetic_v<T> ||
std::is_enum_v<T> ||
is_std_basic_string_v<T> ||
is_std_vector_v<T> ||
is_std_deque_v<T> ||
is_std_list_v<T> ||
is_std_forward_list_v<T> ||
is_std_map_v<T> ||
is_std_unordered_map_v<T> ||
is_std_set_v<T> ||
is_std_unordered_set_v<T> ||
is_std_duration_v<T> ||
is_std_time_point_v<T> ||
is_std_variant_v<T> ||
is_std_optional_v<T> ||
is_std_tuple_v<T> ||
is_std_array_v<T>;
// Class: Deserializer
template <typename Stream, typename SizeType = std::streamsize>
class Deserializer {
public:
Deserializer(Stream& stream);
template <typename... T>
SizeType operator()(T&&... items);
private:
Stream& _stream;
// Function: _variant_helper
template <
size_t I = 0, typename... ArgsT,
std::enable_if_t<I==sizeof...(ArgsT)>* = nullptr
>
SizeType _variant_helper(size_t, std::variant<ArgsT...>&);
// Function: _variant_helper
template <
size_t I = 0, typename... ArgsT,
std::enable_if_t<I<sizeof...(ArgsT)>* = nullptr
>
SizeType _variant_helper(size_t, std::variant<ArgsT...>&);
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<
is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>> ||
is_std_forward_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_map_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_unordered_map_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_set_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_unordered_set_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<!is_default_deserializable_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
};
// Constructor
template <typename Stream, typename SizeType>
Deserializer<Stream, SizeType>::Deserializer(Stream& stream) : _stream(stream) {
}
// Operator ()
template <typename Stream, typename SizeType>
template <typename... T>
SizeType Deserializer<Stream, SizeType>::operator() (T&&... items) {
return (_load(std::forward<T>(items)) + ...);
}
// Function: _variant_helper
template <typename Stream, typename SizeType>
template <size_t I, typename... ArgsT, std::enable_if_t<I==sizeof...(ArgsT)>*>
SizeType Deserializer<Stream, SizeType>::_variant_helper(size_t, std::variant<ArgsT...>&) {
return 0;
}
// Function: _variant_helper
template <typename Stream, typename SizeType>
template <size_t I, typename... ArgsT, std::enable_if_t<I<sizeof...(ArgsT)>*>
SizeType Deserializer<Stream, SizeType>::_variant_helper(size_t i, std::variant<ArgsT...>& v) {
if(i == 0) {
using type = ExtractType_t<I, std::variant<ArgsT...>>;
if(v.index() != I) {
static_assert(
std::is_default_constructible<type>::value,
"Failed to archive variant (type should be default constructible T())"
);
v = type();
}
return _load(*std::get_if<type>(&v));
}
return _variant_helper<I+1, ArgsT...>(i-1, v);
}
// arithmetic data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
_stream.read(reinterpret_cast<char*>(std::addressof(t)), sizeof(t));
return sizeof(t);
}
// std::basic_string
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_chars;
auto sz = _load(make_size_tag(num_chars));
t.resize(num_chars);
_stream.read(reinterpret_cast<char*>(t.data()), num_chars*sizeof(typename U::value_type));
return sz + num_chars*sizeof(typename U::value_type);
}
// std::vector
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
t.resize(num_data);
_stream.read(reinterpret_cast<char*>(t.data()), num_data * sizeof(typename U::value_type));
sz += num_data * sizeof(typename U::value_type);
}
else {
t.resize(num_data);
for(auto && v : t) {
sz += _load(v);
}
}
return sz;
}
// std::deque, std::list, and std::forward_list
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>> ||
is_std_forward_list_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.resize(num_data);
for(auto && v : t) {
sz += _load(v);
}
return sz;
}
// std::map
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_map_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
auto hint = t.begin();
typename U::key_type k;
typename U::mapped_type v;
for(size_t i=0; i<num_data; ++i) {
sz += _load(make_kv_pair(k, v));
hint = t.emplace_hint(hint, std::move(k), std::move(v));
}
return sz;
}
// std::unordered_map
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_unordered_map_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
t.reserve(num_data);
typename U::key_type k;
typename U::mapped_type v;
for(size_t i=0; i<num_data; ++i) {
sz += _load(make_kv_pair(k, v));
t.emplace(std::move(k), std::move(v));
}
return sz;
}
// std::set
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_set_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
auto hint = t.begin();
typename U::key_type k;
for(size_t i=0; i<num_data; ++i) {
sz += _load(k);
hint = t.emplace_hint(hint, std::move(k));
}
return sz;
}
// std::unordered_set
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_unordered_set_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
t.reserve(num_data);
typename U::key_type k;
for(size_t i=0; i<num_data; ++i) {
sz += _load(k);
t.emplace(std::move(k));
}
return sz;
}
// enum data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
std::underlying_type_t<U> k;
auto sz = _load(k);
t = static_cast<U>(k);
return sz;
}
// duration data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::rep count;
auto s = _load(count);
t = U{count};
return s;
}
// time point data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::duration elapsed;
auto s = _load(elapsed);
t = U{elapsed};
return s;
}
// optional data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
bool has_value;
auto s = _load(has_value);
if(has_value) {
if(!t) {
t = typename U::value_type();
}
s += _load(*t);
}
else {
t.reset();
}
return s;
}
// variant type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
std::decay_t<decltype(t.index())> idx;
auto s = _load(idx);
return s + _variant_helper(idx, t);
}
// tuple type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
return std::apply(
[&] (auto&&... args) {
return (_load(std::forward<decltype(args)>(args)) + ... + 0);
},
std::forward<T>(t)
);
}
// array
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
static_assert(std::tuple_size<U>::value > 0, "Array size can't be zero");
SizeType sz;
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
_stream.read(reinterpret_cast<char*>(t.data()), sizeof(t));
sz = sizeof(t);
}
else {
sz = 0;
for(auto && v : t) {
sz += _load(v);
}
}
return sz;
}
// custom load method
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<!is_default_deserializable_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
return t.load(*this);
}
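// Example round trip (illustrative sketch, not part of the original header):
//   std::stringstream ss;
//   tf::Serializer<std::stringstream> writer(ss);
//   std::vector<int> out{1, 2, 3};
//   writer(out);                              // returns the bytes written
//   tf::Deserializer<std::stringstream> reader(ss);
//   std::vector<int> in;
//   reader(in);                               // in == {1, 2, 3}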
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/os.hpp | #pragma once
#include <cstdlib>
#include <cstdio>
#include <string>
#define TF_OS_LINUX 0
#define TF_OS_DRAGONFLY 0
#define TF_OS_FREEBSD 0
#define TF_OS_NETBSD 0
#define TF_OS_OPENBSD 0
#define TF_OS_DARWIN 0
#define TF_OS_WINDOWS 0
#define TF_OS_CNK 0
#define TF_OS_HURD 0
#define TF_OS_SOLARIS 0
#define TF_OS_UNIX 0
#ifdef _WIN32
#undef TF_OS_WINDOWS
#define TF_OS_WINDOWS 1
#endif
#ifdef __CYGWIN__
#undef TF_OS_WINDOWS
#define TF_OS_WINDOWS 1
#endif
#if (defined __APPLE__ && defined __MACH__)
#undef TF_OS_DARWIN
#define TF_OS_DARWIN 1
#endif
// in some ppc64 linux installations, only the second condition is met
#if (defined __linux)
#undef TF_OS_LINUX
#define TF_OS_LINUX 1
#elif (defined __linux__)
#undef TF_OS_LINUX
#define TF_OS_LINUX 1
#else
#endif
#if (defined __DragonFly__)
#undef TF_OS_DRAGONFLY
#define TF_OS_DRAGONFLY 1
#endif
#if (defined __FreeBSD__)
#undef TF_OS_FREEBSD
#define TF_OS_FREEBSD 1
#endif
#if (defined __NetBSD__)
#undef TF_OS_NETBSD
#define TF_OS_NETBSD 1
#endif
#if (defined __OpenBSD__)
#undef TF_OS_OPENBSD
#define TF_OS_OPENBSD 1
#endif
#if (defined __bgq__)
#undef TF_OS_CNK
#define TF_OS_CNK 1
#endif
#if (defined __GNU__)
#undef TF_OS_HURD
#define TF_OS_HURD 1
#endif
#if (defined __sun)
#undef TF_OS_SOLARIS
#define TF_OS_SOLARIS 1
#endif
#if (1 != \
TF_OS_LINUX + TF_OS_DRAGONFLY + TF_OS_FREEBSD + TF_OS_NETBSD + \
TF_OS_OPENBSD + TF_OS_DARWIN + TF_OS_WINDOWS + TF_OS_HURD + \
TF_OS_SOLARIS)
#define TF_OS_UNKNOWN 1
#endif
#if TF_OS_LINUX || TF_OS_DRAGONFLY || TF_OS_FREEBSD || TF_OS_NETBSD || \
TF_OS_OPENBSD || TF_OS_DARWIN || TF_OS_HURD || TF_OS_SOLARIS
#undef TF_OS_UNIX
#define TF_OS_UNIX 1
#endif
//-----------------------------------------------------------------------------
// Cache line alignment
//-----------------------------------------------------------------------------
#if defined(__i386__) || defined(__x86_64__)
#define TF_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
// TODO
// This is the L1 D-cache line size of our Power7 machines.
// Need to check if this is appropriate for other PowerPC64 systems.
#define TF_CACHELINE_SIZE 128
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures.
// There are even implementations with cache line sizes configurable
// at boot time.
#if defined(__ARM_ARCH_5T__)
#define TF_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define TF_CACHELINE_SIZE 64
#endif
#endif
#ifndef TF_CACHELINE_SIZE
// A reasonable default guess. Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define TF_CACHELINE_SIZE 64
#endif
//-----------------------------------------------------------------------------
// pause
//-----------------------------------------------------------------------------
//#if __has_include (<immintrin.h>)
// #define TF_HAS_MM_PAUSE 1
// #include <immintrin.h>
//#endif
namespace tf {
// Struct: CachelineAligned
// Due to hardware prefetching, we typically align to twice the cache line size.
template <typename T>
struct CachelineAligned {
alignas (2*TF_CACHELINE_SIZE) T data;
};
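// Example (illustrative): pad per-thread slots so that neighbouring entries
// do not share a cache line, avoiding false sharing.
//   tf::CachelineAligned<long> counters[8];
//   counters[thread_id].data++;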
// Function: get_env
inline std::string get_env(const std::string& str) {
#ifdef _MSC_VER
char *ptr = nullptr;
size_t len = 0;
if(_dupenv_s(&ptr, &len, str.c_str()) == 0 && ptr != nullptr) {
// _dupenv_s reports the buffer length including the terminating null, so
// construct from the C string to avoid embedding a trailing '\0'.
std::string res(ptr);
std::free(ptr);
return res;
}
return "";
#else
auto ptr = std::getenv(str.c_str());
return ptr ? ptr : "";
#endif
}
// Function: has_env
inline bool has_env(const std::string& str) {
#ifdef _MSC_VER
char *ptr = nullptr;
size_t len = 0;
if(_dupenv_s(&ptr, &len, str.c_str()) == 0 && ptr != nullptr) {
std::free(ptr);
return true;
}
return false;
#else
auto ptr = std::getenv(str.c_str());
return ptr ? true : false;
#endif
}
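// Example (illustrative; the environment variable name is hypothetical):
//   if(tf::has_env("TF_NUM_THREADS")) {
//     auto n = std::stoul(tf::get_env("TF_NUM_THREADS"));
//   }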
// Procedure: relax_cpu
//inline void relax_cpu() {
//#ifdef TF_HAS_MM_PAUSE
// _mm_pause();
//#endif
//}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/utility/small_vector.hpp | // small vector modified from llvm
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
#if defined(__GNUC__)
#define TF_LIKELY(x) (__builtin_expect((x), 1))
#define TF_UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define TF_LIKELY(x) (x)
#define TF_UNLIKELY(x) (x)
#endif
/**
@file small_vector.hpp
@brief small vector include file
*/
namespace tf { namespace detail {
/**
@private
@brief NextCapacity - Returns the next power of two (in 64-bits)
that is strictly greater than A. Returns zero on overflow.
This function assumes A is positive.
*/
inline uint64_t NextCapacity(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
A |= (A >> 4);
A |= (A >> 8);
A |= (A >> 16);
A |= (A >> 32);
return A + 1;
}
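// Example (illustrative): NextCapacity(3) == 4 and NextCapacity(4) == 8,
// i.e. the result is always strictly greater than the argument.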
}} // end of namespace tf::detail --------------------------------------------
namespace tf {
/**
@private
*/
template <typename T>
struct IsPod : std::integral_constant<bool, std::is_standard_layout<T>::value &&
std::is_trivial<T>::value> {};
/**
@private
*/
class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
protected:
SmallVectorBase(void *FirstEl, size_t Size)
: BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
/// This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize){
size_t CurSizeBytes = size_in_bytes();
size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow.
if (NewCapacityInBytes < MinSizeInBytes) {
NewCapacityInBytes = MinSizeInBytes;
}
void *NewElts;
if (BeginX == FirstEl) {
NewElts = std::malloc(NewCapacityInBytes);
// Copy the elements over. No need to run dtors on PODs.
memcpy(NewElts, this->BeginX, CurSizeBytes);
} else {
// If this wasn't grown from the inline copy, grow the allocated space.
NewElts = realloc(this->BeginX, NewCapacityInBytes);
}
//assert(NewElts && "Out of memory");
this->EndX = (char*)NewElts+CurSizeBytes;
this->BeginX = NewElts;
this->CapacityX = (char*)this->BeginX + NewCapacityInBytes;
}
public:
/// This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
bool empty() const { return BeginX == EndX; }
};
/**
@private
*/
template <typename T, unsigned N> struct SmallVectorStorage;
/**
@private
*/
template <typename T, typename = void>
class SmallVectorTemplateCommon : public SmallVectorBase {
private:
template <typename, unsigned> friend struct SmallVectorStorage;
template <typename X>
struct AlignedUnionType {
alignas(X) std::byte buff[std::max(sizeof(std::byte), sizeof(X))];
};
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. Use an aligned byte array of sufficient size.
// deprecated in c++23
//typedef typename std::aligned_union<1, T>::type U;
typedef AlignedUnionType<T> U;
U FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
void grow_pod(size_t MinSizeInBytes, size_t TSize) {
SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
}
/// Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
/// Put this vector in a state of being small.
void resetToSmall() {
BeginX = EndX = CapacityX = &FirstEl;
}
void setEnd(T *P) { this->EndX = P; }
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
// forward iterator creation methods.
inline iterator begin() { return (iterator)this->BeginX; }
inline const_iterator begin() const { return (const_iterator)this->BeginX; }
inline iterator end() { return (iterator)this->EndX; }
inline const_iterator end() const { return (const_iterator)this->EndX; }
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
inline size_type size() const { return end()-begin(); }
inline size_type max_size() const { return size_type(-1) / sizeof(T); }
/// Return the total number of elements in the currently allocated buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
/// Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
inline reference operator[](size_type idx) {
//assert(idx < size());
return begin()[idx];
}
inline const_reference operator[](size_type idx) const {
//assert(idx < size());
return begin()[idx];
}
reference front() {
//assert(!empty());
return begin()[0];
}
const_reference front() const {
//assert(!empty());
return begin()[0];
}
reference back() {
//assert(!empty());
return end()[-1];
}
const_reference back() const {
//assert(!empty());
return end()[-1];
}
};
/**
@private
*/
template <typename T, bool isPodLike>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
while (S != E) {
--E;
E->~T();
}
}
/// Move the range [I, E) into the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(std::make_move_iterator(I),
std::make_move_iterator(E), Dest);
}
/// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
/// Grow the allocated memory (without initializing new elements), doubling
/// the size of the allocated memory. Guarantees space for at least one more
/// element, or MinSize more elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
}
void push_back(T &&Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(::std::move(Elt));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
};
/**
@private
*/
template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
// Always grow, even from zero.
size_t NewCapacity = size_t(tf::detail::NextCapacity(CurCapacity+2));
if (NewCapacity < MinSize)
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(std::malloc(NewCapacity*sizeof(T)));
// Move the elements over.
this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
std::free(this->begin());
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
/**
@private
*/
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
/// Move the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
// Just do a copy.
uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
// Arbitrary iterator types; just use the basic implementation.
std::uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template <typename T1, typename T2>
static void uninitialized_copy(
T1 *I, T1 *E, T2 *Dest,
typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
T2>::value>::type * = nullptr) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here. Note that I and E are iterators and thus might be
// invalid for memcpy if they are equal.
if (I != E)
memcpy(Dest, I, (E - I) * sizeof(T));
}
/// Double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
public:
void push_back(const T &Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
}
};
/**
@private
*/
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, IsPod<T>::value> {
typedef SmallVectorTemplateBase<T, IsPod<T>::value> SuperClass;
SmallVectorImpl(const SmallVectorImpl&) = delete;
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::const_iterator const_iterator;
typedef typename SuperClass::size_type size_type;
protected:
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, IsPod<T>::value>(N*sizeof(T)) {
}
public:
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
std::free(this->begin());
}
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
}
void resize(size_type N) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
new (&*I) T();
this->setEnd(this->begin()+N);
}
}
void resize(size_type N, const T &NV) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
std::uninitialized_fill(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
}
}
void reserve(size_type N) {
if (this->capacity() < N)
this->grow(N);
}
T pop_back_val() {
T Result = ::std::move(this->back());
this->pop_back();
return Result;
}
void swap(SmallVectorImpl &RHS);
/// Add the specified range to the end of the SmallVector.
template<typename in_iter>
void append(in_iter in_start, in_iter in_end) {
size_type NumInputs = std::distance(in_start, in_end);
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
this->uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
/// Append NumInputs copies of Elt to the end of the SmallVector.
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
void append(std::initializer_list<T> IL) {
append(IL.begin(), IL.end());
}
void assign(size_type NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
std::uninitialized_fill(this->begin(), this->end(), Elt);
}
void assign(std::initializer_list<T> IL) {
clear();
append(IL);
}
iterator erase(const_iterator CI) {
// Just cast away constness because this is a non-const member function.
iterator I = const_cast<iterator>(CI);
//assert(I >= this->begin() && "Iterator to erase is out of bounds.");
//assert(I < this->end() && "Erasing at past-the-end iterator.");
iterator N = I;
// Shift all elts down one.
std::move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return(N);
}
iterator erase(const_iterator CS, const_iterator CE) {
// Just cast away constness because this is a non-const member function.
iterator S = const_cast<iterator>(CS);
iterator E = const_cast<iterator>(CE);
//assert(S >= this->begin() && "Range to erase is out of bounds.");
//assert(S <= E && "Trying to erase invalid range.");
//assert(E <= this->end() && "Trying to erase past the end.");
iterator N = S;
// Shift all elts down.
iterator I = std::move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
iterator insert(iterator I, T &&Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(::std::move(Elt));
return this->end()-1;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(::std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = ::std::move(*EltPtr);
return I;
}
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
const T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = *EltPtr;
return I;
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->begin()+InsertElt;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->begin()+InsertElt;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
size_t NumToInsert = std::distance(From, To);
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
*J = *From;
++J; ++From;
}
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
void insert(iterator I, std::initializer_list<T> IL) {
insert(I, IL.begin(), IL.end());
}
template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
this->setEnd(this->end() + 1);
}
SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
}
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(size_type N) {
//assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
};
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this == &RHS) return;
// We can only avoid copying elements if neither vector is small.
if (!this->isSmall() && !RHS.isSmall()) {
std::swap(this->BeginX, RHS.BeginX);
std::swap(this->EndX, RHS.EndX);
std::swap(this->CapacityX, RHS.CapacityX);
return;
}
if (RHS.size() > this->capacity())
this->grow(RHS.size());
if (this->size() > RHS.capacity())
RHS.grow(this->size());
// Swap the shared elements.
size_t NumShared = this->size();
if (NumShared > RHS.size()) NumShared = RHS.size();
for (size_type i = 0; i != NumShared; ++i)
std::swap((*this)[i], RHS[i]);
// Copy over the extra elts.
if (this->size() > RHS.size()) {
size_t EltDiff = this->size() - RHS.size();
this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
RHS.setEnd(RHS.end()+EltDiff);
this->destroy_range(this->begin()+NumShared, this->end());
this->setEnd(this->begin()+NumShared);
} else if (RHS.size() > this->size()) {
size_t EltDiff = RHS.size() - this->size();
this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
this->setEnd(this->end() + EltDiff);
this->destroy_range(RHS.begin()+NumShared, RHS.end());
RHS.setEnd(RHS.begin()+NumShared);
}
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd;
if (RHSSize)
NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
else
NewEnd = this->begin();
// Destroy excess elements.
this->destroy_range(NewEnd, this->end());
// Trim.
this->setEnd(NewEnd);
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Copy construct the new elements in place.
this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
return *this;
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If the RHS isn't small, clear this vector and then steal its buffer.
if (!RHS.isSmall()) {
this->destroy_range(this->begin(), this->end());
if (!this->isSmall()) std::free(this->begin());
this->BeginX = RHS.BeginX;
this->EndX = RHS.EndX;
this->CapacityX = RHS.CapacityX;
RHS.resetToSmall();
return *this;
}
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd = this->begin();
if (RHSSize)
NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
// Destroy excess elements and trim the bounds.
this->destroy_range(NewEnd, this->end());
this->setEnd(NewEnd);
// Clear the RHS.
RHS.clear();
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: this may not actually make any sense if we can efficiently move
// elements.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Move-construct the new elements in place.
this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
RHS.clear();
return *this;
}
/**
@private
*/
template <typename T, unsigned N>
struct SmallVectorStorage {
/**
@private
*/
typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
};
/**
@private
*/
template <typename T> struct SmallVectorStorage<T, 1> {};
/**
@private
*/
template <typename T> struct SmallVectorStorage<T, 0> {};
/**
@brief class to define a vector optimized for small array
@tparam T data type
@tparam N threshold of the number of elements in the initial storage
The class defines a C++ STL-styled vector (a variable-sized array)
optimized for the case when the array is small.
It contains some number of elements in-place,
which allows it to avoid heap allocation when the actual number of
elements is below that threshold. This allows normal @em small cases to be
fast without losing generality for large inputs.
All the methods defined by [std::vector](https://en.cppreference.com/w/cpp/container/vector)
also apply to this class.
The class is stripped from the LLVM codebase.
*/
template <typename T, unsigned N = 2>
class SmallVector : public SmallVectorImpl<T> {
/// Inline space for elements which aren't stored in the base class.
SmallVectorStorage<T, N> Storage;
public:
/**
@brief constructs an empty vector
*/
SmallVector() : SmallVectorImpl<T>(N) {
}
/**
@brief constructs a vector with @c Size copies of elements with value @c Value
*/
explicit SmallVector(size_t Size, const T &Value = T())
: SmallVectorImpl<T>(N) {
this->assign(Size, Value);
}
/**
@brief constructs a vector with the contents of the range
<tt>[S, E)</tt>
*/
template<typename ItTy>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
this->append(S, E);
}
//template <typename RangeTy>
//explicit SmallVector(const tf::iterator_range<RangeTy> &R)
// : SmallVectorImpl<T>(N) {
// this->append(R.begin(), R.end());
//}
/**
@brief constructs a vector with the contents of the initializer list @c IL
*/
SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
this->assign(IL);
}
/**
@brief constructs the vector with the copy of the contents of @c RHS
*/
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
/**
@brief constructs the vector with the contents of @c RHS using move semantics
*/
SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
/**
@brief replaces the contents with a copy of the contents of @c RHS
*/
const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
/**
@brief replaces the contents with the contents of @c RHS using move semantics
*/
const SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
/**
@brief constructs a vector with the contents of @c RHS using move semantics
*/
SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
/**
@brief replaces the contents with the contents of @c RHS using move semantics
*/
const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
/**
@brief replaces the contents with the copy of the contents of an initializer list @c IL
*/
const SmallVector &operator=(std::initializer_list<T> IL) {
this->assign(IL);
return *this;
}
};
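/*
Example usage (an illustrative sketch added for clarity; not part of the
original LLVM-derived sources): a SmallVector<int, 4> keeps up to 4 elements
in inline storage and only heap-allocates when it grows beyond that.
@code{.cpp}
tf::SmallVector<int, 4> v {1, 2, 3};  // elements live in inline storage
v.push_back(4);                       // still inline: size() == N == 4
v.push_back(5);                       // exceeds N, transparently grows onto the heap
assert(v.size() == 5 && v[4] == 5);
@endcode
*/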
template<typename T, unsigned N>
static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
} // end tf namespace ---------------------------------------------------------
namespace std {
/// Implement std::swap in terms of SmallVector swap.
template<typename T>
inline void
swap(tf::SmallVectorImpl<T> &LHS, tf::SmallVectorImpl<T> &RHS) {
LHS.swap(RHS);
}
/// Implement std::swap in terms of SmallVector swap.
template<typename T, unsigned N>
inline void
swap(tf::SmallVector<T, N> &LHS, tf::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
} // end of namespace std ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/sycl_graph.hpp | #pragma once
#include <CL/sycl.hpp>
#include "sycl_meta.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// syclGraph class
// ----------------------------------------------------------------------------
// class: syclGraph
class syclGraph : public CustomGraphBase {
friend class syclNode;
friend class syclTask;
friend class syclFlow;
friend class Taskflow;
friend class Executor;
constexpr static int OFFLOADED = 0x01;
constexpr static int TOPOLOGY_CHANGED = 0x02;
public:
syclGraph() = default;
~syclGraph() = default;
syclGraph(const syclGraph&) = delete;
syclGraph(syclGraph&&);
syclGraph& operator = (const syclGraph&) = delete;
syclGraph& operator = (syclGraph&&);
template <typename... ArgsT>
syclNode* emplace_back(ArgsT&&...);
bool empty() const;
void clear();
void dump(std::ostream&, const void*, const std::string&) const override final;
private:
int _state {0};
std::vector<std::unique_ptr<syclNode>> _nodes;
};
// ----------------------------------------------------------------------------
// syclNode definitions
// ----------------------------------------------------------------------------
// class: syclNode
class syclNode {
friend class syclGraph;
friend class syclTask;
friend class syclFlow;
friend class Taskflow;
friend class Executor;
struct Empty {
};
struct CGH {
std::function<void(sycl::handler&)> work;
template <typename F>
CGH(F&& func) : work {std::forward<F>(func)} {}
};
using handle_t = std::variant<
Empty,
CGH
>;
public:
// variant index
constexpr static auto EMPTY = get_index_v<Empty, handle_t>;
constexpr static auto COMMAND_GROUP_HANDLER = get_index_v<CGH, handle_t>;
syclNode() = delete;
template <typename... ArgsT>
syclNode(syclGraph&, ArgsT&&...);
private:
syclGraph& _graph;
std::string _name;
int _level;
sycl::event _event;
handle_t _handle;
SmallVector<syclNode*> _successors;
SmallVector<syclNode*> _dependents;
void _precede(syclNode*);
};
// ----------------------------------------------------------------------------
// syclNode definitions
// ----------------------------------------------------------------------------
// Constructor
template <typename... ArgsT>
syclNode::syclNode(syclGraph& g, ArgsT&&... args) :
_graph {g},
_handle {std::forward<ArgsT>(args)...} {
}
// Procedure: _precede
inline void syclNode::_precede(syclNode* v) {
_graph._state |= syclGraph::TOPOLOGY_CHANGED;
_successors.push_back(v);
v->_dependents.push_back(this);
}
// ----------------------------------------------------------------------------
// syclGraph definitions
// ----------------------------------------------------------------------------
// Move constructor
inline syclGraph::syclGraph(syclGraph&& g) :
_nodes {std::move(g._nodes)} {
assert(g._nodes.empty());
}
// Move assignment
inline syclGraph& syclGraph::operator = (syclGraph&& rhs) {
// lhs
_nodes = std::move(rhs._nodes);
assert(rhs._nodes.empty());
return *this;
}
// Function: empty
inline bool syclGraph::empty() const {
return _nodes.empty();
}
// Procedure: clear
inline void syclGraph::clear() {
_state = syclGraph::TOPOLOGY_CHANGED;
_nodes.clear();
}
// Function: emplace_back
template <typename... ArgsT>
syclNode* syclGraph::emplace_back(ArgsT&&... args) {
_state |= syclGraph::TOPOLOGY_CHANGED;
auto node = std::make_unique<syclNode>(std::forward<ArgsT>(args)...);
_nodes.emplace_back(std::move(node));
return _nodes.back().get();
// TODO: object pool
//auto node = new syclNode(std::forward<ArgsT>(args)...);
//_nodes.push_back(node);
//return node;
}
// Procedure: dump the graph to a DOT format
inline void syclGraph::dump(
std::ostream& os, const void* root, const std::string& root_name
) const {
// recursive dump with stack
std::stack<std::tuple<const syclGraph*, const syclNode*, int>> stack;
stack.push(std::make_tuple(this, nullptr, 1));
int pl = 0;
while(!stack.empty()) {
auto [graph, parent, l] = stack.top();
stack.pop();
for(int i=0; i<pl-l+1; i++) {
os << "}\n";
}
if(parent == nullptr) {
if(root) {
os << "subgraph cluster_p" << root << " {\nlabel=\"syclFlow: ";
if(root_name.empty()) os << 'p' << root;
else os << root_name;
os << "\";\n" << "color=\"red\"\n";
}
else {
os << "digraph syclFlow {\n";
}
}
else {
os << "subgraph cluster_p" << parent << " {\nlabel=\"syclSubflow: ";
if(parent->_name.empty()) os << 'p' << parent;
else os << parent->_name;
os << "\";\n" << "color=\"purple\"\n";
}
for(auto& v : graph->_nodes) {
os << 'p' << v.get() << "[label=\"";
if(v->_name.empty()) {
os << 'p' << v.get() << "\"";
}
else {
os << v->_name << "\"";
}
os << "];\n";
for(const auto s : v->_successors) {
os << 'p' << v.get() << " -> " << 'p' << s << ";\n";
}
if(v->_successors.size() == 0) {
if(parent == nullptr) {
if(root) {
os << 'p' << v.get() << " -> p" << root << ";\n";
}
}
else {
os << 'p' << v.get() << " -> p" << parent << ";\n";
}
}
}
// set the previous level
pl = l;
}
for(int i=0; i<pl; i++) {
os << "}\n";
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/sycl_task.hpp | #pragma once
#include "sycl_graph.hpp"
/**
@file sycl_task.hpp
@brief syclTask include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// syclTask
// ----------------------------------------------------------------------------
/**
@class syclTask
@brief handle to a node of the internal SYCL graph
*/
class syclTask {
friend class syclFlow;
friend std::ostream& operator << (std::ostream&, const syclTask&);
public:
/**
@brief constructs an empty syclTask
*/
syclTask() = default;
/**
@brief copy-constructs a syclTask
*/
syclTask(const syclTask&) = default;
/**
@brief copy-assigns a syclTask
*/
syclTask& operator = (const syclTask&) = default;
/**
@brief adds precedence links from this to other tasks
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
syclTask& precede(Ts&&... tasks);
/**
@brief adds precedence links from other tasks to this
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
syclTask& succeed(Ts&&... tasks);
/**
@brief assigns a name to the task
@param name a @std_string acceptable string
@return @c *this
*/
syclTask& name(const std::string& name);
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors
*/
size_t num_successors() const;
/**
@brief queries the number of dependents
*/
size_t num_dependents() const;
/**
@brief queries if the task is associated with a syclNode
*/
bool empty() const;
/**
@brief dumps the task through an output stream
@tparam T output stream type with insertion operator (<<) defined
@param ostream an output stream target
*/
template <typename T>
void dump(T& ostream) const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
private:
syclTask(syclNode*);
syclNode* _node {nullptr};
};
// Constructor
inline syclTask::syclTask(syclNode* node) : _node {node} {
}
// Function: precede
template <typename... Ts>
syclTask& syclTask::precede(Ts&&... tasks) {
(_node->_precede(tasks._node), ...);
return *this;
}
// Function: succeed
template <typename... Ts>
syclTask& syclTask::succeed(Ts&&... tasks) {
(tasks._node->_precede(_node), ...);
return *this;
}
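/*
Example (an illustrative sketch; assumes @c sf is an existing tf::syclFlow):
expressing the dependencies A -> B and C -> B between three kernel tasks.
@code{.cpp}
tf::syclTask A = sf.single_task([](){});
tf::syclTask B = sf.single_task([](){});
tf::syclTask C = sf.single_task([](){});
A.precede(B);   // A runs before B
B.succeed(C);   // equivalent to C.precede(B)
@endcode
*/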
// Function: empty
inline bool syclTask::empty() const {
return _node == nullptr;
}
// Function: name
inline syclTask& syclTask::name(const std::string& name) {
_node->_name = name;
return *this;
}
// Function: name
inline const std::string& syclTask::name() const {
return _node->_name;
}
// Function: num_successors
inline size_t syclTask::num_successors() const {
return _node->_successors.size();
}
// Function: num_dependents
inline size_t syclTask::num_dependents() const {
return _node->_dependents.size();
}
// Procedure: dump
template <typename T>
void syclTask::dump(T& os) const {
os << "syclTask ";
if(_node->_name.empty()) os << _node;
else os << _node->_name;
}
// Function: for_each_successor
template <typename V>
void syclTask::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node->_successors.size(); ++i) {
visitor(syclTask(_node->_successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void syclTask::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node->_dependents.size(); ++i) {
visitor(syclTask(_node->_dependents[i]));
}
}
// ----------------------------------------------------------------------------
// global ostream
// ----------------------------------------------------------------------------
/**
@brief overload of ostream inserter operator for syclTask
*/
inline std::ostream& operator << (std::ostream& os, const syclTask& ct) {
ct.dump(os);
return os;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/sycl_execution_policy.hpp | #pragma once
/**
@file sycl_execution_policy.hpp
@brief SYCL execution policy include file
*/
namespace tf {
/**
@class syclExecutionPolicy
@brief class to define execution policy for SYCL standard algorithms
@tparam NT number of threads per block
@tparam VT number of work units per thread
Execution policy configures the kernel execution parameters in SYCL algorithms.
The first template argument, @c NT, the number of threads per block, should
always be a power of two.
The second template argument, @c VT, the number of work units per thread,
is recommended to be an odd number to avoid bank conflicts.
Refer to @ref SYCLSTDExecutionPolicy for details.
*/
template<unsigned NT, unsigned VT>
class syclExecutionPolicy {
static_assert(is_pow2(NT), "max # threads per block must be a power of two");
public:
/** @brief static constant for getting the number of threads per block */
const static unsigned nt = NT;
/** @brief static constant for getting the number of work units per thread */
const static unsigned vt = VT;
/** @brief static constant for getting the number of elements to process per block */
const static unsigned nv = NT*VT;
/**
@brief constructs an execution policy object with the given queue
*/
syclExecutionPolicy(sycl::queue& queue) : _queue{queue} {}
/**
@brief returns a mutable reference to the associated queue
*/
sycl::queue& queue() noexcept { return _queue; };
/**
@brief returns an immutable reference to the associated queue
*/
const sycl::queue& queue() const noexcept { return _queue; }
private:
sycl::queue& _queue;
};
/**
@brief default execution policy
*/
using syclDefaultExecutionPolicy = syclExecutionPolicy<512, 9>;
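/*
Example (an illustrative sketch): constructing an execution policy over an
existing queue; the kernel shape is fixed at compile time by NT and VT.
@code{.cpp}
sycl::queue queue;
tf::syclDefaultExecutionPolicy policy(queue);
// policy.nt == 512 threads per block, policy.vt == 9 work units per thread,
// policy.nv == 4608 elements processed per block
@endcode
*/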
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/sycl_meta.hpp | #pragma once
#include "sycl_execution_policy.hpp"
namespace tf {
// default warp size
inline constexpr unsigned SYCL_WARP_SIZE = 32;
// empty type
struct syclEmpty { };
// ----------------------------------------------------------------------------
// iterator unrolling
// ----------------------------------------------------------------------------
// Template unrolled looping construct.
template<unsigned i, unsigned count, bool valid = (i < count)>
struct syclIterate {
template<typename F>
static void eval(F f) {
f(i);
syclIterate<i + 1, count>::eval(f);
}
};
template<unsigned i, unsigned count>
struct syclIterate<i, count, false> {
template<typename F>
static void eval(F) { }
};
template<unsigned begin, unsigned end, typename F>
void sycl_iterate(F f) {
syclIterate<begin, end>::eval(f);
}
template<unsigned count, typename F>
void sycl_iterate(F f) {
sycl_iterate<0, count>(f);
}
template<unsigned count, typename T>
T reduce(const T(&x)[count]) {
T y;
sycl_iterate<count>([&](auto i) { y = i ? x[i] + y : x[i]; });
return y;
}
template<unsigned count, typename T>
void fill(T(&x)[count], T val) {
sycl_iterate<count>([&](auto i) { x[i] = val; });
}
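// Example (an illustrative sketch): sycl_iterate unrolls the loop body at
// compile time, so reduce and fill above operate on fixed-size arrays
// without a runtime loop:
//
// @code{.cpp}
// int x[4] = {1, 2, 3, 4};
// int sum = tf::reduce(x);  // 10, accumulated as x[3] + (x[2] + (x[1] + x[0]))
// int y[4];
// tf::fill(y, 7);           // y = {7, 7, 7, 7}
// @endcode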
// Invoke unconditionally.
template<unsigned nt, unsigned vt, typename F>
void sycl_strided_iterate(F f, unsigned tid) {
sycl_iterate<vt>([=](auto i) { f(i, nt * i + tid); });
}
// Check range.
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename F>
void sycl_strided_iterate(F f, unsigned tid, unsigned count) {
// Unroll the first vt0 elements of each thread.
if(vt0 > 1 && count >= nt * vt0) {
sycl_strided_iterate<nt, vt0>(f, tid); // No checking
} else {
sycl_iterate<vt0>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
// TODO: seems dummy when vt0 == vt
sycl_iterate<vt0, vt>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
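// Example (an illustrative sketch): with nt = 4 threads and vt = 2 work units
// per thread, thread tid visits indices tid and nt + tid (a strided layout):
//
// @code{.cpp}
// // tid = 1, nt = 4, vt = 2  =>  calls f(0, 1) and f(1, 5)
// tf::sycl_strided_iterate<4, 2>([](auto i, auto j) { /* use index j */ }, 1);
// @endcode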
template<unsigned vt, typename F>
void sycl_thread_iterate(F f, unsigned tid) {
sycl_iterate<vt>([=](auto i) { f(i, vt * tid + i); });
}
// ----------------------------------------------------------------------------
// syclRange
// ----------------------------------------------------------------------------
// syclRange
struct syclRange {
unsigned begin, end;
unsigned size() const { return end - begin; }
unsigned count() const { return size(); }
bool valid() const { return end > begin; }
};
inline syclRange sycl_get_tile(unsigned b, unsigned nv, unsigned count) {
return syclRange { nv * b, std::min(count, nv * (b + 1)) };
}
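// Example (an illustrative sketch): partitioning 1000 items into tiles of
// nv = 256 elements; the last tile is clipped to the total element count:
//
// @code{.cpp}
// auto t0 = tf::sycl_get_tile(0, 256, 1000);  // [0, 256), count() == 256
// auto t3 = tf::sycl_get_tile(3, 256, 1000);  // [768, 1000), count() == 232
// @endcode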
// ----------------------------------------------------------------------------
// syclArray
// ----------------------------------------------------------------------------
template<typename T, unsigned size>
struct syclArray {
T data[size];
T operator[](unsigned i) const { return data[i]; }
T& operator[](unsigned i) { return data[i]; }
syclArray() = default;
syclArray(const syclArray&) = default;
syclArray& operator=(const syclArray&) = default;
// Fill the array with x.
syclArray(T x) {
sycl_iterate<size>([&](unsigned i) { data[i] = x; });
}
};
template<typename T>
struct syclArray<T, 0> {
T operator[](unsigned) const { return T(); }
T& operator[](unsigned) { return *(T*)nullptr; }
};
template<typename T, typename V, unsigned size>
struct syclKVArray {
syclArray<T, size> keys;
syclArray<V, size> vals;
};
// ----------------------------------------------------------------------------
// thread reg <-> global mem
// ----------------------------------------------------------------------------
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I>
auto sycl_mem_to_reg_strided(I mem, unsigned tid, unsigned count) {
using T = typename std::iterator_traits<I>::value_type;
syclArray<T, vt> x;
sycl_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = mem[j]; }, tid, count
);
return x;
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t>
void sycl_reg_to_mem_strided(
syclArray<T, vt> x, unsigned tid, unsigned count, it_t mem) {
sycl_strided_iterate<nt, vt, vt0>(
[=](auto i, auto j) { mem[j] = x[i]; }, tid, count
);
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I, typename O>
auto sycl_transform_mem_to_reg_strided(
I mem, unsigned tid, unsigned count, O op
) {
using T = std::invoke_result_t<O, typename std::iterator_traits<I>::value_type>;
syclArray<T, vt> x;
sycl_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = op(mem[j]); }, tid, count
);
return x;
}
// ----------------------------------------------------------------------------
// thread reg <-> shared
// ----------------------------------------------------------------------------
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//void sycl_reg_to_shared_thread(
// syclArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_thread must have at least nt * vt storage");
//
// sycl_thread_iterate<vt>([&](auto i, auto j) { shared[j] = x[i]; }, tid);
//
// if(sync) __syncthreads();
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//auto sycl_shared_to_reg_thread(
// const T (&shared)[shared_size], unsigned tid, bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_thread must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_thread_iterate<vt>([&](auto i, auto j) {
// x[i] = shared[j];
// }, tid);
//
// if(sync) __syncthreads();
//
// return x;
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//void sycl_reg_to_shared_strided(
// syclArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_strided must have at least nt * vt storage");
//
// sycl_strided_iterate<nt, vt>(
// [&](auto i, auto j) { shared[j] = x[i]; }, tid
// );
//
// if(sync) __syncthreads();
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//auto sycl_shared_to_reg_strided(
// const T (&shared)[shared_size], unsigned tid, bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "shared_to_reg_strided must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_strided_iterate<nt, vt>([&](auto i, auto j) { x[i] = shared[j]; }, tid);
// if(sync) __syncthreads();
//
// return x;
//}
//
//template<
// unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
// unsigned shared_size
//>
//auto sycl_reg_to_mem_thread(
// syclArray<T, vt> x, unsigned tid,
// unsigned count, it_t mem, T (&shared)[shared_size]
//) {
// sycl_reg_to_shared_thread<nt>(x, tid, shared);
// auto y = sycl_shared_to_reg_strided<nt, vt>(shared, tid);
// sycl_reg_to_mem_strided<nt, vt, vt0>(y, tid, count, mem);
//}
//
//template<
// unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
// unsigned shared_size
//>
//auto sycl_mem_to_reg_thread(
// it_t mem, unsigned tid, unsigned count, T (&shared)[shared_size]
//) {
//
// auto x = sycl_mem_to_reg_strided<nt, vt, vt0>(mem, tid, count);
// sycl_reg_to_shared_strided<nt, vt>(x, tid, shared);
// auto y = sycl_shared_to_reg_thread<nt, vt>(shared, tid);
// return y;
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_shared_gather(
// const T(&data)[S], syclArray<unsigned, vt> indices, bool sync = true
//) {
//
// static_assert(S >= nt * vt,
// "shared_gather must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_iterate<vt>([&](auto i) { x[i] = data[indices[i]]; });
//
// if(sync) __syncthreads();
//
// return x;
//}
//
//
//
//// ----------------------------------------------------------------------------
//// reg<->reg
//// ----------------------------------------------------------------------------
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_reg_thread_to_strided(
// syclArray<T, vt> x, unsigned tid, T (&shared)[S]
//) {
// sycl_reg_to_shared_thread<nt>(x, tid, shared);
// return sycl_shared_to_reg_strided<nt, vt>(shared, tid);
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_reg_strided_to_thread(
// syclArray<T, vt> x, unsigned tid, T (&shared)[S]
//) {
// sycl_reg_to_shared_strided<nt>(x, tid, shared);
// return sycl_shared_to_reg_thread<nt, vt>(shared, tid);
//}
// ----------------------------------------------------------------------------
// syclLoadStoreIterator
// ----------------------------------------------------------------------------
template<typename L, typename S, typename T, typename I>
struct syclLoadStoreIterator : std::iterator_traits<const T*> {
L load;
S store;
I base;
syclLoadStoreIterator(L load_, S store_, I base_) :
load(load_), store(store_), base(base_) { }
struct assign_t {
L load;
S store;
I index;
assign_t& operator=(T rhs) {
static_assert(!std::is_same<S, syclEmpty>::value,
"load_iterator is being stored to.");
store(rhs, index);
return *this;
}
operator T() const {
static_assert(!std::is_same<L, syclEmpty>::value,
"store_iterator is being loaded from.");
return load(index);
}
};
assign_t operator[](I index) const {
return assign_t { load, store, base + index };
}
assign_t operator*() const {
return assign_t { load, store, base };
}
syclLoadStoreIterator operator+(I offset) const {
syclLoadStoreIterator cp = *this;
cp += offset;
return cp;
}
syclLoadStoreIterator& operator+=(I offset) {
base += offset;
return *this;
}
syclLoadStoreIterator operator-(I offset) const {
syclLoadStoreIterator cp = *this;
cp -= offset;
return cp;
}
syclLoadStoreIterator& operator-=(I offset) {
base -= offset;
return *this;
}
};
//template<typename T>
//struct trivial_load_functor {
// template<typename I>
// T operator()(I index) const {
// return T();
// }
//};
//template<typename T>
//struct trivial_store_functor {
// template<typename I>
// void operator()(T v, I index) const { }
//};
template <typename T, typename I = int, typename L, typename S>
auto sycl_make_load_store_iterator(L load, S store, I base = 0) {
return syclLoadStoreIterator<L, S, T, I>(load, store, base);
}
template <typename T, typename I = int, typename L>
auto sycl_make_load_iterator(L load, I base = 0) {
return sycl_make_load_store_iterator<T>(load, syclEmpty(), base);
}
template <typename T, typename I = int, typename S>
auto sycl_make_store_iterator(S store, I base = 0) {
return sycl_make_load_store_iterator<T>(syclEmpty(), store, base);
}
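// Example (an illustrative sketch): a load iterator that synthesizes values
// on the fly, so kernels can "read" a sequence without materializing it:
//
// @code{.cpp}
// auto it = tf::sycl_make_load_iterator<int>([](int i) { return 2 * i; });
// int v = it[5];  // v == 10; no underlying storage is touched
// @endcode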
// ----------------------------------------------------------------------------
// swap
// ----------------------------------------------------------------------------
template<typename T>
void sycl_swap(T& a, T& b) {
auto c = a;
a = b;
b = c;
}
// ----------------------------------------------------------------------------
// launch kernel
// ----------------------------------------------------------------------------
//template<typename F, typename... args_t>
//__global__ void sycl_kernel(F f, args_t... args) {
// f(threadIdx.x, blockIdx.x, args...);
//}
// ----------------------------------------------------------------------------
// operators
// ----------------------------------------------------------------------------
template <typename T>
struct sycl_plus : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a + b; }
};
template <typename T>
struct sycl_minus : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a - b; }
};
template <typename T>
struct sycl_multiplies : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a * b; }
};
template <typename T>
struct sycl_maximum : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a > b ? a : b; }
};
template <typename T>
struct sycl_minimum : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a < b ? a : b; }
};
template <typename T>
struct sycl_less : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a < b; }
};
template <typename T>
struct sycl_greater : public std::binary_function<T, T, T> {
T operator()(T a, T b) const { return a > b; }
};
// ----------------------------------------------------------------------------
// Memory Object
// ----------------------------------------------------------------------------
/**
@private
*/
template <typename T>
class syclScopedDeviceMemory {
public:
syclScopedDeviceMemory() = delete;
syclScopedDeviceMemory(size_t N, sycl::queue& queue) :
_queue {queue},
_N {N} {
if(N) {
_data = sycl::malloc_device<T>(N, _queue);
}
}
syclScopedDeviceMemory(syclScopedDeviceMemory&& rhs) :
_queue{std::move(rhs._queue)}, _data{rhs._data}, _N {rhs._N} {
rhs._data = nullptr;
rhs._N = 0;
}
~syclScopedDeviceMemory() {
if(_data) {
sycl::free(_data, _queue);
}
}
syclScopedDeviceMemory& operator = (syclScopedDeviceMemory&& rhs) {
if(_data) {
sycl::free(_data, _queue);
}
_queue = std::move(rhs._queue);
_data = rhs._data;
_N = rhs._N;
rhs._data = nullptr;
rhs._N = 0;
return *this;
}
size_t size() const { return _N; }
T* data() { return _data; }
const T* data() const { return _data; }
syclScopedDeviceMemory(const syclScopedDeviceMemory&) = delete;
syclScopedDeviceMemory& operator = (const syclScopedDeviceMemory&) = delete;
private:
sycl::queue& _queue;
T* _data {nullptr};
size_t _N {0};
};
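// Example (an illustrative sketch; note the class is marked @private and is
// an internal helper): RAII-managed USM device memory that is freed when the
// object goes out of scope.
//
// @code{.cpp}
// sycl::queue queue;
// {
//   tf::syclScopedDeviceMemory<float> dev(1024, queue);  // sycl::malloc_device
//   queue.memset(dev.data(), 0, dev.size()*sizeof(float)).wait();
// }  // sycl::free(dev.data(), queue) runs here
// @endcode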
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/syclflow.hpp | #pragma once
#include "../taskflow.hpp"
#include "sycl_task.hpp"
/**
@file syclflow.hpp
@brief main syclFlow include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// class definition: syclFlow
// ----------------------------------------------------------------------------
/**
@class syclFlow
@brief class for building a SYCL task dependency graph
*/
class syclFlow {
friend class Executor;
struct External {
syclGraph graph;
};
struct Internal {
Executor& executor;
Internal(Executor& e) : executor {e} {}
};
using handle_t = std::variant<External, Internal>;
public:
/**
@brief constructs a standalone %syclFlow from the given queue
A standalone %syclFlow does not go through any taskflow and
can be run by the caller thread using explicit offload methods
(e.g., tf::syclFlow::offload).
*/
syclFlow(sycl::queue& queue);
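/*
Example (an illustrative sketch): building a standalone syclFlow and running
it once from the caller thread.
@code{.cpp}
sycl::queue queue;
tf::syclFlow sf(queue);
size_t N = 1024;
int* data = sycl::malloc_shared<int>(N, queue);
tf::syclTask init = sf.fill(data, 0, N);
tf::syclTask bump = sf.single_task([=](){ data[0] = 1; });
init.precede(bump);   // fill completes before the single-task kernel
sf.offload();         // run the graph once
sycl::free(data, queue);
@endcode
*/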
/**
@brief destroys the %syclFlow
*/
~syclFlow() = default;
/**
@brief queries the emptiness of the graph
*/
bool empty() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief dumps the %syclFlow graph into a DOT format through an
output stream
*/
void dump(std::ostream& os) const;
/**
@brief clear the associated graph
*/
void clear();
// ------------------------------------------------------------------------
// Generic device operations
// ------------------------------------------------------------------------
/**
@brief creates a task that launches the given command group function object
@tparam F type of command group function object
@param func function object that is constructible from
std::function<void(sycl::handler&)>
Creates a task that is associated with the given command group.
In SYCL, each command group function object is given a unique
command group handler object to perform all the necessary work
required to correctly process data on a device using a kernel.
*/
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>* = nullptr
>
syclTask on(F&& func);
/**
@brief updates the task to the given command group function object
Similar to tf::syclFlow::on but operates on an existing task.
*/
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>* = nullptr
>
void on(syclTask task, F&& func);
/**
@brief creates a memcpy task that copies untyped data in bytes
@param tgt pointer to the target memory block
@param src pointer to the source memory block
@param bytes bytes to copy
@return a tf::syclTask handle
A memcpy task transfers @c bytes of data from a source location @c src
to a target location @c tgt. Both @c src and @c tgt may be either host
or USM pointers.
*/
syclTask memcpy(void* tgt, const void* src, size_t bytes);
/**
@brief creates a memset task that fills untyped data with a byte value
@param ptr pointer to the destination device memory area
@param value value to set for each byte of specified memory
@param bytes number of bytes to set
@return a tf::syclTask handle
Fills @c bytes of memory beginning at address @c ptr with @c value.
@c ptr must be a USM allocation.
@c value is interpreted as an unsigned char.
*/
syclTask memset(void* ptr, int value, size_t bytes);
/**
@brief creates a fill task that fills typed data with the given value
@tparam T trivially copyable value type
@param ptr pointer to the memory to fill
@param pattern pattern value to fill into the memory
@param count number of items to fill with the pattern value
Creates a task that fills the specified memory with the
specified value.
*/
template <typename T>
syclTask fill(void* ptr, const T& pattern, size_t count);
/**
@brief creates a copy task that copies typed data from a source to a target
memory block
@tparam T trivially copyable value type
@param target pointer to the target memory block
@param source pointer to the source memory block
@param count number of items of type @c T to copy
Creates a task that copies @c count items of type @c T from a source memory
location to a target memory location.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
syclTask copy(T* target, const T* source, size_t count);
/**
@brief creates a kernel task
@tparam ArgsT arguments types
@param args arguments to forward to the parallel_for methods defined
in the handler object
Creates a kernel task from a parallel_for method through the handler
object associated with a command group.
*/
template <typename...ArgsT>
syclTask parallel_for(ArgsT&&... args);
// ------------------------------------------------------------------------
// algorithms
// ------------------------------------------------------------------------
/**
@brief invokes a SYCL kernel function using only one thread
@tparam F kernel function type
@param func kernel function
Creates a task that launches the given function object using only one
kernel thread.
*/
template <typename F>
syclTask single_task(F&& func);
/**
@brief applies a callable to each dereferenced element of the data array
@tparam I iterator type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable a callable object to apply to the dereferenced iterator
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(auto itr = first; itr != last; itr++) {
callable(*itr);
}
@endcode
*/
template <typename I, typename C>
syclTask for_each(I first, I last, C&& callable);
/**
@brief applies a callable to each index in the range with the step size
@tparam I index type
@tparam C callable type
@param first beginning index
@param last last index
@param step step size
@param callable the callable to apply to each element in the data array
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
// step is positive [first, last)
for(auto i=first; i<last; i+=step) {
callable(i);
}
// step is negative [first, last)
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
*/
template <typename I, typename C>
syclTask for_each_index(I first, I last, I step, C&& callable);
/**
@brief applies a callable to a source range and stores the result in a target range
@tparam I iterator type
@tparam C callable type
@tparam S source types
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable the callable to apply to each element in the range
@param srcs iterators to the source ranges
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following
loop on a SYCL device:
@code{.cpp}
while (first != last) {
*first++ = callable(*src1++, *src2++, *src3++, ...);
}
@endcode
*/
template <typename I, typename C, typename... S>
syclTask transform(I first, I last, C&& callable, S... srcs);
/**
@brief performs parallel reduction over a range of items
@tparam I input iterator type
@tparam T value type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param result pointer to the result with an initialized value
@param op binary reduction operator
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
syclTask reduce(I first, I last, T* result, C&& op);
/**
@brief similar to tf::syclFlow::reduce but does not assume any initial
value to reduce
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
*result = *first++; // no initial value participates in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
syclTask uninitialized_reduce(I first, I last, T* result, C&& op);
// ------------------------------------------------------------------------
// offload methods
// ------------------------------------------------------------------------
/**
@brief offloads the %syclFlow onto a GPU and repeatedly runs it until
the predicate becomes true
@tparam P predicate type (a callable that takes no arguments and returns a boolean)
@param predicate a nullary predicate (returns @c true to stop)
Repetitively executes the present %syclFlow through the given queue object
until the predicate returns @c true.
By default, if users do not offload the %syclFlow,
the executor will offload it once.
*/
template <typename P>
void offload_until(P&& predicate);
/**
@brief offloads the %syclFlow and executes it by the given times
@param N number of executions
*/
void offload_n(size_t N);
/**
@brief offloads the %syclFlow and executes it once
*/
void offload();
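/*
Example (an illustrative sketch; assumes @c sf is a constructed tf::syclFlow):
the three offload flavors.
@code{.cpp}
sf.offload();                                     // run the graph once
sf.offload_n(10);                                 // run the graph 10 times
size_t round = 0;
sf.offload_until([&](){ return round++ == 5; });  // run until the predicate is true
@endcode
*/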
// ------------------------------------------------------------------------
// update methods
// ------------------------------------------------------------------------
/**
@brief rebinds the task to a memcpy task
Similar to tf::syclFlow::memcpy but operates on an existing task.
*/
void memcpy(syclTask task, void* tgt, const void* src, size_t bytes);
/**
@brief rebinds the task to a memset task
Similar to tf::syclFlow::memset but operates on an existing task.
*/
void memset(syclTask task, void* ptr, int value, size_t bytes);
/**
@brief rebinds the task to a fill task
Similar to tf::syclFlow::fill but operates on an existing task.
*/
template <typename T>
void fill(syclTask task, void* ptr, const T& pattern, size_t count);
/**
@brief rebinds the task to a copy task
Similar to tf::syclFlow::copy but operates on an existing task.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
void copy(syclTask task, T* target, const T* source, size_t count);
/**
@brief rebinds the task to a parallel-for kernel task
Similar to tf::syclFlow::parallel_for but operates on an existing task.
*/
template <typename...ArgsT>
void parallel_for(syclTask task, ArgsT&&... args);
/**
@brief rebinds the task to a single-threaded kernel task
Similar to tf::syclFlow::single_task but operates on an existing task.
*/
template <typename F>
void single_task(syclTask task, F&& func);
private:
syclFlow(Executor&, syclGraph&, sycl::queue&);
sycl::queue& _queue;
handle_t _handle;
syclGraph& _graph;
std::vector<syclNode*> _tpg;
std::queue<syclNode*> _bfs;
};
// constructor
inline syclFlow::syclFlow(sycl::queue& queue) :
_queue {queue},
_handle {std::in_place_type_t<External>{}},
_graph {std::get_if<External>(&_handle)->graph} {
}
// Construct the syclFlow from executor (internal graph)
inline syclFlow::syclFlow(Executor& e, syclGraph& g, sycl::queue& queue) :
_queue {queue},
_handle {std::in_place_type_t<Internal>{}, e},
_graph {g} {
}
// Function: empty
inline bool syclFlow::empty() const {
return _graph._nodes.empty();
}
// Function: num_tasks
inline size_t syclFlow::num_tasks() const {
return _graph._nodes.size();
}
// Procedure: dump
inline void syclFlow::dump(std::ostream& os) const {
_graph.dump(os, nullptr, "");
}
// Procedure: clear
inline void syclFlow::clear() {
_graph.clear();
}
// Function: memcpy
inline syclTask syclFlow::memcpy(void* tgt, const void* src, size_t bytes) {
return on([=](sycl::handler& h){ h.memcpy(tgt, src, bytes); });
}
// Function: memset
inline syclTask syclFlow::memset(void* ptr, int value, size_t bytes) {
return on([=](sycl::handler& h){ h.memset(ptr, value, bytes); });
}
// Function: fill
template <typename T>
syclTask syclFlow::fill(void* ptr, const T& pattern, size_t count) {
return on([=](sycl::handler& h){ h.fill(ptr, pattern, count); });
}
// Function: copy
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
syclTask syclFlow::copy(T* target, const T* source, size_t count) {
return on([=](sycl::handler& h){ h.memcpy(target, source, count*sizeof(T)); });
}
// Function: on
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>*
>
syclTask syclFlow::on(F&& f) {
auto node = _graph.emplace_back(_graph,
std::in_place_type_t<syclNode::CGH>{}, std::forward<F>(f)
);
return syclTask(node);
}
// Function: single_task
template <typename F>
syclTask syclFlow::single_task(F&& func) {
return on([f=std::forward<F>(func)] (sycl::handler& h) {
h.single_task(f);
});
}
// Function: parallel_for
template <typename...ArgsT>
syclTask syclFlow::parallel_for(ArgsT&&... args) {
return on([args...] (sycl::handler& h) { h.parallel_for(args...); });
}
// Procedure: offload_until
template <typename P>
void syclFlow::offload_until(P&& predicate) {
if(!(_graph._state & syclGraph::TOPOLOGY_CHANGED)) {
goto offload;
}
// levelize the graph
_tpg.clear();
// insert the first level of nodes into the queue
for(auto& u : _graph._nodes) {
u->_level = u->_dependents.size();
if(u->_level == 0) {
_bfs.push(u.get());
}
}
while(!_bfs.empty()) {
auto u = _bfs.front();
_bfs.pop();
_tpg.push_back(u);
for(auto v : u->_successors) {
if(--(v->_level) == 0) {
v->_level = u->_level + 1;
_bfs.push(v);
}
}
}
offload:
// offload the syclFlow graph
bool in_order = _queue.is_in_order();
while(!predicate()) {
// traverse node in a topological order
for(auto u : _tpg) {
switch(u->_handle.index()) {
// task type 1: command group handler
case syclNode::COMMAND_GROUP_HANDLER:
u->_event = _queue.submit([u, in_order](sycl::handler& h){
// wait on all predecessors
if(!in_order) {
for(auto p : u->_dependents) {
h.depends_on(p->_event);
}
}
std::get_if<syclNode::CGH>(&u->_handle)->work(h);
});
break;
}
}
// synchronize the execution
_queue.wait();
}
_graph._state = syclGraph::OFFLOADED;
}
// Procedure: offload_n
inline void syclFlow::offload_n(size_t n) {
offload_until([repeat=n] () mutable { return repeat-- == 0; });
}
// Procedure: offload
inline void syclFlow::offload() {
offload_until([repeat=1] () mutable { return repeat-- == 0; });
}
// Function: on
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>*
>
void syclFlow::on(syclTask task, F&& f) {
std::get_if<syclNode::CGH>(&task._node->_handle)->work =
std::forward<F>(f);
}
// Function: memcpy
inline void syclFlow::memcpy(
syclTask task, void* tgt, const void* src, size_t bytes
) {
on(task, [=](sycl::handler& h){ h.memcpy(tgt, src, bytes); });
}
// Function: memset
inline void syclFlow::memset(
syclTask task, void* ptr, int value, size_t bytes
) {
on(task, [=](sycl::handler& h){ h.memset(ptr, value, bytes); });
}
// Function: fill
template <typename T>
void syclFlow::fill(
syclTask task, void* ptr, const T& pattern, size_t count
) {
on(task, [=](sycl::handler& h){ h.fill(ptr, pattern, count); });
}
// Function: copy
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
void syclFlow::copy(
syclTask task, T* target, const T* source, size_t count
) {
on(task, [=](sycl::handler& h){
h.memcpy(target, source, count*sizeof(T));}
);
}
// Function: parallel_for
template <typename...ArgsT>
void syclFlow::parallel_for(syclTask task, ArgsT&&... args) {
on(task, [args...] (sycl::handler& h) { h.parallel_for(args...); });
}
// Function: single_task
template <typename F>
void syclFlow::single_task(syclTask task, F&& func) {
on(task, [f=std::forward<F>(func)] (sycl::handler& h) { h.single_task(f); });
}
// ############################################################################
// Forward declaration: FlowBuilder
// ############################################################################
// FlowBuilder::emplace_on
template <typename C, typename Q, std::enable_if_t<is_syclflow_task_v<C>, void>*>
Task FlowBuilder::emplace_on(C&& callable, Q&& q) {
auto n = _graph._emplace_back(
std::in_place_type_t<Node::syclFlow>{},
[c=std::forward<C>(callable), queue=std::forward<Q>(q)]
(Executor& e, Node* p) mutable {
e._invoke_syclflow_task_entry(p, c, queue);
},
std::make_unique<syclGraph>()
);
return Task(n);
}
// FlowBuilder::emplace
template <typename C, std::enable_if_t<is_syclflow_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& callable) {
return emplace_on(std::forward<C>(callable), sycl::queue{});
}
// ############################################################################
// Forward declaration: Executor
// ############################################################################
// Procedure: _invoke_syclflow_task_entry (syclFlow)
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>*
>
void Executor::_invoke_syclflow_task_entry(Node* node, C&& c, Q& queue) {
auto h = std::get_if<Node::syclFlow>(&node->_handle);
syclGraph* g = dynamic_cast<syclGraph*>(h->graph.get());
g->clear();
syclFlow sf(*this, *g, queue);
c(sf);
if(!(g->_state & syclGraph::OFFLOADED)) {
sf.offload();
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/algorithm/sycl_transform.hpp | #pragma once
#include "../syclflow.hpp"
namespace tf {
// Function: _transform_cgh
template <typename I, typename C, typename... S>
auto syclFlow::_transform_cgh(I first, I last, C&& op, S... srcs) {
// TODO: special case N == 0?
size_t N = std::distance(first, last);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
*(first + i) = op(*(srcs + i)...);
}
}
);
};
}
// Function: transform
template <typename I, typename C, typename... S>
syclTask syclFlow::transform(I first, I last, C&& op, S... srcs) {
return on(_transform_cgh(first, last, std::forward<C>(op), srcs...));
}
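// Example (an illustrative sketch; assumes sf is a tf::syclFlow and x, y, z
// are USM pointers to N floats): element-wise z[i] = x[i] + y[i].
//
// @code{.cpp}
// tf::syclTask task = sf.transform(
//   z, z + N, [](float a, float b) { return a + b; }, x, y
// );
// @endcode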
// Procedure: transform
template <typename I, typename C, typename... S>
void syclFlow::transform(
syclTask task, I first, I last, C&& op, S... srcs
) {
on(task, _transform_cgh(first, last, std::forward<C>(op), srcs...));
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/algorithm/reduce.hpp | #pragma once
#include "../syclflow.hpp"
namespace tf::detail {
// ----------------------------------------------------------------------------
// reduction helper functions
// ----------------------------------------------------------------------------
/** @private */
template<unsigned nt, typename T>
struct syclBlockReduce {
static const unsigned group_size = std::min(nt, SYCL_WARP_SIZE);
static const unsigned shm_size = std::max(nt, 2* group_size);
static const unsigned num_passes = log2(group_size);
static const unsigned num_items = nt / group_size;
static_assert(
nt && (0 == nt % SYCL_WARP_SIZE),
"syclBlockReduce requires num threads to be a multiple of warp_size (32)"
);
using shm_t = sycl::accessor<
T, 1, sycl::access::mode::read_write, sycl::access::target::local
>;
template<typename op_t>
T operator()(
sycl::nd_item<1>&, T, const shm_t&, unsigned, op_t, bool = true
) const;
};
// function: reduce to be called from a block
template<unsigned nt, typename T>
template<typename op_t>
T syclBlockReduce<nt, T>::operator ()(
sycl::nd_item<1>& item,
T x,
const shm_t& shm,
unsigned count,
op_t op,
bool ret
) const {
auto tid = item.get_local_id(0);
// Store your data into shared memory.
shm[tid] = x;
item.barrier(sycl::access::fence_space::local_space);
if(tid < group_size) {
// Each thread scans within its lane.
sycl_strided_iterate<group_size, num_items>([&](auto i, auto j) {
if(i > 0) {
x = op(x, shm[j]);
}
}, tid, count);
shm[tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
auto count2 = count < group_size ? count : group_size;
auto first = (1 & num_passes) ? group_size : 0;
if(tid < group_size) {
shm[first + tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
sycl_iterate<num_passes>([&](auto pass) {
if(tid < group_size) {
if(auto offset = 1 << pass; tid + offset < count2) {
x = op(x, shm[first + offset + tid]);
}
first = group_size - first;
shm[first + tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
});
if(ret) {
x = shm[0];
item.barrier(sycl::access::fence_space::local_space);
}
return x;
}
/** @private */
template <typename P, typename I, typename T, typename O>
sycl::event sycl_reduce_loop(
P&& p,
I input,
unsigned count,
T* res,
O op,
bool incl,
void* ptr,
std::vector<sycl::event> evs
) {
using E = std::decay_t<P>;
using R = syclBlockReduce<E::nt, T>;
auto buf = static_cast<T*>(ptr);
auto B = (count + E::nv - 1) / E::nv;
auto e = p.queue().submit([=, evs=std::move(evs)](sycl::handler& h) {
h.depends_on(evs);
// create a shared memory
typename R::shm_t shm(sycl::range<1>(R::shm_size), h);
h.parallel_for(
sycl::nd_range<1>{sycl::range<1>(B*E::nt), sycl::range<1>(E::nt)},
[=](sycl::nd_item<1> item) {
auto tid = item.get_local_id(0);
auto bid = item.get_group(0);
// get the tile of this group
auto tile = sycl_get_tile(bid, E::nv, count);
// load data from input to register
auto x = sycl_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
// reduce multiple values per thread into a scalar.
T s;
sycl_strided_iterate<E::nt, E::vt>(
[&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count()
);
// reduce to a scalar per block.
s = R()(
item, s, shm, (tile.count()<E::nt ? tile.count() : E::nt), op, false
);
if(!tid) {
(1 == B) ? *res = (incl ? op(*res, s) : s) : buf[bid] = s;
}
}
);
});
if(B > 1) {
return sycl_reduce_loop(p, buf, B, res, op, incl, buf+B, {e});
}
else {
return e;
}
}
} // end of namespace detail -------------------------------------------------
namespace tf {
/**
@brief queries the buffer size in bytes needed to call reduce kernels
@tparam P execution policy type
@tparam T value type
@param count number of elements to reduce
The function computes the byte size of the temporary buffer the caller must
allocate before invoking an asynchronous reduce.
Please refer to @ref SYCLSTDReduce for details.
*/
template <typename P, typename T>
unsigned sycl_reduce_buffer_size(unsigned count) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned n = 0;
for(auto b=B; b>1; n += (b=(b+E::nv-1)/E::nv));
return n*sizeof(T);
}
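// Example (an illustrative sketch; assumes queue, first, last, res, and count
// are defined): sizing the temporary buffer for an asynchronous reduce.
//
// @code{.cpp}
// using policy_t = tf::syclDefaultExecutionPolicy;
// unsigned bytes = tf::sycl_reduce_buffer_size<policy_t, float>(count);
// void* buf = sycl::malloc_device(bytes, queue);
// policy_t p(queue);
// tf::sycl_reduce_async(p, first, last, res, tf::sycl_plus<float>{}, buf, {}).wait();
// sycl::free(buf, queue);
// @endcode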
//// sycl reduction
//template <typename I, typename T, typename C, bool uninitialized>
//auto syclFlow::_reduce_cgh(I first, I last, T* res, C&& op) {
//
// // TODO: special case N == 0?
// size_t N = std::distance(first, last);
// size_t B = _default_group_size(N);
//
// return [=, op=std::forward<C>(op)](sycl::handler& handler) mutable {
//
// // create a shared memory
// sycl::accessor<
// T, 1, sycl::access::mode::read_write, sycl::access::target::local
// > shm(sycl::range<1>(B), handler);
//
// // perform parallel reduction
// handler.parallel_for(
// sycl::nd_range<1>{sycl::range<1>(B), sycl::range<1>(B)},
// [=] (sycl::nd_item<1> item) {
//
// size_t tid = item.get_global_id(0);
//
// if(tid >= N) {
// return;
// }
//
// shm[tid] = *(first+tid);
//
// for(size_t i=tid+B; i<N; i+=B) {
// shm[tid] = op(shm[tid], *(first+i));
// }
//
// item.barrier(sycl::access::fence_space::local_space);
//
// for(size_t s = B / 2; s > 0; s >>= 1) {
// if(tid < s && tid + s < N) {
// shm[tid] = op(shm[tid], shm[tid+s]);
// }
// item.barrier(sycl::access::fence_space::local_space);
// }
//
// if(tid == 0) {
// if constexpr (uninitialized) {
// *res = shm[0];
// }
// else {
// *res = op(*res, shm[0]);
// }
// }
// });
// };
//}
// ----------------------------------------------------------------------------
// SYCL standard reduce algorithms
// ----------------------------------------------------------------------------
/**
@brief performs parallel reduction over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template<typename P, typename I, typename T, typename O>
void sycl_reduce(P&& p, I first, I last, T* res, O op) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// allocate temporary buffer
auto tmp = sycl::malloc_device(
sycl_reduce_buffer_size<P, T>(count), p.queue()
);
// reduction loop
detail::sycl_reduce_loop(p, first, count, res, op, true, tmp, {}).wait();
// deallocate the temporary buffer
sycl::free(tmp, p.queue());
}
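// Example (an illustrative sketch; assumes data is a USM pointer to N floats):
// a synchronous sum reduction whose result includes the initial value.
//
// @code{.cpp}
// sycl::queue queue;
// float* res = sycl::malloc_shared<float>(1, queue);
// *res = 0.0f;  // the initial value participates in the reduction
// tf::syclDefaultExecutionPolicy p(queue);
// tf::sycl_reduce(p, data, data + N, res, tf::sycl_plus<float>{});
// // *res == 0.0f + data[0] + ... + data[N-1]
// sycl::free(res, queue);
// @endcode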
/**
@brief performs asynchronous parallel reduction over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
@return an SYCL event
Please refer to @ref SYCLSTDReduce for details.
*/
template<typename P, typename I, typename T, typename O>
sycl::event sycl_reduce_async(
P&& p, I first, I last, T* res, O op, void* buf, std::vector<sycl::event> dep
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return {};
}
// reduction loop
return detail::sycl_reduce_loop(
p, first, count, res, op, true, buf, std::move(dep)
);
}
/**
@brief performs parallel reduction over a range of items
without an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
*result = *first++; // no initial value participates in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
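A minimal sketch (reusing the policy @c p and USM data from the
tf::sycl_reduce example above):
@code{.cpp}
tf::sycl_uninitialized_reduce(p, data, data + N, res, std::plus<int>{});
// *res == data[0] + data[1] + ... + data[N-1]; no prior *res value is read
@endcode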
*/
template<typename P, typename I, typename T, typename O>
void sycl_uninitialized_reduce(P&& p, I first, I last, T* res, O op) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// allocate temporary buffer
auto tmp = sycl::malloc_device(
sycl_reduce_buffer_size<P, T>(count), p.queue()
);
// reduction loop
detail::sycl_reduce_loop(p, first, count, res, op, false, tmp, {}).wait();
// deallocate the temporary buffer
sycl::free(tmp, p.queue());
}
/**
@brief performs asynchronous parallel reduction over a range of items
without an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
@return a SYCL event
Please refer to @ref SYCLSTDReduce for details.
*/
template<typename P, typename I, typename T, typename O>
sycl::event sycl_uninitialized_reduce_async(
P&& p, I first, I last, T* res, O op, void* buf, std::vector<sycl::event> dep
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return {};
}
// reduction loop
return detail::sycl_reduce_loop(
p, first, count, res, op, false, buf, std::move(dep)
);
}
// ----------------------------------------------------------------------------
// syclFlow reduce
// ----------------------------------------------------------------------------
// Function: reduce
template <typename I, typename T, typename C>
syclTask syclFlow::reduce(I first, I last, T* res, C&& op) {
//return on(_reduce_cgh<I, T, C, false>(first, last, res, std::forward<C>(op)));
auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
std::distance(first, last)
);
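// MoC (presumably a move-on-copy wrapper) lets the by-copy lambda capture
// take ownership of the scoped device memory, so the temporary reduction
// buffer stays alive until the task callable is destroyed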
return on([=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
(sycl::queue& queue, std::vector<sycl::event> events) mutable {
syclDefaultExecutionPolicy p(queue);
return sycl_reduce_async(
p, first, last, res, op, buf.get().data(), std::move(events)
);
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename C>
syclTask syclFlow::uninitialized_reduce(I first, I last, T* res, C&& op) {
//return on(_reduce_cgh<I, T, C, true>(first, last, res, std::forward<C>(op)));
auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
(sycl::queue& queue, std::vector<sycl::event> events) mutable {
syclDefaultExecutionPolicy p(queue);
return sycl_uninitialized_reduce_async(
p, first, last, res, op, buf.get().data(), std::move(events)
);
});
}
// ----------------------------------------------------------------------------
// rebind methods
// ----------------------------------------------------------------------------
//// Function: reduce
//template <typename I, typename T, typename C>
//void syclFlow::reduce(syclTask task, I first, I last, T* res, C&& op) {
// //on(task, _reduce_cgh<I, T, C, false>(
// // first, last, res, std::forward<C>(op)
// //));
//
// auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
// std::distance(first, last)
// );
//
// on(task, [=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
// (sycl::queue& queue, std::vector<sycl::event> events) mutable {
// syclDefaultExecutionPolicy p(queue);
// return sycl_reduce_async(
// p, first, last, res, op, buf.get().data(), std::move(events)
// );
// });
//}
//
//// Function: uninitialized_reduce
//template <typename I, typename T, typename C>
//void syclFlow::uninitialized_reduce(
// syclTask task, I first, I last, T* res, C&& op
//) {
// //on(task, _reduce_cgh<I, T, C, true>(
// // first, last, res, std::forward<C>(op)
// //));
// auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
// std::distance(first, last)
// );
//
// on(task, [=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
// (sycl::queue& queue, std::vector<sycl::event> events) mutable {
// syclDefaultExecutionPolicy p(queue);
// return sycl_uninitialized_reduce_async(
// p, first, last, res, op, buf.get().data(), std::move(events)
// );
// });
//}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/sycl/algorithm/sycl_for_each.hpp | #pragma once
#include "../sycl_flow.hpp"
namespace tf {
// command group function object of for_each
template <typename I, typename C>
auto syclFlow::_for_each_cgh(I first, I last, C&& op) {
// TODO: special case N == 0?
size_t N = std::distance(first, last);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
// round N up to the nearest multiple of the group size B
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
op(*(first + i));
}
}
);
};
}
// command group function object of for_each_index
template <typename I, typename C>
auto syclFlow::_for_each_index_cgh(I first, I last, I step, C&& op) {
if(is_range_invalid(first, last, step)) {
TF_THROW("invalid range [", first, ", ", last, ") with step size ", step);
}
// TODO: special case when N is 0?
size_t N = distance(first, last, step);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
// round N up to the nearest multiple of the group size B
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
op(static_cast<I>(i)*step + first);
}
}
);
};
}
// ----------------------------------------------------------------------------
// for_each and for_each_index algorithms
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
syclTask syclFlow::for_each(I first, I last, C&& op) {
return on(_for_each_cgh(first, last, std::forward<C>(op)));
}
// Function: for_each_index
template <typename I, typename C>
syclTask syclFlow::for_each_index(I beg, I end, I inc, C&& op) {
return on(_for_each_index_cgh(beg, end, inc, std::forward<C>(op)));
}
// ----------------------------------------------------------------------------
// rebind
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
void syclFlow::for_each(syclTask task, I first, I last, C&& op) {
on(task, _for_each_cgh(first, last, std::forward<C>(op)));
}
// Function: for_each_index
template <typename I, typename C>
void syclFlow::for_each_index(syclTask task, I beg, I end, I inc, C&& op) {
on(task, _for_each_index_cgh(beg, end, inc, std::forward<C>(op)));
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/sort.hpp | #pragma once
#include "../core/executor.hpp"
namespace tf {
// threshold whether or not to perform parallel sort
template <typename I>
constexpr size_t parallel_sort_cutoff() {
//using value_type = std::decay_t<decltype(*std::declval<I>())>;
using value_type = typename std::iterator_traits<I>::value_type;
constexpr size_t object_size = sizeof(value_type);
if constexpr(std::is_same_v<value_type, std::string>) {
return 65536 / sizeof(std::string);
}
else {
if constexpr(object_size < 16) return 4096;
else if constexpr(object_size < 32) return 2048;
else if constexpr(object_size < 64) return 1024;
else if constexpr(object_size < 128) return 768;
else if constexpr(object_size < 256) return 512;
else if constexpr(object_size < 512) return 256;
else return 128;
}
}
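// For example (a worked instance of the thresholds above): sorting 32-bit
// integers (object_size == 4) uses a sequential cutoff of 4096 elements,
// while ranges of std::string use 65536 / sizeof(std::string) elements.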
// ----------------------------------------------------------------------------
// pattern-defeating quick sort (pdqsort)
// ----------------------------------------------------------------------------
// Sorts [begin, end) using insertion sort with the given comparison function.
template<typename RandItr, typename Compare>
void insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
if (begin == end) {
return;
}
for (RandItr cur = begin + 1; cur != end; ++cur) {
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first to avoid 2 moves for an element
// already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
}while (shift != begin && comp(tmp, *--shift_1));
*shift = std::move(tmp);
}
}
}
// Sorts [begin, end) using insertion sort with the given comparison function.
// Assumes *(begin - 1) is an element smaller than or equal to any element
// in [begin, end).
template<typename RandItr, typename Compare>
void unguarded_insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
if (begin == end) {
return;
}
for (RandItr cur = begin + 1; cur != end; ++cur) {
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first so we can avoid 2 moves
// for an element already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
}while (comp(tmp, *--shift_1));
*shift = std::move(tmp);
}
}
}
// Attempts to use insertion sort on [begin, end).
// Returns false and aborts sorting if more than
// partial_insertion_sort_limit element moves are required.
// Otherwise it successfully sorts and returns true.
template<typename RandItr, typename Compare>
bool partial_insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
using D = typename std::iterator_traits<RandItr>::difference_type;
// When we detect an already sorted partition, attempt an insertion sort
// that allows this amount of element moves before giving up.
constexpr auto partial_insertion_sort_limit = D{8};
if (begin == end) return true;
auto limit = D{0};
for (RandItr cur = begin + 1; cur != end; ++cur) {
if (limit > partial_insertion_sort_limit) {
return false;
}
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first so we can avoid 2 moves
// for an element already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
}while (shift != begin && comp(tmp, *--shift_1));
*shift = std::move(tmp);
limit += cur - shift;
}
}
return true;
}
// Partitions [begin, end) around pivot *begin using comparison function comp.
// Elements equal to the pivot are put in the right-hand partition.
// Returns the position of the pivot after partitioning and whether the passed
// sequence already was correctly partitioned.
// Assumes the pivot is a median of at least 3 elements and that [begin, end)
// is at least insertion_sort_threshold long.
template<typename Iter, typename Compare>
std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) {
using T = typename std::iterator_traits<Iter>::value_type;
// Move pivot into local for speed.
T pivot(std::move(*begin));
Iter first = begin;
Iter last = end;
// Find the first element greater than or equal to the pivot
// (the median of 3 guarantees this exists).
while (comp(*++first, pivot));
// Find the first element strictly smaller than the pivot.
// We have to guard this search if there was no element before *first.
if (first - 1 == begin) while (first < last && !comp(*--last, pivot));
else while (!comp(*--last, pivot));
// If the first pair of elements that should be swapped to partition
// are the same element, the passed in sequence already was correctly
// partitioned.
bool already_partitioned = first >= last;
// Keep swapping pairs of elements that are on the wrong side of the pivot.
// Previously swapped pairs guard the searches,
// which is why the first iteration is special-cased above.
while (first < last) {
std::iter_swap(first, last);
while (comp(*++first, pivot));
while (!comp(*--last, pivot));
}
// Put the pivot in the right place.
Iter pivot_pos = first - 1;
*begin = std::move(*pivot_pos);
*pivot_pos = std::move(pivot);
return std::make_pair(pivot_pos, already_partitioned);
}
// Similar function to the one above, except elements equal to the pivot
// are put to the left of the pivot and it doesn't check or return
// if the passed sequence already was partitioned.
// Since this is rarely used (the many equal case),
// and in that case pdqsort already has O(n) performance,
// no block quicksort is applied here for simplicity.
template<typename RandItr, typename Compare>
RandItr partition_left(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
T pivot(std::move(*begin));
RandItr first = begin;
RandItr last = end;
while (comp(pivot, *--last));
if (last + 1 == end) {
while (first < last && !comp(pivot, *++first));
}
else {
while (!comp(pivot, *++first));
}
while (first < last) {
std::iter_swap(first, last);
while (comp(pivot, *--last));
while (!comp(pivot, *++first));
}
RandItr pivot_pos = last;
*begin = std::move(*pivot_pos);
*pivot_pos = std::move(pivot);
return pivot_pos;
}
template<typename Iter, typename Compare>
void parallel_pdqsort(
tf::Subflow& sf,
Iter begin, Iter end, Compare comp,
int bad_allowed, bool leftmost = true
) {
// Partitions below this size are sorted sequentially
constexpr auto cutoff = parallel_sort_cutoff<Iter>();
// Partitions below this size are sorted using insertion sort
constexpr auto insertion_sort_threshold = 24;
// Partitions above this size use Tukey's ninther to select the pivot.
constexpr auto ninther_threshold = 128;
//using diff_t = typename std::iterator_traits<Iter>::difference_type;
// Use a while loop for tail recursion elimination.
while (true) {
//diff_t size = end - begin;
size_t size = end - begin;
// Insertion sort is faster for small arrays.
if (size < insertion_sort_threshold) {
if (leftmost) {
insertion_sort(begin, end, comp);
}
else {
unguarded_insertion_sort(begin, end, comp);
}
return;
}
if(size <= cutoff) {
std::sort(begin, end, comp);
return;
}
// Choose pivot as median of 3 or pseudomedian of 9.
//diff_t s2 = size / 2;
size_t s2 = size >> 1;
if (size > ninther_threshold) {
sort3(begin, begin + s2, end - 1, comp);
sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
std::iter_swap(begin, begin + s2);
}
else {
sort3(begin + s2, begin, end - 1, comp);
}
// If *(begin - 1) is the end of the right partition
// of a previous partition operation, there is no element in [begin, end)
// that is smaller than *(begin - 1).
// Then if our pivot compares equal to *(begin - 1) we change strategy,
// putting equal elements in the left partition,
// greater elements in the right partition.
// We do not have to recurse on the left partition,
// since it's sorted (all equal).
if (!leftmost && !comp(*(begin - 1), *begin)) {
begin = partition_left(begin, end, comp) + 1;
continue;
}
// Partition and get results.
const auto pair = partition_right(begin, end, comp);
const auto pivot_pos = pair.first;
const auto already_partitioned = pair.second;
// Check for a highly unbalanced partition.
//diff_t l_size = pivot_pos - begin;
//diff_t r_size = end - (pivot_pos + 1);
const size_t l_size = pivot_pos - begin;
const size_t r_size = end - (pivot_pos + 1);
const bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
// If we got a highly unbalanced partition we shuffle elements
// to break many patterns.
if (highly_unbalanced) {
// If we had too many bad partitions, switch to heapsort
// to guarantee O(n log n).
if (--bad_allowed == 0) {
std::make_heap(begin, end, comp);
std::sort_heap(begin, end, comp);
return;
}
if (l_size >= insertion_sort_threshold) {
std::iter_swap(begin, begin + l_size / 4);
std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
if (l_size > ninther_threshold) {
std::iter_swap(begin + 1, begin + (l_size / 4 + 1));
std::iter_swap(begin + 2, begin + (l_size / 4 + 2));
std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
}
}
if (r_size >= insertion_sort_threshold) {
std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
std::iter_swap(end - 1, end - r_size / 4);
if (r_size > ninther_threshold) {
std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
std::iter_swap(end - 2, end - (1 + r_size / 4));
std::iter_swap(end - 3, end - (2 + r_size / 4));
}
}
}
// decently balanced
else {
// If the sequence was already partitioned, try insertion sort.
if (already_partitioned &&
partial_insertion_sort(begin, pivot_pos, comp) &&
partial_insertion_sort(pivot_pos + 1, end, comp)
) {
return;
}
}
// Sort the left partition first using recursion and
// do tail recursion elimination for the right-hand partition.
sf.silent_async(
[&sf, begin, pivot_pos, comp, bad_allowed, leftmost] () mutable {
parallel_pdqsort(sf, begin, pivot_pos, comp, bad_allowed, leftmost);
}
);
begin = pivot_pos + 1;
leftmost = false;
}
}
// ----------------------------------------------------------------------------
// 3-way quick sort
// ----------------------------------------------------------------------------
// 3-way quick sort
template <typename RandItr, typename C>
void parallel_3wqsort(tf::Subflow& sf, RandItr first, RandItr last, C compare) {
using namespace std::string_literals;
constexpr auto cutoff = parallel_sort_cutoff<RandItr>();
sort_partition:
if(static_cast<size_t>(last - first) < cutoff) {
std::sort(first, last+1, compare);
return;
}
auto m = pseudo_median_of_nine(first, last, compare);
if(m != first) {
std::iter_swap(first, m);
}
auto l = first;
auto r = last;
auto f = std::next(first, 1);
bool is_swapped_l = false;
bool is_swapped_r = false;
while(f <= r) {
if(compare(*f, *l)) {
is_swapped_l = true;
std::iter_swap(l, f);
l++;
f++;
}
else if(compare(*l, *f)) {
is_swapped_r = true;
std::iter_swap(r, f);
r--;
}
else {
f++;
}
}
if(l - first > 1 && is_swapped_l) {
//sf.emplace([&](tf::Subflow& sfl) mutable {
// parallel_3wqsort(sfl, first, l-1, compare);
//});
sf.silent_async([&sf, first, l, &compare] () mutable {
parallel_3wqsort(sf, first, l-1, compare);
});
}
if(last - r > 1 && is_swapped_r) {
//sf.emplace([&](tf::Subflow& sfr) mutable {
// parallel_3wqsort(sfr, r+1, last, compare);
//});
//sf.silent_async([&sf, r, last, &compare] () mutable {
// parallel_3wqsort(sf, r+1, last, compare);
//});
first = r+1;
goto sort_partition;
}
//sf.join();
}
// ----------------------------------------------------------------------------
// tf::Taskflow::sort
// ----------------------------------------------------------------------------
// Function: sort
template <typename B, typename E, typename C>
Task FlowBuilder::sort(B beg, E end, C cmp) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
Task task = emplace([b=beg, e=end, cmp] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= parallel_sort_cutoff<B_t>()) {
std::sort(beg, end, cmp);
return;
}
//parallel_3wqsort(sf, beg, end-1, cmp);
parallel_pdqsort(sf, beg, end, cmp, log2(end - beg));
sf.join();
});
return task;
}
// Function: sort
template <typename B, typename E>
Task FlowBuilder::sort(B beg, E end) {
using value_type = std::decay_t<decltype(*std::declval<B>())>;
return sort(beg, end, std::less<value_type>{});
}
} // namespace tf ------------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/for_each.hpp | // reference:
// - gomp: https://github.com/gcc-mirror/gcc/blob/master/libgomp/iter.c
// - komp: https://github.com/llvm-mirror/openmp/blob/master/runtime/src/kmp_dispatch.cpp
#pragma once
#include "../core/executor.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// default parallel for
// ----------------------------------------------------------------------------
// Function: for_each
template <typename B, typename E, typename C>
Task FlowBuilder::for_each(B beg, E end, C c) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, c] (Subflow& sf) mutable {
// fetch the stateful values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
std::for_each(beg, end, c);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t z = 0;
// below this remaining-work threshold, claim fixed-size chunks one at a time
size_t p1 = 2 * W * (chunk_size + 1);
// coarse-grained factor: each claim takes about r/(2W) of the remaining r items
double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++) {
c(*beg++);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x< e0; x++) {
c(*beg++);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
// Function: for_each_index
template <typename B, typename E, typename S, typename C>
Task FlowBuilder::for_each_index(B beg, E end, S inc, C c){
using namespace std::string_literals;
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using S_t = std::decay_t<unwrap_ref_decay_t<S>>;
Task task = emplace([b=beg, e=end, a=inc, c] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
S_t inc = a;
if(is_range_invalid(beg, end, inc)) {
TF_THROW("invalid range [", beg, ", ", end, ") with step size ", inc);
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = distance(beg, end, inc);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(size_t x=0; x<N; x++, beg+=inc) {
c(beg);
}
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
auto s = static_cast<B_t>(s0) * inc + beg;
for(size_t x=s0; x<e0; x++, s+=inc) {
c(s);
}
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
auto s = static_cast<B_t>(s0) * inc + beg;
for(size_t x=s0; x<e0; x++, s+= inc) {
c(s);
}
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/reduce.hpp | #pragma once
#include "../core/executor.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// default reduction
// ----------------------------------------------------------------------------
template <typename B, typename E, typename T, typename O>
Task FlowBuilder::reduce(B beg, E end, T& init, O bop) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, &r=init, bop] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
//size_t C = (c == 0) ? 1 : c;
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(; beg!=end; r = bop(r, *beg++));
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
std::atomic<size_t> next(0);
auto loop = [=, &mutex, &next, &r] () mutable {
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
r = bop(r, *beg);
return;
}
// seed the local sum with the first two claimed elements so that
// bop is never applied to an uninitialized local value
auto beg1 = beg++;
auto beg2 = beg++;
T sum = bop(*beg1, *beg2);
size_t z = s0 + 2;
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
break;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++, beg++) {
sum = bop(sum, *beg);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x<e0; x++, beg++) {
sum = bop(sum, *beg);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
std::lock_guard<std::mutex> lock(mutex);
r = bop(r, sum);
};
for(size_t w=0; w<W; w++) {
//if(w*2 >= N) {
// break;
//}
//sf._named_silent_async(
// sf._worker, "part-"s + std::to_string(w), loop
//);
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
// ----------------------------------------------------------------------------
// default transform and reduction
// ----------------------------------------------------------------------------
template <typename B, typename E, typename T, typename BOP, typename UOP>
Task FlowBuilder::transform_reduce(
B beg, E end, T& init, BOP bop, UOP uop
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, &r=init, bop, uop] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
//size_t chunk_size = (c == 0) ? 1 : c;
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(; beg!=end; r = bop(std::move(r), uop(*beg++)));
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
std::atomic<size_t> next(0);
auto loop = [=, &mutex, &next, &r] () mutable {
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
r = bop(std::move(r), uop(*beg));
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T sum = bop(uop(*beg1), uop(*beg2));
size_t z = s0 + 2;
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
break;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++, beg++) {
sum = bop(std::move(sum), uop(*beg));
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x<e0; x++, beg++) {
sum = bop(std::move(sum), uop(*beg));
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
std::lock_guard<std::mutex> lock(mutex);
r = bop(std::move(r), std::move(sum));
};
for(size_t w=0; w<W; w++) {
//if(w*2 >= N) {
// break;
//}
//sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/critical.hpp | #pragma once
#include "../core/task.hpp"
/**
@file critical.hpp
@brief critical include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// CriticalSection
// ----------------------------------------------------------------------------
/**
@class CriticalSection
@brief class to create a critical region of limited workers to run tasks
tf::CriticalSection is a wrapper over tf::Semaphore and is specialized for
limiting the maximum concurrency over a set of tasks.
A critical section starts with an initial count representing that limit.
When a task is added to the critical section,
the task acquires and releases the semaphore internal to the critical section.
This design avoids explicit calls to tf::Task::acquire and tf::Task::release.
The following example creates a critical section of one worker and adds
the five tasks to the critical section.
@code{.cpp}
tf::Executor executor(8); // create an executor of 8 workers
tf::Taskflow taskflow;
// create a critical section of 1 worker
tf::CriticalSection critical_section(1);
tf::Task A = taskflow.emplace([](){ std::cout << "A" << std::endl; });
tf::Task B = taskflow.emplace([](){ std::cout << "B" << std::endl; });
tf::Task C = taskflow.emplace([](){ std::cout << "C" << std::endl; });
tf::Task D = taskflow.emplace([](){ std::cout << "D" << std::endl; });
tf::Task E = taskflow.emplace([](){ std::cout << "E" << std::endl; });
critical_section.add(A, B, C, D, E);
executor.run(taskflow).wait();
@endcode
*/
class CriticalSection : public Semaphore {
public:
/**
@brief constructs a critical region of a limited number of workers
*/
explicit CriticalSection(size_t max_workers = 1);
/**
@brief adds a task into the critical region
*/
template <typename... Tasks>
void add(Tasks...tasks);
};
inline CriticalSection::CriticalSection(size_t max_workers) :
Semaphore {max_workers} {
}
template <typename... Tasks>
void CriticalSection::add(Tasks... tasks) {
// fold expressions: each task acquires the semaphore before it runs
// and releases it after it finishes
(tasks.acquire(*this), ...);
(tasks.release(*this), ...);
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/data_pipeline.hpp | #pragma once
#include "pipeline.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// Class Definition: DataPipe
// ----------------------------------------------------------------------------
/**
@class DataPipe
@brief class to create a stage in a data-parallel pipeline
A data pipe represents a stage of a data-parallel pipeline.
A data pipe can run in either @em parallel or @em serial direction
(specified by tf::PipeType) and is associated with a callable to invoke
by the pipeline scheduler.
You need to use the template function, tf::make_data_pipe, to create
a data pipe. The input and output types of a tf::DataPipe should be decayed types
(though the library will always decay them for you using `std::decay`)
to allow internal storage to work.
The data will be passed by reference to your callable, where you can take
it by copy or reference.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input) {return std::to_string(input + 100);}
);
@endcode
In addition to the data, your callable can take an additional reference
of tf::Pipeflow in the second argument to probe the runtime information
for a stage task, such as its line number and token number:
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input, tf::Pipeflow& pf) {
printf("token=%lu, line=%lu\n", pf.token(), pf.line());
return std::to_string(input + 100);
}
);
@endcode
*/
template <typename Input, typename Output, typename C>
class DataPipe {
template <typename... Ps>
friend class DataPipeline;
public:
/**
@brief callable type of the data pipe
*/
using callable_t = C;
/**
@brief input type of the data pipe
*/
using input_t = Input;
/**
@brief output type of the data pipe
*/
using output_t = Output;
/**
@brief default constructor
*/
DataPipe() = default;
/**
@brief constructs a data pipe
You should use the helper function, tf::make_data_pipe,
to create a DataPipe object, especially when you need tf::DataPipe
to automatically deduce the lambda type.
*/
DataPipe(PipeType d, callable_t&& callable) :
_type{d}, _callable{std::forward<callable_t>(callable)} {
}
/**
@brief queries the type of the data pipe
A data pipe can be either parallel (tf::PipeType::PARALLEL) or serial
(tf::PipeType::SERIAL).
*/
PipeType type() const {
return _type;
}
/**
@brief assigns a new type to the data pipe
*/
void type(PipeType type) {
_type = type;
}
/**
@brief assigns a new callable to the data pipe
@tparam U callable type
@param callable a callable object constructible from the callable type
of this data pipe
Assigns a new callable to the pipe using universal forwarding.
*/
template <typename U>
void callable(U&& callable) {
_callable = std::forward<U>(callable);
}
private:
PipeType _type;
callable_t _callable;
};
/**
@brief function to construct a data pipe (tf::DataPipe)
@tparam Input input data type
@tparam Output output data type
@tparam C callable type
tf::make_data_pipe is a helper function to create a data pipe (tf::DataPipe)
in a data-parallel pipeline (tf::DataPipeline).
The first argument specifies the direction of the data pipe,
either tf::PipeType::SERIAL or tf::PipeType::PARALLEL,
and the second argument is a callable to invoke by the pipeline scheduler.
Input and output data types are specified via template parameters,
which will always be decayed by the library to its original form
for storage purpose.
The callable must take the input data type in its first argument
and returns a value of the output data type.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input) {
return std::to_string(input + 100);
}
);
@endcode
The callable can additionally take a reference of tf::Pipeflow,
which allows you to query the runtime information of a stage task,
such as its line number and token number.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input, tf::Pipeflow& pf) {
printf("token=%lu, line=%lu\n", pf.token(), pf.line());
return std::to_string(input + 100);
}
);
@endcode
*/
template <typename Input, typename Output, typename C>
auto make_data_pipe(PipeType d, C&& callable) {
return DataPipe<Input, Output, C>(d, std::forward<C>(callable));
}
// ----------------------------------------------------------------------------
// Class Definition: DataPipeline
// ----------------------------------------------------------------------------
/**
@class DataPipeline
@brief class to create a data-parallel pipeline scheduling framework
@tparam Ps data pipe types
Similar to tf::Pipeline, a tf::DataPipeline is a composable graph object
for users to create a <i>data-parallel pipeline scheduling framework</i>
using a module task in a taskflow.
The only difference is that tf::DataPipeline provides a data abstraction
for users to quickly express dataflow in a pipeline.
The following example creates a data-parallel pipeline of three stages
that generate dataflow from `void` to `int`, `std::string`, `float`, and `void`.
@code{.cpp}
#include <taskflow/taskflow.hpp>
#include <taskflow/algorithm/data_pipeline.hpp>
int main() {
// data flow => void -> int -> std::string -> float -> void
tf::Taskflow taskflow("pipeline");
tf::Executor executor;
const size_t num_lines = 4;
tf::DataPipeline pl(num_lines,
tf::make_data_pipe<void, int>(tf::PipeType::SERIAL, [&](tf::Pipeflow& pf) -> int{
if(pf.token() == 5) {
pf.stop();
return 0;
}
else {
return pf.token();
}
}),
tf::make_data_pipe<int, std::string>(tf::PipeType::SERIAL, [](int& input) {
return std::to_string(input + 100);
}),
tf::make_data_pipe<std::string, void>(tf::PipeType::SERIAL, [](std::string& input) {
std::cout << input << std::endl;
})
);
// build the pipeline graph using composition
taskflow.composed_of(pl).name("pipeline");
// dump the pipeline graph structure (with composition)
taskflow.dump(std::cout);
// run the pipeline
executor.run(taskflow).wait();
return 0;
}
@endcode
The pipeline schedules five tokens over four parallel lines in a circular fashion,
as depicted below:
@code{.shell-session}
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
@endcode
*/
template <typename... Ps>
class DataPipeline {
static_assert(sizeof...(Ps)>0, "must have at least one pipe");
/**
@private
*/
struct Line {
std::atomic<size_t> join_counter;
};
/**
@private
*/
struct PipeMeta {
PipeType type;
};
public:
/**
@brief internal storage type for each data token (default std::variant)
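For example, a pipeline whose pipes output @c int, @c std::string, and
@c void stores each token as
@c std::variant<std::monostate, int, std::string>:
@c void maps to @c std::monostate and duplicate types are merged.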
*/
using data_t = unique_variant_t<std::variant<std::conditional_t<
std::is_void_v<typename Ps::output_t>,
std::monostate,
std::decay_t<typename Ps::output_t>>...
>>;
/**
@brief constructs a data-parallel pipeline object
@param num_lines the number of parallel lines
@param ps a list of pipes
Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
DataPipeline(size_t num_lines, Ps&&... ps);
/**
@brief constructs a data-parallel pipeline object
@param num_lines the number of parallel lines
@param ps a tuple of pipes
Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes stored in a std::tuple.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
DataPipeline(size_t num_lines, std::tuple<Ps...>&& ps);
/**
@brief queries the number of parallel lines
The function returns the number of parallel lines given by the user
upon the construction of the pipeline.
The number of lines represents the maximum parallelism this pipeline
can achieve.
*/
size_t num_lines() const noexcept;
/**
@brief queries the number of pipes
The function returns the number of pipes given by the user
upon the construction of the pipeline.
*/
constexpr size_t num_pipes() const noexcept;
/**
@brief resets the pipeline
Resets the pipeline to its initial state. After resetting a pipeline,
its token identifier will start from zero as if the pipeline was just
constructed.
*/
void reset();
/**
@brief queries the number of generated tokens in the pipeline
The number represents the total number of scheduling tokens that have
been generated by the pipeline so far.
*/
size_t num_tokens() const noexcept;
/**
@brief obtains the graph object associated with the pipeline construct
The returned graph is primarily used as an opaque data structure
for creating a module task of this pipeline.
*/
Graph& graph();
private:
Graph _graph;
size_t _num_tokens;
std::tuple<Ps...> _pipes;
std::array<PipeMeta, sizeof...(Ps)> _meta;
std::vector<std::array<Line, sizeof...(Ps)>> _lines;
std::vector<Task> _tasks;
std::vector<Pipeflow> _pipeflows;
std::vector<CachelineAligned<data_t>> _buffer;
template <size_t... I>
auto _gen_meta(std::tuple<Ps...>&&, std::index_sequence<I...>);
void _on_pipe(Pipeflow&, Runtime&);
void _build();
};
// constructor
template <typename... Ps>
DataPipeline<Ps...>::DataPipeline(size_t num_lines, Ps&&... ps) :
_pipes {std::make_tuple(std::forward<Ps>(ps)...)},
_meta {PipeMeta{ps.type()}...},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines),
_buffer (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// constructor
template <typename... Ps>
DataPipeline<Ps...>::DataPipeline(size_t num_lines, std::tuple<Ps...>&& ps) :
_pipes {std::forward<std::tuple<Ps...>>(ps)},
_meta {_gen_meta(
std::forward<std::tuple<Ps...>>(ps), std::make_index_sequence<sizeof...(Ps)>{}
)},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines),
_buffer (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// Function: _get_meta
template <typename... Ps>
template <size_t... I>
auto DataPipeline<Ps...>::_gen_meta(std::tuple<Ps...>&& ps, std::index_sequence<I...>) {
return std::array{PipeMeta{std::get<I>(ps).type()}...};
}
// Function: num_lines
template <typename... Ps>
size_t DataPipeline<Ps...>::num_lines() const noexcept {
return _pipeflows.size();
}
// Function: num_pipes
template <typename... Ps>
constexpr size_t DataPipeline<Ps...>::num_pipes() const noexcept {
return sizeof...(Ps);
}
// Function: num_tokens
template <typename... Ps>
size_t DataPipeline<Ps...>::num_tokens() const noexcept {
return _num_tokens;
}
// Function: graph
template <typename... Ps>
Graph& DataPipeline<Ps...>::graph() {
return _graph;
}
// Function: reset
template <typename... Ps>
void DataPipeline<Ps...>::reset() {
_num_tokens = 0;
for(size_t l = 0; l<num_lines(); l++) {
_pipeflows[l]._pipe = 0;
_pipeflows[l]._line = l;
}
_lines[0][0].join_counter.store(0, std::memory_order_relaxed);
for(size_t l=1; l<num_lines(); l++) {
for(size_t f=1; f<num_pipes(); f++) {
_lines[l][f].join_counter.store(
static_cast<size_t>(_meta[f].type), std::memory_order_relaxed
);
}
}
for(size_t f=1; f<num_pipes(); f++) {
_lines[0][f].join_counter.store(1, std::memory_order_relaxed);
}
for(size_t l=1; l<num_lines(); l++) {
_lines[l][0].join_counter.store(
static_cast<size_t>(_meta[0].type) - 1, std::memory_order_relaxed
);
}
}
// Procedure: _on_pipe
template <typename... Ps>
void DataPipeline<Ps...>::_on_pipe(Pipeflow& pf, Runtime&) {
visit_tuple([&](auto&& pipe){
using data_pipe_t = std::decay_t<decltype(pipe)>;
using callable_t = typename data_pipe_t::callable_t;
using input_t = std::decay_t<typename data_pipe_t::input_t>;
using output_t = std::decay_t<typename data_pipe_t::output_t>;
// first pipe
if constexpr (std::is_invocable_v<callable_t, Pipeflow&>) {
// [](tf::Pipeflow&) -> void {}, i.e., we only have one pipe
if constexpr (std::is_void_v<output_t>) {
pipe._callable(pf);
// [](tf::Pipeflow&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(pf);
}
}
// other pipes without pipeflow in the second argument
else if constexpr (std::is_invocable_v<callable_t, input_t&>) {
// [](input_t&) -> void {}, i.e., the last pipe
if constexpr (std::is_void_v<output_t>) {
pipe._callable(std::get<input_t>(_buffer[pf._line].data));
// [](input_t&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(
std::get<input_t>(_buffer[pf._line].data)
);
}
}
// other pipes with pipeflow in the second argument
else if constexpr (std::is_invocable_v<callable_t, input_t&, Pipeflow&>) {
// [](input_t&, tf::Pipeflow&) -> void {}
if constexpr (std::is_void_v<output_t>) {
pipe._callable(std::get<input_t>(_buffer[pf._line].data), pf);
// [](input_t&, tf::Pipeflow&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(
std::get<input_t>(_buffer[pf._line].data), pf
);
}
}
//else if constexpr(std::is_invocable_v<callable_t, Pipeflow&, Runtime&>) {
// pipe._callable(pf, rt);
//}
else {
static_assert(dependent_false_v<callable_t>, "un-supported pipe callable type");
}
}, _pipes, pf._pipe);
}
// Procedure: _build
template <typename... Ps>
void DataPipeline<Ps...>::_build() {
using namespace std::literals::string_literals;
FlowBuilder fb(_graph);
// init task
_tasks[0] = fb.emplace([this]() {
return static_cast<int>(_num_tokens % num_lines());
}).name("cond");
// line task
for(size_t l = 0; l < num_lines(); l++) {
_tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable {
auto pf = &_pipeflows[l];
pipeline:
_lines[pf->_line][pf->_pipe].join_counter.store(
static_cast<size_t>(_meta[pf->_pipe].type), std::memory_order_relaxed
);
if (pf->_pipe == 0) {
pf->_token = _num_tokens;
if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) {
// here, the pipeline is not stopped yet because other
// lines of tasks may still be running their last stages
return;
}
++_num_tokens;
}
else {
_on_pipe(*pf, rt);
}
size_t c_f = pf->_pipe;
size_t n_f = (pf->_pipe + 1) % num_pipes();
size_t n_l = (pf->_line + 1) % num_lines();
pf->_pipe = n_f;
// ---- scheduling starts here ----
// Notice that the shared variable f must not be changed after this
// point because it can result in data race due to the following
// condition:
//
// a -> b
// | |
// v v
// c -> d
//
// d will be spawned by either c or b, so if c changes f but b spawns d
// then data race on f will happen
std::array<int, 2> retval;
size_t n = 0;
// downward dependency
if(_meta[c_f].type == PipeType::SERIAL &&
_lines[n_l][c_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 1;
}
// forward dependency
if(_lines[pf->_line][n_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 0;
}
// notice that the task index starts from 1
switch(n) {
case 2: {
rt.schedule(_tasks[n_l+1]);
goto pipeline;
}
case 1: {
if (retval[0] == 1) {
pf = &_pipeflows[n_l];
}
goto pipeline;
}
}
}).name("rt-"s + std::to_string(l));
_tasks[0].precede(_tasks[l+1]);
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/launch.hpp | #pragma once
#include "../core/async.hpp"
namespace tf {
// Function: launch_loop
template <typename P, typename Loop>
TF_FORCE_INLINE void launch_loop(
size_t N,
size_t W,
Runtime& rt,
std::atomic<size_t>& next,
P&& part,
Loop&& loop
) {
//static_assert(std::is_lvalue_reference_v<Loop>, "");
using namespace std::string_literals;
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= part.chunk_size() || w == W-1) {
loop();
break;
}
else {
rt.silent_async_unchecked("loop-"s + std::to_string(w), loop);
}
}
rt.join();
}
// Function: launch_loop
template <typename Loop>
TF_FORCE_INLINE void launch_loop(
size_t W,
size_t w,
Runtime& rt,
Loop&& loop
) {
using namespace std::string_literals;
if(w == W-1) {
loop();
}
else {
rt.silent_async_unchecked("loop-"s + std::to_string(w), loop);
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/pipeline.hpp | #pragma once
#include "../taskflow.hpp"
/**
@file pipeline.hpp
@brief pipeline include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Structure Definition: DeferredPipeflow
// ----------------------------------------------------------------------------
// For example:
// 12.defer(7); 12.defer(16);
// _____
// | |
// v |
// 7 12 16
// | ^
// |____ |
//
// DeferredPipeflow dpf of 12 :
// dpf._token = 12;
// dpf._num_deferrals = 1;
// dpf._dependents = std::unordered_set<size_t>{7, 16};
//
/** @private */
class DeferredPipeflow {
template <typename... Ps>
friend class Pipeline;
public:
DeferredPipeflow() = default;
DeferredPipeflow(const DeferredPipeflow&) = delete;
DeferredPipeflow(DeferredPipeflow&&) = delete;
DeferredPipeflow(size_t t, size_t n, std::unordered_set<size_t>&& dep) :
_token{t}, _num_deferrals{n}, _dependents{std::move(dep)} {
}
DeferredPipeflow& operator = (const DeferredPipeflow&) = delete;
DeferredPipeflow& operator = (DeferredPipeflow&&) = delete;
private:
// token id
size_t _token;
// number of deferrals
size_t _num_deferrals;
// dependents
// For example,
// 12.defer(7); 12.defer(16)
// _dependents = {7, 16}
std::unordered_set<size_t> _dependents;
};
// ----------------------------------------------------------------------------
// Class Definition: Pipeflow
// ----------------------------------------------------------------------------
/**
@class Pipeflow
@brief class to create a pipeflow object used by the pipe callable
Pipeflow represents a <i>scheduling token</i> in the pipeline scheduling
framework. A pipeflow is created by the pipeline scheduler at runtime to
pass to the pipe callable. Users can query the present statistics
of that scheduling token, including the line identifier, pipe identifier,
and token identifier, and build their application algorithms based on
these statistics.
At the first stage, users can explicitly call the stop method
to stop the pipeline scheduler.
@code{.cpp}
tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf){
std::cout << "token id=" << pf.token()
<< " at line=" << pf.line()
<< " at pipe=" << pf.pipe()
<< '\n';
}};
@endcode
Pipeflow can only be created privately by the tf::Pipeline and
be used through the pipe callable.
*/
class Pipeflow {
template <typename... Ps>
friend class Pipeline;
template <typename P>
friend class ScalablePipeline;
template <typename... Ps>
friend class DataPipeline;
public:
/**
@brief default constructor
*/
Pipeflow() = default;
/**
@brief queries the line identifier of the present token
*/
size_t line() const {
return _line;
}
/**
@brief queries the pipe identifier of the present token
*/
size_t pipe() const {
return _pipe;
}
/**
@brief queries the token identifier
*/
size_t token() const {
return _token;
}
/**
@brief stops the pipeline scheduling
Only the first pipe can call this method to stop the pipeline;
calling it from any other pipe throws an exception.
*/
void stop() {
if(_pipe != 0) {
TF_THROW("only the first pipe can stop the token");
}
_stop = true;
}
/**
@brief queries the number of deferrals
*/
size_t num_deferrals() const {
return _num_deferrals;
}
/**
@brief pushes a token into the dependents of this pipeflow,
deferring the current token until the given token has finished
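A minimal sketch of the deferral pattern (typically called from the
first pipe), deferring token 5 until tokens 2 and 3 have finished:
@code{.cpp}
tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf){
  if(pf.token() == 5 && pf.num_deferrals() == 0) {
    pf.defer(2);
    pf.defer(3);
  }
}};
@endcode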
*/
void defer(size_t token) {
_dependents.insert(token);
}
private:
// Regular data
size_t _line;
size_t _pipe;
size_t _token;
bool _stop;
// Data field for token dependencies
size_t _num_deferrals;
std::unordered_set<size_t> _dependents;
};
// ----------------------------------------------------------------------------
// Class Definition: PipeType
// ----------------------------------------------------------------------------
/**
@enum PipeType
@brief enumeration of all pipe types
*/
enum class PipeType : int {
/** @brief parallel type */
PARALLEL = 1,
/** @brief serial type */
SERIAL = 2
};
// ----------------------------------------------------------------------------
// Class Definition: Pipe
// ----------------------------------------------------------------------------
/**
@class Pipe
@brief class to create a pipe object for a pipeline stage
@tparam C callable type
A pipe represents a stage of a pipeline. A pipe can run in either
@em parallel or @em serial direction (specified by tf::PipeType)
and is coupled with a callable to invoke by the pipeline scheduler.
The callable must take a referenced tf::Pipeflow object in the first argument:
@code{.cpp}
Pipe{PipeType::SERIAL, [](tf::Pipeflow&){}}
@endcode
The pipeflow object is used to query the statistics of a scheduling token
in the pipeline, such as pipe, line, and token numbers.
*/
template <typename C = std::function<void(tf::Pipeflow&)>>
class Pipe {
template <typename... Ps>
friend class Pipeline;
template <typename P>
friend class ScalablePipeline;
public:
/**
@brief alias of the callable type
*/
using callable_t = C;
/**
@brief default constructor
*/
Pipe() = default;
/**
@brief constructs the pipe object
@param d pipe type (tf::PipeType)
@param callable callable type
The constructor constructs a pipe with the given direction
(tf::PipeType::SERIAL or tf::PipeType::PARALLEL) and the given callable.
The callable must take a referenced tf::Pipeflow object in the first argument.
@code{.cpp}
Pipe{PipeType::SERIAL, [](tf::Pipeflow&){}}
@endcode
When creating a pipeline, the direction of the first pipe must be serial
(tf::PipeType::SERIAL).
*/
Pipe(PipeType d, C&& callable) :
_type{d}, _callable{std::forward<C>(callable)} {
}
/**
@brief queries the type of the pipe
Returns the type of the callable.
*/
PipeType type() const {
return _type;
}
/**
@brief assigns a new type to the pipe
@param type a tf::PipeType variable
*/
void type(PipeType type) {
_type = type;
}
/**
@brief assigns a new callable to the pipe
@tparam U callable type
@param callable a callable object constructible from std::function<void(tf::Pipeflow&)>
Assigns a new callable to the pipe with universal forwarding.
*/
template <typename U>
void callable(U&& callable) {
_callable = std::forward<U>(callable);
}
private:
PipeType _type;
C _callable;
};
// ----------------------------------------------------------------------------
// Class Definition: Pipeline
// ----------------------------------------------------------------------------
/**
@class Pipeline
@brief class to create a pipeline scheduling framework
@tparam Ps pipe types
A pipeline is a composable graph object for users to create a
<i>pipeline scheduling framework</i> using a module task in a taskflow.
Unlike the conventional pipeline programming frameworks (e.g., Intel TBB),
%Taskflow's pipeline algorithm does not provide any data abstraction,
which often restricts users from optimizing data layouts in their applications,
but instead offers a flexible framework for users to customize their application data
atop our pipeline scheduling.
The following code creates a pipeline of four parallel lines to schedule
tokens through three serial pipes:
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
const size_t num_lines = 4;
const size_t num_pipes = 3;
// create a custom data buffer
std::array<std::array<int, num_pipes>, num_lines> buffer;
// create a pipeline graph of four concurrent lines and three serial pipes
tf::Pipeline pipeline(num_lines,
// first pipe must define a serial direction
tf::Pipe{tf::PipeType::SERIAL, [&buffer](tf::Pipeflow& pf) {
// generate only 5 scheduling tokens
if(pf.token() == 5) {
pf.stop();
}
// save the token id into the buffer
else {
buffer[pf.line()][pf.pipe()] = pf.token();
}
}},
tf::Pipe{tf::PipeType::SERIAL, [&buffer] (tf::Pipeflow& pf) {
// propagate the previous result to this pipe by adding one
buffer[pf.line()][pf.pipe()] = buffer[pf.line()][pf.pipe()-1] + 1;
}},
tf::Pipe{tf::PipeType::SERIAL, [&buffer](tf::Pipeflow& pf){
// propagate the previous result to this pipe by adding one
buffer[pf.line()][pf.pipe()] = buffer[pf.line()][pf.pipe()-1] + 1;
}}
);
// build the pipeline graph using composition
tf::Task init = taskflow.emplace([](){ std::cout << "ready\n"; })
.name("starting pipeline");
tf::Task task = taskflow.composed_of(pipeline)
.name("pipeline");
tf::Task stop = taskflow.emplace([](){ std::cout << "stopped\n"; })
.name("pipeline stopped");
// create task dependency
init.precede(task);
task.precede(stop);
// run the pipeline
executor.run(taskflow).wait();
@endcode
The above example creates a pipeline graph that schedules five tokens over
four parallel lines in a circular fashion, as depicted below:
@code{.shell-session}
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
@endcode
At each pipe stage, the program propagates the result to the next pipe
by adding one to the result stored in a custom data storage, @c buffer.
The pipeline scheduler will generate five scheduling tokens and then stop.
Internally, tf::Pipeline uses std::tuple to store the given sequence of pipes.
The definition of each pipe can be different, and the compiler decides
the object layout of the tuple.
After a pipeline is constructed, it is not possible to change its pipes.
If applications need to change these pipes, please use tf::ScalablePipeline.
*/
template <typename... Ps>
class Pipeline {
static_assert(sizeof...(Ps)>0, "must have at least one pipe");
/**
@private
*/
struct Line {
std::atomic<size_t> join_counter;
};
/**
@private
*/
struct PipeMeta {
PipeType type;
};
public:
/**
@brief constructs a pipeline object
@param num_lines the number of parallel lines
@param ps a list of pipes
Constructs a pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
Pipeline(size_t num_lines, Ps&&... ps);
/**
@brief constructs a pipeline object
@param num_lines the number of parallel lines
@param ps a tuple of pipes
Constructs a pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
Pipeline(size_t num_lines, std::tuple<Ps...>&& ps);
/**
@brief queries the number of parallel lines
The function returns the number of parallel lines given by the user
upon the construction of the pipeline.
The number of lines represents the maximum parallelism this pipeline
can achieve.
*/
size_t num_lines() const noexcept;
/**
@brief queries the number of pipes
The function returns the number of pipes given by the user
upon the construction of the pipeline.
*/
constexpr size_t num_pipes() const noexcept;
/**
@brief resets the pipeline
Resets the pipeline to its initial state. After resetting a pipeline,
its token identifier will start from zero as if the pipeline had just
been constructed.
*/
void reset();
/**
@brief queries the number of generated tokens in the pipeline
The number represents the total number of scheduling tokens that have been
generated by the pipeline so far.
*/
size_t num_tokens() const noexcept;
/**
@brief obtains the graph object associated with the pipeline construct
This method is primarily used as an opaque data structure for creating
a module task of this pipeline.
*/
Graph& graph();
private:
Graph _graph;
size_t _num_tokens;
std::tuple<Ps...> _pipes;
std::array<PipeMeta, sizeof...(Ps)> _meta;
std::vector<std::array<Line, sizeof...(Ps)>> _lines;
std::vector<Task> _tasks;
std::vector<Pipeflow> _pipeflows;
// queue of ready tokens (paired with their deferral times)
// For example,
// when 12 does not have any dependents,
// we put 12 in _ready_tokens queue
// Assume num_deferrals of 12 is 1,
// we push pair{12, 1} in the queue
std::queue<std::pair<size_t, size_t>> _ready_tokens;
// unordered_map of token dependencies
// For example,
// 12.defer(16); 13.defer(16);
// _token_dependencies has the following entry
// {key: 16, value: std::vector{12, 13}}.
std::unordered_map<size_t, std::vector<size_t>> _token_dependencies;
// unordered_map of deferred tokens
// For example,
// 12.defer(16); 13.defer(16);
// _deferred_tokens has the following two entries
// {key: 12, DeferredPipeflow of 12} and
// {key: 13, DeferredPipeflow of 13}
std::unordered_map<size_t, DeferredPipeflow> _deferred_tokens;
// variable to keep track of the longest deferred tokens
// For example,
// 2.defer(16)
// 5.defer(19)
// 5.defer(17),
// _longest_deferral will be 19 - beyond token 19, the pipeline
// incurs almost zero cost for handling deferred pipeflows
size_t _longest_deferral = 0;
template <size_t... I>
auto _gen_meta(std::tuple<Ps...>&&, std::index_sequence<I...>);
void _on_pipe(Pipeflow&, Runtime&);
void _build();
void _check_dependents(Pipeflow&);
void _construct_deferred_tokens(Pipeflow&);
void _resolve_token_dependencies(Pipeflow&);
};
// constructor
template <typename... Ps>
Pipeline<Ps...>::Pipeline(size_t num_lines, Ps&&... ps) :
_pipes {std::make_tuple(std::forward<Ps>(ps)...)},
_meta {PipeMeta{ps.type()}...},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// constructor
template <typename... Ps>
Pipeline<Ps...>::Pipeline(size_t num_lines, std::tuple<Ps...>&& ps) :
_pipes {std::forward<std::tuple<Ps...>>(ps)},
_meta {_gen_meta(
std::forward<std::tuple<Ps...>>(ps), std::make_index_sequence<sizeof...(Ps)>{}
)},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// Function: _gen_meta
template <typename... Ps>
template <size_t... I>
auto Pipeline<Ps...>::_gen_meta(std::tuple<Ps...>&& ps, std::index_sequence<I...>) {
return std::array{PipeMeta{std::get<I>(ps).type()}...};
}
// Function: num_lines
template <typename... Ps>
size_t Pipeline<Ps...>::num_lines() const noexcept {
return _pipeflows.size();
}
// Function: num_pipes
template <typename... Ps>
constexpr size_t Pipeline<Ps...>::num_pipes() const noexcept {
return sizeof...(Ps);
}
// Function: num_tokens
template <typename... Ps>
size_t Pipeline<Ps...>::num_tokens() const noexcept {
return _num_tokens;
}
// Function: graph
template <typename... Ps>
Graph& Pipeline<Ps...>::graph() {
return _graph;
}
// Function: reset
template <typename... Ps>
void Pipeline<Ps...>::reset() {
_num_tokens = 0;
for(size_t l = 0; l<num_lines(); l++) {
_pipeflows[l]._pipe = 0;
_pipeflows[l]._line = l;
_pipeflows[l]._num_deferrals = 0;
_pipeflows[l]._dependents.clear();
}
assert(_ready_tokens.empty() == true);
_token_dependencies.clear();
_deferred_tokens.clear();
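// Join-counter scheme (descriptive comment added for clarity): each counter
// holds the number of predecessor tasks that must finish before (line, pipe)
// can run. Entry (0, 0) starts ready; on lines l > 0 a pipe f > 0 waits on
// the previous pipe of its line and, if serial, on pipe f of the previous
// line; on line 0 a pipe f > 0 initially waits only on the previous pipe;
// pipe 0 of lines l > 0 waits only on pipe 0 of the previous line.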
_lines[0][0].join_counter.store(0, std::memory_order_relaxed);
for(size_t l=1; l<num_lines(); l++) {
for(size_t f=1; f<num_pipes(); f++) {
_lines[l][f].join_counter.store(
static_cast<size_t>(_meta[f].type), std::memory_order_relaxed
);
}
}
for(size_t f=1; f<num_pipes(); f++) {
_lines[0][f].join_counter.store(1, std::memory_order_relaxed);
}
for(size_t l=1; l<num_lines(); l++) {
_lines[l][0].join_counter.store(
static_cast<size_t>(_meta[0].type) - 1, std::memory_order_relaxed
);
}
}
// Procedure: _on_pipe
template <typename... Ps>
void Pipeline<Ps...>::_on_pipe(Pipeflow& pf, Runtime& rt) {
visit_tuple([&](auto&& pipe){
using callable_t = typename std::decay_t<decltype(pipe)>::callable_t;
if constexpr (std::is_invocable_v<callable_t, Pipeflow&>) {
pipe._callable(pf);
}
else if constexpr(std::is_invocable_v<callable_t, Pipeflow&, Runtime&>) {
pipe._callable(pf, rt);
}
else {
static_assert(dependent_false_v<callable_t>, "unsupported pipe callable type");
}
}, _pipes, pf._pipe);
}
// Procedure: _check_dependents
// Check and remove invalid dependents after on_pipe
// For example, users may defer a pipeflow to multiple tokens,
// and we need to remove invalid tokens.
// 12.defer(7); // valid only if 7 is deferred, or invalid otherwise
// 12.defer(16); // 16 is valid
template <typename... Ps>
void Pipeline<Ps...>::_check_dependents(Pipeflow& pf) {
//if (pf._dependents.size()) {
++pf._num_deferrals;
for (auto it = pf._dependents.begin(); it != pf._dependents.end();) {
// valid (e.g., 12.defer(16))
if (*it >= _num_tokens) {
_token_dependencies[*it].push_back(pf._token);
_longest_deferral = std::max(_longest_deferral, *it);
++it;
}
// valid or invalid (e.g., 12.defer(7))
else {
auto pit = _deferred_tokens.find(*it);
// valid (e.g., 7 is deferred)
if (pit != _deferred_tokens.end()) {
_token_dependencies[*it].push_back(pf._token);
++it;
}
// invalid (e.g., 7 has finished, so 12.defer(7) is a no-op)
else {
it = pf._dependents.erase(it);
}
}
}
}
// Procedure: _construct_deferred_tokens
// Construct a data structure for a deferred token
//
// For example,
// 12.defer(7); 12.defer(16);
// After _check_dependents, 12 needs to be deferred,
// so we will construct a data structure for 12 using hashmap:
// {key: 12, value: DeferredPipeflow of 12}
template <typename... Ps>
void Pipeline<Ps...>::_construct_deferred_tokens(Pipeflow& pf) {
//auto res = _deferred_tokens.emplace(
// pf._token, DeferredPipeflow{pf._token, pf._num_deferrals, std::move(pf._dependents)}
//);
// construct the deferred pipeflow with zero copy
//auto res = _deferred_tokens.emplace(
_deferred_tokens.emplace(
std::piecewise_construct,
std::forward_as_tuple(pf._token),
std::forward_as_tuple(
pf._token, pf._num_deferrals, std::move(pf._dependents)
)
);
//assert(res.second == true);
}
// Procedure: _resolve_token_dependencies
// Resolve dependencies for tokens that defer to current token
//
// For example,
// 12.defer(16);
// 13.defer(16);
// _token_dependencies will have the entry
// {key: 16, value: std::vector{12, 13}}
//
// When 16 finishes, we need to remove 16 from 12's and 13's
// individual_dependents
template <typename... Ps>
void Pipeline<Ps...>::_resolve_token_dependencies(Pipeflow& pf) {
if (auto it = _token_dependencies.find(pf._token);
it != _token_dependencies.end()) {
// iterate tokens that defer to pf._token
// (e.g., 12 and 13)
for(size_t target : it->second) {
auto dpf = _deferred_tokens.find(target);
assert(dpf != _deferred_tokens.end());
// erase pf._token from target's _dependents
// (e.g., remove 16 from 12's dependents)
dpf->second._dependents.erase(pf._token);
// dpf->second._dependent_satellites[pf._token]
//);
// target has no dependents
if (dpf->second._dependents.empty()) {
// push target into _ready_tokens queue
_ready_tokens.emplace(dpf->second._token, dpf->second._num_deferrals);
//_ready_tokens.push(
// std::make_pair(dpf->second._token, dpf->second._num_deferrals)
//);
// erase target from _deferred_tokens
_deferred_tokens.erase(dpf);
}
}
// remove pf._token from _token_dependencies
// (e.g., remove the entry
// {key: 16, value: std::vector{12, 13}} from _token_dependencies)
_token_dependencies.erase(it);
}
}
// Procedure: _build
template <typename... Ps>
void Pipeline<Ps...>::_build() {
using namespace std::literals::string_literals;
FlowBuilder fb(_graph);
// init task
_tasks[0] = fb.emplace([this]() {
return static_cast<int>(_num_tokens % num_lines());
}).name("cond");
// line task
for(size_t l = 0; l < num_lines(); l++) {
_tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable {
auto pf = &_pipeflows[l];
pipeline:
_lines[pf->_line][pf->_pipe].join_counter.store(
static_cast<size_t>(_meta[pf->_pipe].type), std::memory_order_relaxed
);
// The first pipe handles token initialization and token dependencies
if (pf->_pipe == 0) {
// _ready_tokens queue is not empty
// substitute pf with the token at the front of the queue
if (!_ready_tokens.empty()) {
pf->_token = _ready_tokens.front().first;
pf->_num_deferrals = _ready_tokens.front().second;
_ready_tokens.pop();
}
else {
pf->_token = _num_tokens;
pf->_num_deferrals = 0;
}
handle_token_dependency:
if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) {
// here, the pipeline is not stopped yet because other
// lines of tasks may still be running their last stages
return;
}
if (_num_tokens == pf->_token) {
++_num_tokens;
}
if (pf->_dependents.empty() == false){
// check if pf->_dependents contains valid dependents
_check_dependents(*pf);
// tokens in pf->_dependents are all valid dependents
if (pf->_dependents.size()) {
// construct a data structure for pf in _deferred_tokens
_construct_deferred_tokens(*pf);
goto pipeline;
}
// tokens in pf->_dependents are invalid dependents
// directly goto on_pipe on the same line
else {
goto handle_token_dependency;
}
}
// Every token within the deferral range needs to check
// if it can resolve dependencies on other tokens.
if (pf->_token <= _longest_deferral) {
_resolve_token_dependencies(*pf);
}
}
else {
_on_pipe(*pf, rt);
}
size_t c_f = pf->_pipe;
size_t n_f = (pf->_pipe + 1) % num_pipes();
size_t n_l = (pf->_line + 1) % num_lines();
pf->_pipe = n_f;
// ---- scheduling starts here ----
// Notice that the shared variable f must not be changed after this
// point because it can result in data race due to the following
// condition:
//
// a -> b
// | |
// v v
// c -> d
//
// d will be spawned by either c or b, so if c changes f but b spawns d
// then data race on f will happen
std::array<int, 2> retval;
size_t n = 0;
// downward dependency
if(_meta[c_f].type == PipeType::SERIAL &&
_lines[n_l][c_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 1;
}
// forward dependency
if(_lines[pf->_line][n_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 0;
}
// notice that the task index starts from 1
switch(n) {
case 2: {
rt.schedule(_tasks[n_l+1]);
goto pipeline;
}
case 1: {
// downward dependency
if (retval[0] == 1) {
pf = &_pipeflows[n_l];
}
// forward dependency
goto pipeline;
}
}
}).name("rt-"s + std::to_string(l));
_tasks[0].precede(_tasks[l+1]);
}
}
// ----------------------------------------------------------------------------
// Class Definition: ScalablePipeline
// ----------------------------------------------------------------------------
/**
@class ScalablePipeline
@brief class to create a scalable pipeline object
@tparam P type of the iterator to a range of pipes
A scalable pipeline is a composable graph object for users to create a
<i>pipeline scheduling framework</i> using a module task in a taskflow.
Unlike tf::Pipeline, which instantiates all pipes at construction time,
tf::ScalablePipeline allows variable assignments of pipes using range iterators.
Users can also reset a scalable pipeline to a different range of pipes
between runs. The following code creates a scalable pipeline of four
parallel lines to schedule tokens through three serial pipes in a custom storage,
and then resets the pipeline to a new range of five serial pipes:
@code{.cpp}
tf::Taskflow taskflow("pipeline");
tf::Executor executor;
const size_t num_lines = 4;
// create data storage
std::array<int, num_lines> buffer;
// define the pipe callable
auto pipe_callable = [&buffer] (tf::Pipeflow& pf) mutable {
switch(pf.pipe()) {
// first stage generates only 5 scheduling tokens and saves the
// token number into the buffer.
case 0: {
if(pf.token() == 5) {
pf.stop();
}
else {
printf("stage 1: input token = %zu\n", pf.token());
buffer[pf.line()] = pf.token();
}
return;
}
break;
// other stages propagate the previous result to this pipe and
// increment it by one
default: {
printf(
"stage %zu: input buffer[%zu] = %d\n", pf.pipe(), pf.line(), buffer[pf.line()]
);
buffer[pf.line()] = buffer[pf.line()] + 1;
}
break;
}
};
// create a vector of three pipes
std::vector< tf::Pipe<std::function<void(tf::Pipeflow&)>> > pipes;
for(size_t i=0; i<3; i++) {
pipes.emplace_back(tf::PipeType::SERIAL, pipe_callable);
}
// create a pipeline of four parallel lines based on the given vector of pipes
tf::ScalablePipeline pl(num_lines, pipes.begin(), pipes.end());
// build the pipeline graph using composition
tf::Task init = taskflow.emplace([](){ std::cout << "ready\n"; })
.name("starting pipeline");
tf::Task task = taskflow.composed_of(pl)
.name("pipeline");
tf::Task stop = taskflow.emplace([](){ std::cout << "stopped\n"; })
.name("pipeline stopped");
// create task dependency
init.precede(task);
task.precede(stop);
// dump the pipeline graph structure (with composition)
taskflow.dump(std::cout);
// run the pipeline
executor.run(taskflow).wait();
// reset the pipeline to a new range of five pipes and start from
// the initial state (i.e., token counts from zero)
for(size_t i=0; i<2; i++) {
pipes.emplace_back(tf::PipeType::SERIAL, pipe_callable);
}
pl.reset(pipes.begin(), pipes.end());
executor.run(taskflow).wait();
@endcode
The above example creates a pipeline graph that schedules five tokens over
four parallel lines in a circular fashion, first going through three serial pipes
and then five serial pipes:
@code{.shell-session}
# initial construction of three serial pipes
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
# resetting to a new range of five serial pipes
o -> o -> o -> o -> o
| | | | |
v v v v v
o -> o -> o -> o -> o
| | | | |
v v v v v
o -> o -> o -> o -> o
| | | | |
v v v v v
o -> o -> o -> o -> o
@endcode
Each pipe has the same type of `%tf::Pipe<%std::function<void(%tf::Pipeflow&)>>`
and is kept in a vector that is amenable to change.
We construct the scalable pipeline using two range iterators pointing to the
beginning and the end of the vector.
At each pipe stage, the program propagates the result to the next pipe
by adding one to the result stored in a custom data storage, @c buffer.
The pipeline scheduler will generate five scheduling tokens and then stop.
A scalable pipeline is move-only.
*/
template <typename P>
class ScalablePipeline {
/**
@private
*/
struct Line {
std::atomic<size_t> join_counter;
};
public:
/**
@brief pipe type
*/
using pipe_t = typename std::iterator_traits<P>::value_type;
/**
@brief default constructor
*/
ScalablePipeline() = default;
/**
@brief constructs an empty scalable pipeline object
@param num_lines the number of parallel lines
An empty scalable pipeline does not have any pipes.
The pipeline needs to be reset to a valid range of pipes
before running.
*/
ScalablePipeline(size_t num_lines);
/**
@brief constructs a scalable pipeline object
@param num_lines the number of parallel lines
@param first iterator to the beginning of the range
@param last iterator to the end of the range
Constructs a pipeline from the given range of pipes specified in
<tt>[first, last)</tt> using @c num_lines parallel lines.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
Internally, the scalable pipeline copies the iterators
from the specified range. Those pipe callables pointed to by
these iterators must remain valid during the execution of the pipeline.
*/
ScalablePipeline(size_t num_lines, P first, P last);
/**
@brief disabled copy constructor
*/
ScalablePipeline(const ScalablePipeline&) = delete;
/**
@brief move constructor
Constructs a pipeline from the given @c rhs using move semantics
(i.e. the data in @c rhs is moved into this pipeline).
After the move, @c rhs is in a state as if it had just been constructed.
The behavior is undefined if @c rhs is running during the move.
*/
ScalablePipeline(ScalablePipeline&& rhs);
/**
@brief disabled copy assignment operator
*/
ScalablePipeline& operator = (const ScalablePipeline&) = delete;
/**
@brief move assignment operator
Replaces the contents with those of @c rhs using move semantics
(i.e. the data in @c rhs is moved into this pipeline).
After the move, @c rhs is in a state as if it had just been constructed.
The behavior is undefined if @c rhs is running during the move.
*/
ScalablePipeline& operator = (ScalablePipeline&& rhs);
/**
@brief queries the number of parallel lines
The function returns the number of parallel lines given by the user
upon the construction of the pipeline.
The number of lines represents the maximum parallelism this pipeline
can achieve.
*/
size_t num_lines() const noexcept;
/**
@brief queries the number of pipes
The function returns the number of pipes given by the user
upon the construction of the pipeline.
*/
size_t num_pipes() const noexcept;
/**
@brief resets the pipeline
Resets the pipeline to the initial state. After resetting a pipeline,
its token identifier will start from zero.
*/
void reset();
/**
@brief resets the pipeline with a new range of pipes
@param first iterator to the beginning of the range
@param last iterator to the end of the range
The member function assigns the pipeline to a new range of pipes
specified in <tt>[first, last)</tt> and resets the pipeline to the
initial state. After resetting a pipeline, its token identifier will
start from zero.
Internally, the scalable pipeline copies the iterators
from the specified range. Those pipe callables pointed to by
these iterators must remain valid during the execution of the pipeline.
*/
void reset(P first, P last);
/**
@brief resets the pipeline to a new line number and a
new range of pipes
@param num_lines number of parallel lines
@param first iterator to the beginning of the range
@param last iterator to the end of the range
The member function resets the pipeline to a new number of
parallel lines and a new range of pipes specified in
<tt>[first, last)</tt>, as if the pipeline is just constructed.
After resetting a pipeline, its token identifier will start from zero.
Internally, the scalable pipeline copies the iterators
from the specified range. Those pipe callables pointed to by
these iterators must remain valid during the execution of the pipeline.
*/
void reset(size_t num_lines, P first, P last);
/**
@brief queries the number of generated tokens in the pipeline
The number represents the total number of scheduling tokens that have been
generated by the pipeline so far.
*/
size_t num_tokens() const noexcept;
/**
@brief obtains the graph object associated with the pipeline construct
This method is primarily used as an opaque data structure for creating
a module task of this pipeline.
*/
Graph& graph();
private:
Graph _graph;
size_t _num_tokens{0};
std::vector<P> _pipes;
std::vector<Task> _tasks;
std::vector<Pipeflow> _pipeflows;
std::unique_ptr<Line[]> _lines;
void _on_pipe(Pipeflow&, Runtime&);
void _build();
Line& _line(size_t, size_t);
};
// constructor
template <typename P>
ScalablePipeline<P>::ScalablePipeline(size_t num_lines) :
_tasks (num_lines + 1),
_pipeflows (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
_build();
}
// constructor
template <typename P>
ScalablePipeline<P>::ScalablePipeline(size_t num_lines, P first, P last) :
_tasks (num_lines + 1),
_pipeflows (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
reset(first, last);
_build();
}
// move constructor
template <typename P>
ScalablePipeline<P>::ScalablePipeline(ScalablePipeline&& rhs) :
_graph {std::move(rhs._graph)},
_num_tokens {rhs._num_tokens},
_pipes {std::move(rhs._pipes)},
_tasks {std::move(rhs._tasks)},
_pipeflows {std::move(rhs._pipeflows)},
_lines {std::move(rhs._lines)} {
rhs._num_tokens = 0;
}
// move assignment operator
template <typename P>
ScalablePipeline<P>& ScalablePipeline<P>::operator = (ScalablePipeline&& rhs) {
_graph = std::move(rhs._graph);
_num_tokens = rhs._num_tokens;
_pipes = std::move(rhs._pipes);
_tasks = std::move(rhs._tasks);
_pipeflows = std::move(rhs._pipeflows);
_lines = std::move(rhs._lines);
rhs._num_tokens = 0;
return *this;
}
// Function: num_lines
template <typename P>
size_t ScalablePipeline<P>::num_lines() const noexcept {
return _pipeflows.size();
}
// Function: num_pipes
template <typename P>
size_t ScalablePipeline<P>::num_pipes() const noexcept {
return _pipes.size();
}
// Function: num_tokens
template <typename P>
size_t ScalablePipeline<P>::num_tokens() const noexcept {
return _num_tokens;
}
// Function: graph
template <typename P>
Graph& ScalablePipeline<P>::graph() {
return _graph;
}
// Function: _line
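// Descriptive note (added for clarity): the line counters are stored in a
// flat array in row-major order, so the counter of (line l, pipe p) lives
// at index l * num_pipes() + p.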
template <typename P>
typename ScalablePipeline<P>::Line& ScalablePipeline<P>::_line(size_t l, size_t p) {
return _lines[l*num_pipes() + p];
}
template <typename P>
void ScalablePipeline<P>::reset(size_t num_lines, P first, P last) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
_graph.clear();
_tasks.resize(num_lines + 1);
_pipeflows.resize(num_lines);
reset(first, last);
_build();
}
// Function: reset
template <typename P>
void ScalablePipeline<P>::reset(P first, P last) {
size_t num_pipes = static_cast<size_t>(std::distance(first, last));
if(num_pipes == 0) {
TF_THROW("pipeline cannot be empty");
}
if(first->type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
_pipes.resize(num_pipes);
size_t i=0;
for(auto itr = first; itr != last; itr++) {
_pipes[i++] = itr;
}
_lines = std::make_unique<Line[]>(num_lines() * _pipes.size());
reset();
}
// Function: reset
template <typename P>
void ScalablePipeline<P>::reset() {
_num_tokens = 0;
for(size_t l = 0; l<num_lines(); l++) {
_pipeflows[l]._pipe = 0;
_pipeflows[l]._line = l;
}
_line(0, 0).join_counter.store(0, std::memory_order_relaxed);
for(size_t l=1; l<num_lines(); l++) {
for(size_t f=1; f<num_pipes(); f++) {
_line(l, f).join_counter.store(
static_cast<size_t>(_pipes[f]->type()), std::memory_order_relaxed
);
}
}
for(size_t f=1; f<num_pipes(); f++) {
_line(0, f).join_counter.store(1, std::memory_order_relaxed);
}
for(size_t l=1; l<num_lines(); l++) {
_line(l, 0).join_counter.store(
static_cast<size_t>(_pipes[0]->type()) - 1, std::memory_order_relaxed
);
}
}
// Procedure: _on_pipe
template <typename P>
void ScalablePipeline<P>::_on_pipe(Pipeflow& pf, Runtime& rt) {
using callable_t = typename pipe_t::callable_t;
if constexpr (std::is_invocable_v<callable_t, Pipeflow&>) {
_pipes[pf._pipe]->_callable(pf);
}
else if constexpr(std::is_invocable_v<callable_t, Pipeflow&, Runtime&>) {
_pipes[pf._pipe]->_callable(pf, rt);
}
else {
static_assert(dependent_false_v<callable_t>, "unsupported pipe callable type");
}
}
// Procedure: _build
template <typename P>
void ScalablePipeline<P>::_build() {
using namespace std::literals::string_literals;
FlowBuilder fb(_graph);
// init task
_tasks[0] = fb.emplace([this]() {
return static_cast<int>(_num_tokens % num_lines());
}).name("cond");
// line task
for(size_t l = 0; l < num_lines(); l++) {
_tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable {
auto pf = &_pipeflows[l];
pipeline:
_line(pf->_line, pf->_pipe).join_counter.store(
static_cast<size_t>(_pipes[pf->_pipe]->type()), std::memory_order_relaxed
);
if (pf->_pipe == 0) {
pf->_token = _num_tokens;
if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) {
// here, the pipeline is not stopped yet because other
// lines of tasks may still be running their last stages
return;
}
++_num_tokens;
}
else {
_on_pipe(*pf, rt);
}
size_t c_f = pf->_pipe;
size_t n_f = (pf->_pipe + 1) % num_pipes();
size_t n_l = (pf->_line + 1) % num_lines();
pf->_pipe = n_f;
// ---- scheduling starts here ----
// Notice that the shared variable f must not be changed after this
// point because it can result in data race due to the following
// condition:
//
// a -> b
// | |
// v v
// c -> d
//
// d will be spawned by either c or b, so if c changes f but b spawns d
// then data race on f will happen
std::array<int, 2> retval;
size_t n = 0;
// downward dependency
if(_pipes[c_f]->type() == PipeType::SERIAL &&
_line(n_l, c_f).join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 1;
}
// forward dependency
if(_line(pf->_line, n_f).join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 0;
}
// notice that the task index starts from 1
switch(n) {
case 2: {
rt.schedule(_tasks[n_l+1]);
goto pipeline;
}
case 1: {
if (retval[0] == 1) {
pf = &_pipeflows[n_l];
}
goto pipeline;
}
}
}).name("rt-"s + std::to_string(l));
_tasks[0].precede(_tasks[l+1]);
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/scan.hpp | #pragma once
#include "launch.hpp"
namespace tf {
namespace detail {
// Function: scan_loop
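// A worked example (added for illustration): with W = 3 workers, each worker
// w first writes the local inclusive scan of its chunk and stores the chunk
// total in buf[w]. The last worker to arrive turns buf into a running prefix
// (buf[i] = bop(buf[i-1], buf[i])), and then every worker w > 0 adds
// buf[w-1] (the combined total of all preceding chunks) to each element of
// its chunk. For input {1,2 | 3,4 | 5,6} and bop = plus: local scans give
// {1,3 | 3,7 | 5,11} with buf = {3,7,11}; the global scan makes
// buf = {3,10,21}; adding buf[w-1] yields the final {1,3 | 6,10 | 15,21}.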
template <typename Iterator, typename BufferT, typename B>
TF_FORCE_INLINE void scan_loop(
tf::Runtime& rt,
std::atomic<size_t>& counter,
BufferT& buf,
B&& bop,
Iterator d_beg,
size_t W,
size_t w,
size_t chunk_size
){
// whoever finishes last performs the global scan
if(counter.fetch_add(1, std::memory_order_acq_rel) == W-1) {
for(size_t i=1; i<buf.size(); i++) {
buf[i].data = bop(buf[i-1].data, buf[i].data);
}
counter.store(0, std::memory_order_release);
}
// the first worker needs no extra work - its local scan is already final
if(w==0) {
return;
}
// need to do public corun because multiple workers can call this
rt.executor().corun_until([&counter](){
return counter.load(std::memory_order_acquire) == 0;
});
// add the combined total of all preceding blocks to this chunk
for(size_t i=0; i<chunk_size; i++) {
*d_beg++ = bop(buf[w-1].data, *d_beg);
}
}
} // end of namespace tf::detail ---------------------------------------------
// Function: make_inclusive_scan_task
template <typename B, typename E, typename D, typename BOP>
TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::inclusive_scan(s_beg, s_end, d_beg, bop);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
size_t Q = N/W;
size_t R = N%W;
//auto orig_d_beg = d_beg;
//ExecutionPolicy<StaticPartitioner> policy;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
*d_beg++ = init = *s_beg++;
for(size_t i=1; i<chunk_size; i++){
*d_beg++ = init = bop(init, *s_beg++);
}
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
//size_t offset = R ? Q + 1 : Q;
//size_t rest = N - offset;
//size_t rest_Q = rest / W;
//size_t rest_R = rest % W;
//
//chunk_size = policy.chunk_size() == 0 ?
// rest_Q + (w < rest_R) : policy.chunk_size();
//
//size_t curr_b = policy.chunk_size() == 0 ?
// offset + (w<rest_R ? w*(rest_Q + 1) : rest_R + w*rest_Q) :
// offset + w*policy.chunk_size();
//policy(N, W, curr_b, chunk_size,
// [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
// std::advance(orig_d_beg, curr_b - prev_e);
// for(size_t x = curr_b; x<curr_e; x++) {
// size_t j = x < (Q+1)*R ? x/(Q+1) : (x-(Q+1)*R)/Q + R;
// *orig_d_beg++ = bop(buf[j-1].data, *orig_d_beg);
// }
// prev_e = curr_e;
// }
//);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// Function: make_inclusive_scan_task
template <typename B, typename E, typename D, typename BOP, typename T>
TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop, T init) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::inclusive_scan(s_beg, s_end, d_beg, bop, init);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
// set up the initial value for the first worker
buf[0].data = std::move(init);
size_t Q = N/W;
size_t R = N%W;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
*d_beg++ = init = (w == 0) ? bop(init, *s_beg++) : *s_beg++;
for(size_t i=1; i<chunk_size; i++){
*d_beg++ = init = bop(init, *s_beg++);
}
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// ----------------------------------------------------------------------------
// Transform Inclusive Scan
// ----------------------------------------------------------------------------
// Function: transform_inclusive_scan
template <typename B, typename E, typename D, typename BOP, typename UOP>
TF_FORCE_INLINE auto make_transform_inclusive_scan_task(
B first, E last, D d_first, BOP bop, UOP uop
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
size_t Q = N/W;
size_t R = N%W;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
*d_beg++ = init = uop(*s_beg++);
for(size_t i=1; i<chunk_size; i++){
*d_beg++ = init = bop(init, uop(*s_beg++));
}
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// Function: transform_inclusive_scan
template <typename B, typename E, typename D, typename BOP, typename UOP, typename T>
TF_FORCE_INLINE auto make_transform_inclusive_scan_task(
B first, E last, D d_first, BOP bop, UOP uop, T init
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop, init);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
// set up the initial value for the first worker
buf[0].data = std::move(init);
size_t Q = N/W;
size_t R = N%W;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
*d_beg++ = init = (w == 0) ? bop(init, uop(*s_beg++)) : uop(*s_beg++);
for(size_t i=1; i<chunk_size; i++){
*d_beg++ = init = bop(init, uop(*s_beg++));
}
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// ----------------------------------------------------------------------------
// Exclusive Scan
// ----------------------------------------------------------------------------
// Function: make_exclusive_scan_task
template <typename B, typename E, typename D, typename T, typename BOP>
TF_FORCE_INLINE auto make_exclusive_scan_task(
B first, E last, D d_first, T init, BOP bop
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::exclusive_scan(s_beg, s_end, d_beg, init, bop);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
size_t Q = N/W;
size_t R = N%W;
// fetch the init value
auto s_beg_temp = s_beg;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w<R), N - curr_b);
buf[w].data = w ? *s_beg_temp : std::move(init);
std::advance(s_beg_temp, chunk_size - !w);
curr_b += chunk_size;
}
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
for(size_t i=1; i<chunk_size; i++) {
auto v = init;
init = bop(init, *s_beg++);
*d_beg++ = std::move(v);
}
*d_beg++ = init;
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// ----------------------------------------------------------------------------
// Transform Exclusive Scan
// ----------------------------------------------------------------------------
// Function: make_transform_exclusive_scan_task
template <typename B, typename E, typename D, typename T, typename BOP, typename UOP>
TF_FORCE_INLINE auto make_transform_exclusive_scan_task(
B first, E last, D d_first, T init, BOP bop, UOP uop
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t s_beg = first;
E_t s_end = last;
D_t d_beg = d_first;
if(s_beg == s_end) {
return;
}
size_t W = rt.executor().num_workers();
size_t N = std::distance(s_beg, s_end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
std::transform_exclusive_scan(s_beg, s_end, d_beg, init, bop, uop);
return;
}
if(N < W) {
W = N;
}
std::vector<CachelineAligned<value_type>> buf(W);
std::atomic<size_t> counter(0);
size_t Q = N/W;
size_t R = N%W;
// fetch the init value
auto s_beg_temp = s_beg;
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w<R), N - curr_b);
buf[w].data = w ? uop(*s_beg_temp) : std::move(init);
std::advance(s_beg_temp, chunk_size - !w);
curr_b += chunk_size;
}
for(size_t w=0, curr_b=0, chunk_size; w<W && curr_b < N; ++w) {
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
auto& init = buf[w].data;
for(size_t i=1; i<chunk_size; i++) {
auto v = init;
init = bop(init, uop(*s_beg++));
*d_beg++ = std::move(v);
}
*d_beg++ = init;
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
});
std::advance(s_beg, chunk_size);
std::advance(d_beg, chunk_size);
curr_b += chunk_size;
}
rt.join();
};
}
// ----------------------------------------------------------------------------
// Inclusive Scan
// ----------------------------------------------------------------------------
// Function: inclusive_scan
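// Example (a minimal sketch; `taskflow`, `executor`, `in`, and `out` are
// assumptions for illustration - a tf::Taskflow, a tf::Executor, and two
// equally sized containers):
//
//   std::vector<int> in{1, 2, 3, 4}, out(4);
//   taskflow.inclusive_scan(in.begin(), in.end(), out.begin(),
//                           std::plus<int>{});
//   executor.run(taskflow).wait();  // out = {1, 3, 6, 10}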
template <typename B, typename E, typename D, typename BOP>
Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop) {
return emplace(make_inclusive_scan_task(
first, last, d_first, bop
));
}
// Function: inclusive_scan
template <typename B, typename E, typename D, typename BOP, typename T>
Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop, T init) {
return emplace(make_inclusive_scan_task(
first, last, d_first, bop, init
));
}
// ----------------------------------------------------------------------------
// Transform Inclusive Scan
// ----------------------------------------------------------------------------
// Function: transform_inclusive_scan
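// Example (a sketch; names are assumptions for illustration): scan the
// squares of the input elements.
//
//   taskflow.transform_inclusive_scan(in.begin(), in.end(), out.begin(),
//     std::plus<int>{}, [](int x){ return x * x; });
//   // in = {1, 2, 3}  ->  out = {1, 5, 14}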
template <typename B, typename E, typename D, typename BOP, typename UOP>
Task FlowBuilder::transform_inclusive_scan(
B first, E last, D d_first, BOP bop, UOP uop
) {
return emplace(make_transform_inclusive_scan_task(
first, last, d_first, bop, uop
));
}
// Function: transform_inclusive_scan
template <typename B, typename E, typename D, typename BOP, typename UOP, typename T>
Task FlowBuilder::transform_inclusive_scan(
B first, E last, D d_first, BOP bop, UOP uop, T init
) {
return emplace(make_transform_inclusive_scan_task(
first, last, d_first, bop, uop, init
));
}
// ----------------------------------------------------------------------------
// Exclusive Scan
// ----------------------------------------------------------------------------
// Function: exclusive_scan
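// Example (a sketch; names are assumptions for illustration): note that the
// initial value precedes the binary operator, mirroring std::exclusive_scan.
//
//   taskflow.exclusive_scan(in.begin(), in.end(), out.begin(), 0,
//                           std::plus<int>{});
//   // in = {1, 2, 3, 4}, init = 0  ->  out = {0, 1, 3, 6}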
template <typename B, typename E, typename D, typename T, typename BOP>
Task FlowBuilder::exclusive_scan(B first, E last, D d_first, T init, BOP bop) {
return emplace(make_exclusive_scan_task(
first, last, d_first, init, bop
));
}
// ----------------------------------------------------------------------------
// Transform Exclusive Scan
// ----------------------------------------------------------------------------
// Function: transform_exclusive_scan
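// Example (a sketch; names are assumptions for illustration): exclusive scan
// over the squares of the input elements.
//
//   taskflow.transform_exclusive_scan(in.begin(), in.end(), out.begin(), 0,
//     std::plus<int>{}, [](int x){ return x * x; });
//   // in = {1, 2, 3}, init = 0  ->  out = {0, 1, 5}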
template <typename B, typename E, typename D, typename T, typename BOP, typename UOP>
Task FlowBuilder::transform_exclusive_scan(
B first, E last, D d_first, T init, BOP bop, UOP uop
) {
return emplace(make_transform_exclusive_scan_task(
first, last, d_first, init, bop, uop
));
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/find.hpp | #pragma once
#include "launch.hpp"
namespace tf {
namespace detail {
// Function: find_if_loop
template <typename Iterator, typename Predicate>
TF_FORCE_INLINE bool find_if_loop(
std::atomic<size_t>& offset,
Iterator& beg,
size_t& prev_e,
size_t curr_b,
size_t curr_e,
Predicate&& predicate
) {
// early prune
if(offset.load(std::memory_order_relaxed) < curr_b) {
return true;
}
std::advance(beg, curr_b - prev_e);
for(size_t x = curr_b; x<curr_e; x++) {
if(predicate(*beg++)) {
atomic_min(offset, x);
return true;
}
}
prev_e = curr_e;
return false;
}
// Function: find_if_not_loop
template <typename Iterator, typename Predicate>
TF_FORCE_INLINE bool find_if_not_loop(
std::atomic<size_t>& offset,
Iterator& beg,
size_t& prev_e,
size_t curr_b,
size_t curr_e,
Predicate&& predicate
) {
// early prune
if(offset.load(std::memory_order_relaxed) < curr_b) {
return true;
}
std::advance(beg, curr_b - prev_e);
for(size_t x = curr_b; x<curr_e; x++) {
if(!predicate(*beg++)) {
atomic_min(offset, x);
return true;
}
}
prev_e = curr_e;
return false;
}
} // namespace detail --------------------------------------------------------
// Function: make_find_if_task
template <typename B, typename E, typename T, typename UOP, typename P = GuidedPartitioner>
TF_FORCE_INLINE auto make_find_if_task(
B first, E last, T& result, UOP predicate, P&& part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
return
[b=first, e=last, predicate, &result, part=std::forward<P>(part)]
(Runtime& rt) mutable {
// fetch the stateful values
B_t beg = b;
E_t end = e;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
result = std::find_if(beg, end, predicate);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> offset(N);
// static partitioner
if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
launch_loop(W, w, rt,
[N, W, curr_b, chunk_size, beg, &predicate, &offset, &part]
() mutable {
part.loop_until(N, W, curr_b, chunk_size,
[&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
return detail::find_if_loop(
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
);
}
);
}
rt.join();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part,
[N, W, beg, &predicate, &offset, &next, &part] () mutable {
part.loop_until(N, W, next,
[&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
return detail::find_if_loop(
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
);
}
);
}
// update the result iterator by the offset
result = std::next(beg, offset.load(std::memory_order_relaxed));
};
}
// Function: make_find_if_not_task
template <typename B, typename E, typename T, typename UOP, typename P = GuidedPartitioner>
TF_FORCE_INLINE auto make_find_if_not_task(
B first, E last, T& result, UOP predicate, P&& part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
return
[b=first, e=last, predicate, &result, part=std::forward<P>(part)]
(Runtime& rt) mutable {
// fetch the stateful values
B_t beg = b;
E_t end = e;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
result = std::find_if_not(beg, end, predicate);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> offset(N);
// static partitioner
if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
launch_loop(W, w, rt,
[N, W, curr_b, chunk_size, beg, &predicate, &offset, &part] () mutable {
part.loop_until(N, W, curr_b, chunk_size,
[&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
return detail::find_if_not_loop(
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
);
}
);
}
rt.join();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part,
[N, W, beg, &predicate, &offset, &next, &part] () mutable {
part.loop_until(N, W, next,
[&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
return detail::find_if_not_loop(
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
);
}
);
}
// update the result iterator by the offset
result = std::next(beg, offset.load(std::memory_order_relaxed));
};
}
// Function: make_min_element_task
template <typename B, typename E, typename T, typename C, typename P = GuidedPartitioner>
TF_FORCE_INLINE auto make_min_element_task(
B first, E last, T& result, C comp, P&& part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
return
[b=first, e=last, &result, comp, part=std::forward<P>(part)]
(Runtime& rt) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
result = std::min_element(beg, end, comp);
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
// initialize the result to the first element
result = beg++;
N--;
// static partitioner
if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
// force the chunk size to be at least two because the temporary
// variable 'smallest' is seeded from the first two elements of the chunk
chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
launch_loop(W, w, rt,
[beg, curr_b, N, W, chunk_size, &comp, &mutex, &result, &part] () mutable {
std::advance(beg, curr_b);
if(N - curr_b == 1) {
std::lock_guard<std::mutex> lock(mutex);
if(comp(*beg, *result)) {
result = beg;
}
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T smallest = comp(*beg1, *beg2) ? beg1 : beg2;
// loop reduce
part.loop(N, W, curr_b, chunk_size,
[&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable {
if(curr_b > prev_e) {
std::advance(beg, curr_b - prev_e);
}
else {
curr_b = prev_e;
}
for(size_t x=curr_b; x<curr_e; x++, beg++) {
if(comp(*beg, *smallest)) {
smallest = beg;
}
}
prev_e = curr_e;
}
);
// final reduce
std::lock_guard<std::mutex> lock(mutex);
if(comp(*smallest, *result)) {
result = smallest;
}
});
}
rt.join();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part,
[beg, N, W, &next, &comp, &mutex, &result, &part] () mutable {
// pre-reduce
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
if(comp(*beg, *result)) {
result = beg;
}
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T smallest = comp(*beg1, *beg2) ? beg1 : beg2;
// loop reduce
part.loop(N, W, next,
[&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
std::advance(beg, curr_b - prev_e);
for(size_t x=curr_b; x<curr_e; x++, beg++) {
if(comp(*beg, *smallest)) {
smallest = beg;
}
}
prev_e = curr_e;
}
);
// final reduce
std::lock_guard<std::mutex> lock(mutex);
if(comp(*smallest, *result)) {
result = smallest;
}
}
);
}
};
}
// Function: make_max_element_task
template <typename B, typename E, typename T, typename C, typename P = GuidedPartitioner>
TF_FORCE_INLINE auto make_max_element_task(
B first, E last, T& result, C comp, P&& part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
return
[b=first, e=last, &result, comp, part=std::forward<P>(part)]
(Runtime& rt) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
result = std::max_element(beg, end, comp);
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
// initialize the result to the first element
result = beg++;
N--;
// static partitioner
if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
// force the chunk size to be at least two because the temporary
// variable 'largest' is seeded from the first two elements of the chunk
chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
launch_loop(W, w, rt,
[beg, curr_b, N, W, chunk_size, &comp, &mutex, &result, &part] () mutable {
std::advance(beg, curr_b);
if(N - curr_b == 1) {
std::lock_guard<std::mutex> lock(mutex);
if(comp(*result, *beg)) {
result = beg;
}
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T largest = comp(*beg1, *beg2) ? beg2 : beg1;
// loop reduce
part.loop(N, W, curr_b, chunk_size,
[&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable {
if(curr_b > prev_e) {
std::advance(beg, curr_b - prev_e);
}
else {
curr_b = prev_e;
}
for(size_t x=curr_b; x<curr_e; x++, beg++) {
if(comp(*largest, *beg)) {
largest = beg;
}
}
prev_e = curr_e;
}
);
// final reduce
std::lock_guard<std::mutex> lock(mutex);
if(comp(*result, *largest)) {
result = largest;
}
});
}
rt.join();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part,
[beg, N, W, &next, &comp, &mutex, &result, &part] () mutable {
// pre-reduce
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
if(comp(*result, *beg)) {
result = beg;
}
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T largest = comp(*beg1, *beg2) ? beg2 : beg1;
// loop reduce
part.loop(N, W, next,
[&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
std::advance(beg, curr_b - prev_e);
for(size_t x=curr_b; x<curr_e; x++, beg++) {
if(comp(*largest, *beg)) {
largest = beg;
}
}
prev_e = curr_e;
}
);
// final reduce
std::lock_guard<std::mutex> lock(mutex);
if(comp(*result, *largest)) {
result = largest;
}
}
);
}
};
}
// Function: find_if
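// Example (a sketch; names are assumptions for illustration): the result
// iterator is captured by reference, so it must outlive the task and is
// written when the task runs.
//
//   std::vector<int> v{1, 9, 2, 8};
//   std::vector<int>::iterator result;
//   taskflow.find_if(v.begin(), v.end(), result,
//                    [](int x){ return x > 5; });
//   executor.run(taskflow).wait();  // result points to 9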
template <typename B, typename E, typename T, typename UOP, typename P>
Task tf::FlowBuilder::find_if(B first, E last, T& result, UOP predicate, P&& part) {
return emplace(make_find_if_task(
first, last, result, predicate, std::forward<P>(part)
));
}
// Function: find_if_not
template <typename B, typename E, typename T, typename UOP, typename P>
Task tf::FlowBuilder::find_if_not(B first, E last, T& result, UOP predicate, P&& part) {
return emplace(make_find_if_not_task(
first, last, result, predicate, std::forward<P>(part)
));
}
// ----------------------------------------------------------------------------
// min_element
// ----------------------------------------------------------------------------
// Function: min_element
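// Example (a sketch; names are assumptions for illustration); max_element
// below mirrors this, selecting the largest element under the same comparator.
//
//   std::vector<int> v{3, 1, 4, 1, 5};
//   std::vector<int>::iterator smallest;
//   taskflow.min_element(v.begin(), v.end(), smallest, std::less<int>{});
//   executor.run(taskflow).wait();  // *smallest == 1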
template <typename B, typename E, typename T, typename C, typename P>
Task FlowBuilder::min_element(B first, E last, T& result, C comp, P&& part) {
return emplace(make_min_element_task(
first, last, result, comp, std::forward<P>(part)
));
}
// ----------------------------------------------------------------------------
// max_element
// ----------------------------------------------------------------------------
// Function: max_element
template <typename B, typename E, typename T, typename C, typename P>
Task FlowBuilder::max_element(B first, E last, T& result, C comp, P&& part) {
return emplace(make_max_element_task(
first, last, result, comp, std::forward<P>(part)
));
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/transform.hpp | #pragma once
#include "../core/executor.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// default transform
// ----------------------------------------------------------------------------
// Function: transform
template <typename B, typename E, typename O, typename C>
Task FlowBuilder::transform(B first1, E last1, O d_first, C c) {
using namespace std::string_literals;
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using O_t = std::decay_t<unwrap_ref_decay_t<O>>;
Task task = emplace(
[first1, last1, d_first, c] (Subflow& sf) mutable {
// fetch the stateful values
B_t beg = first1;
E_t end = last1;
O_t d_beg = d_first;
if(beg == end) {
return;
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
std::transform(beg, end, d_beg, c);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t z = 0;
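        // z records the absolute position the iterators currently point to;
        // p1 is the remaining-work threshold below which we switch to
        // fine-grained chunking, and p2 sizes each coarse-grained partition
        // as a fraction of the remaining iterations per worker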
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
std::advance(d_beg, s0-z);
for(size_t x=s0; x<e0; x++) {
*d_beg++ = c(*beg++);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
std::advance(d_beg, s0-z);
for(size_t x = s0; x< e0; x++) {
*d_beg++ = c(*beg++);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
// Function: transform
template <typename B1, typename E1, typename B2, typename O, typename C>
Task FlowBuilder::transform(B1 first1, E1 last1, B2 first2, O d_first, C c) {
using namespace std::string_literals;
using B1_t = std::decay_t<unwrap_ref_decay_t<B1>>;
using E1_t = std::decay_t<unwrap_ref_decay_t<E1>>;
using B2_t = std::decay_t<unwrap_ref_decay_t<B2>>;
using O_t = std::decay_t<unwrap_ref_decay_t<O>>;
Task task = emplace(
[first1, last1, first2, d_first, c] (Subflow& sf) mutable {
// fetch the stateful values
B1_t beg1 = first1;
E1_t end1 = last1;
B2_t beg2 = first2;
O_t d_beg = d_first;
if(beg1 == end1) {
return;
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg1, end1);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
std::transform(beg1, end1, beg2, d_beg, c);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t z = 0;
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg1, s0-z);
std::advance(beg2, s0-z);
std::advance(d_beg, s0-z);
for(size_t x=s0; x<e0; x++) {
*d_beg++ = c(*beg1++, *beg2++);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg1, s0-z);
std::advance(beg2, s0-z);
std::advance(d_beg, s0-z);
for(size_t x = s0; x< e0; x++) {
*d_beg++ = c(*beg1++, *beg2++);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/algorithm/partitioner.hpp | // reference:
// - gomp: https://github.com/gcc-mirror/gcc/blob/master/libgomp/iter.c
// - komp: https://github.com/llvm-mirror/openmp/blob/master/runtime/src/kmp_dispatch.cpp
#pragma once
/**
@file partitioner.hpp
@brief partitioner include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Partitioner Base
// ----------------------------------------------------------------------------
/**
@class PartitionerBase
@brief class to derive a partitioner for scheduling parallel algorithms
The class provides base methods to derive a partitioner that can be used
to schedule parallel iterations (e.g., tf::Taskflow::for_each).
A partitioner defines the scheduling method for running parallel algorithms,
such as tf::Taskflow::for_each, tf::Taskflow::reduce, and so on.
By default, we provide the following partitioners:
+ tf::GuidedPartitioner to enable guided scheduling algorithm of adaptive chunk size
+ tf::DynamicPartitioner to enable dynamic scheduling algorithm of equal chunk size
+ tf::StaticPartitioner to enable static scheduling algorithm of static chunk size
+ tf::RandomPartitioner to enable random scheduling algorithm of random chunk size
Depending on applications, partitioning algorithms can impact the performance
a lot.
For example, if a parallel-iteration workload contains a regular work unit per
iteration, tf::StaticPartitioner can deliver the best performance.
On the other hand, if the work unit per iteration is irregular and unbalanced,
tf::GuidedPartitioner or tf::DynamicPartitioner can outperform tf::StaticPartitioner.
In most situations, tf::GuidedPartitioner can deliver decent performance and
is thus used as our default partitioner.
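For illustration, a partitioner is passed as the trailing argument of a
parallel algorithm. The following is a minimal sketch; the data and the
callable are placeholders:
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
std::vector<int> data(1000, 1);
// run the parallel loop with guided scheduling and a chunk size of 16
taskflow.for_each(
  data.begin(), data.end(), [](int& i){ i *= 2; }, tf::GuidedPartitioner(16)
);
executor.run(taskflow).wait();
@endcode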
*/
class PartitionerBase {
public:
/**
@brief default constructor
*/
PartitionerBase() = default;
/**
@brief construct a partitioner with the given chunk size
*/
explicit PartitionerBase(size_t chunk_size) : _chunk_size {chunk_size} {}
/**
@brief query the chunk size of this partitioner
*/
size_t chunk_size() const { return _chunk_size; }
/**
@brief update the chunk size of this partitioner
*/
void chunk_size(size_t cz) { _chunk_size = cz; }
protected:
/**
@brief chunk size
*/
size_t _chunk_size{0};
};
// ----------------------------------------------------------------------------
// Guided Partitioner
// ----------------------------------------------------------------------------
/**
@class GuidedPartitioner
@brief class to construct a guided partitioner for scheduling parallel algorithms
The size of a partition is proportional to the number of unassigned iterations
divided by the number of workers,
and the size will gradually decrease to the given chunk size.
The last partition may be smaller than the chunk size.
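As a rough illustration derived from the implementation below: with 100
unassigned iterations and 4 workers, the next coarse-grained partition spans
about <tt>0.5 * 100 / 4 = 12</tt> iterations, and the span keeps shrinking
with the remaining work until it reaches the given chunk size.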
*/
class GuidedPartitioner : public PartitionerBase {
public:
/**
@brief default constructor
*/
GuidedPartitioner() : PartitionerBase{1} {}
/**
@brief construct a guided partitioner with the given chunk size
*/
explicit GuidedPartitioner(size_t sz) : PartitionerBase (sz) {}
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
size_t N,
size_t W,
std::atomic<size_t>& next,
F&& func
) const {
size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
size_t p1 = 2 * W * (chunk_size + 1);
float p2 = 0.5f / static_cast<float>(W);
size_t curr_b = next.load(std::memory_order_relaxed);
while(curr_b < N) {
size_t r = N - curr_b;
// fine-grained
if(r < p1) {
while(1) {
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(curr_b >= N) {
return;
}
func(curr_b, std::min(curr_b + chunk_size, N));
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
//size_t curr_e = (q <= r) ? curr_b + q : N;
size_t curr_e = std::min(curr_b + q, N);
if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed,
std::memory_order_relaxed)) {
func(curr_b, curr_e);
curr_b = next.load(std::memory_order_relaxed);
}
}
}
}
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
size_t N,
size_t W,
std::atomic<size_t>& next,
F&& func
) const {
size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
size_t p1 = 2 * W * (chunk_size + 1);
float p2 = 0.5f / static_cast<float>(W);
size_t curr_b = next.load(std::memory_order_relaxed);
while(curr_b < N) {
size_t r = N - curr_b;
// fine-grained
if(r < p1) {
while(1) {
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(curr_b >= N) {
return;
}
if(func(curr_b, std::min(curr_b + chunk_size, N))) {
return;
}
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
//size_t curr_e = (q <= r) ? curr_b + q : N;
size_t curr_e = std::min(curr_b + q, N);
if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed,
std::memory_order_relaxed)) {
if(func(curr_b, curr_e)) {
return;
}
curr_b = next.load(std::memory_order_relaxed);
}
}
}
}
};
// ----------------------------------------------------------------------------
// Dynamic Partitioner
// ----------------------------------------------------------------------------
/**
@class DynamicPartitioner
@brief class to construct a dynamic partitioner for scheduling parallel algorithms
The partitioner splits iterations into many partitions each of size equal to
the given chunk size.
Different partitions are distributed dynamically to workers
without any specific order.
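For example, with a chunk size of 2 and 10 iterations, the partitioner forms
five partitions, <tt>[0, 2), [2, 4), ..., [8, 10)</tt>, and each idle worker
grabs the next available partition in arrival order.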
*/
class DynamicPartitioner : public PartitionerBase {
public:
/**
@brief default constructor
*/
DynamicPartitioner() : PartitionerBase{1} {};
/**
@brief construct a dynamic partitioner with the given chunk size
*/
explicit DynamicPartitioner(size_t sz) : PartitionerBase (sz) {}
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
size_t N,
size_t,
std::atomic<size_t>& next,
F&& func
) const {
size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
func(curr_b, std::min(curr_b + chunk_size, N));
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
size_t N,
size_t,
std::atomic<size_t>& next,
F&& func
) const {
size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
if(func(curr_b, std::min(curr_b + chunk_size, N))) {
return;
}
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
};
// ----------------------------------------------------------------------------
// Static Partitioner
// ----------------------------------------------------------------------------
/**
@class StaticPartitioner
@brief class to construct a static partitioner for scheduling parallel algorithms
The partitioner divides iterations into chunks and distributes chunks
to workers in order.
If the chunk size is not specified (default @c 0), the partitioner resorts to a chunk size
that equally distributes iterations into workers.
@code{.cpp}
std::vector<int> data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
taskflow.for_each(
data.begin(), data.end(), [](int i){}, StaticPartitioner(0)
);
executor.run(taskflow).wait();
@endcode
*/
class StaticPartitioner : public PartitionerBase {
public:
/**
@brief default constructor
*/
StaticPartitioner() : PartitionerBase{0} {};
/**
  @brief construct a static partitioner with the given chunk size
*/
explicit StaticPartitioner(size_t sz) : PartitionerBase(sz) {}
/**
@brief queries the adjusted chunk size
Returns the given chunk size if it is not zero, or returns
<tt>N/W + (w < N%W)</tt>, where @c N is the number of iterations,
@c W is the number of workers, and @c w is the worker ID.
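  For example, with <tt>N = 10</tt> iterations, <tt>W = 4</tt> workers, and a
  chunk size of @c 0, workers 0 through 3 receive chunks of 3, 3, 2, and 2
  iterations, respectively.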
*/
size_t adjusted_chunk_size(size_t N, size_t W, size_t w) const {
return _chunk_size ? _chunk_size : N/W + (w < N%W);
}
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
size_t N,
size_t W,
size_t curr_b,
size_t chunk_size,
F&& func
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
size_t curr_e = std::min(curr_b + chunk_size, N);
func(curr_b, curr_e);
curr_b += stride;
}
}
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
size_t N,
size_t W,
size_t curr_b,
size_t chunk_size,
F&& func
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
size_t curr_e = std::min(curr_b + chunk_size, N);
if(func(curr_b, curr_e)) {
return;
}
curr_b += stride;
}
}
};
// ----------------------------------------------------------------------------
// RandomPartitioner
// ----------------------------------------------------------------------------
/**
@class RandomPartitioner
@brief class to construct a random partitioner for scheduling parallel algorithms
Similar to tf::DynamicPartitioner,
the partitioner splits iterations into many partitions but each with a random
chunk size in the range <tt>c = [alpha * N * W, beta * N * W]</tt>.
By default, @c alpha is <tt>0.01</tt> and @c beta is <tt>0.5</tt>.
*/
class RandomPartitioner : public PartitionerBase {
public:
/**
@brief default constructor
*/
RandomPartitioner() = default;
/**
@brief constructs a random partitioner
*/
RandomPartitioner(size_t cz) : PartitionerBase(cz) {}
/**
@brief constructs a random partitioner with the given parameters
*/
RandomPartitioner(float alpha, float beta) : _alpha {alpha}, _beta {beta} {}
/**
@brief queries the @c alpha value
*/
float alpha() const { return _alpha; }
/**
@brief queries the @c beta value
*/
float beta() const { return _beta; }
/**
@brief queries the range of chunk size
@param N number of iterations
@param W number of workers
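  For example, with <tt>N = 100</tt>, <tt>W = 4</tt>, and the default
  @c alpha and @c beta, each chunk size is drawn uniformly from
  <tt>[4, 200]</tt> (i.e., <tt>0.01*100*4</tt> to <tt>0.5*100*4</tt>).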
*/
std::pair<size_t, size_t> chunk_size_range(size_t N, size_t W) const {
size_t b1 = static_cast<size_t>(_alpha * N * W);
size_t b2 = static_cast<size_t>(_beta * N * W);
if(b1 > b2) {
std::swap(b1, b2);
}
b1 = std::max(b1, size_t{1});
b2 = std::max(b2, b1 + 1);
return {b1, b2};
}
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
size_t N,
size_t W,
std::atomic<size_t>& next,
F&& func
) const {
auto [b1, b2] = chunk_size_range(N, W);
std::default_random_engine engine {std::random_device{}()};
std::uniform_int_distribution<size_t> dist(b1, b2);
size_t chunk_size = dist(engine);
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
func(curr_b, std::min(curr_b + chunk_size, N));
chunk_size = dist(engine);
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
/**
@private
*/
template <typename F,
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
size_t N,
size_t W,
std::atomic<size_t>& next,
F&& func
) const {
auto [b1, b2] = chunk_size_range(N, W);
std::default_random_engine engine {std::random_device{}()};
std::uniform_int_distribution<size_t> dist(b1, b2);
size_t chunk_size = dist(engine);
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
if(func(curr_b, std::min(curr_b + chunk_size, N))){
return;
}
chunk_size = dist(engine);
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
private:
float _alpha {0.01f};
float _beta {0.5f};
};
/**
@brief default partitioner set to tf::GuidedPartitioner
Guided partitioner can achieve decent performance for most parallel algorithms,
especially for those with irregular and unbalanced workload per iteration.
*/
using DefaultPartitioner = GuidedPartitioner;
/**
@brief determines if a type is a partitioner
A partitioner is a derived type from tf::PartitionerBase.
*/
template <typename C>
inline constexpr bool is_partitioner_v = std::is_base_of<PartitionerBase, C>::value;
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cudaflow.hpp | #pragma once
#include "../taskflow.hpp"
#include "cuda_task.hpp"
#include "cuda_capturer.hpp"
/**
@file taskflow/cuda/cudaflow.hpp
@brief cudaFlow include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// class definition: cudaFlow
// ----------------------------------------------------------------------------
/**
@class cudaFlow
@brief class to create a %cudaFlow task dependency graph
A %cudaFlow is a high-level interface over CUDA Graph to perform GPU operations
using the task dependency graph model.
The class provides a set of methods for creating and launching different tasks
on one or multiple CUDA devices,
for instance, kernel tasks, data transfer tasks, and memory operation tasks.
The following example creates a %cudaFlow of two kernel tasks, @c task1 and
@c task2, where @c task1 runs before @c task2.
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
taskflow.emplace([&](tf::cudaFlow& cf){
// create two kernel tasks
tf::cudaTask task1 = cf.kernel(grid1, block1, shm_size1, kernel1, args1);
tf::cudaTask task2 = cf.kernel(grid2, block2, shm_size2, kernel2, args2);
// kernel1 runs before kernel2
task1.precede(task2);
});
executor.run(taskflow).wait();
@endcode
A %cudaFlow is a task (tf::Task) created from tf::Taskflow
and will be run by @em one worker thread in the executor.
That is, the callable that describes a %cudaFlow
will be executed sequentially.
Inside a %cudaFlow task, different GPU tasks (tf::cudaTask) may run
in parallel scheduled by the CUDA runtime.
Please refer to @ref GPUTaskingcudaFlow for details.
*/
class cudaFlow {
friend class Executor;
// created by user
struct External {
cudaGraph graph;
};
// created by executor
struct Internal {
Internal(Executor& e) : executor{e} {}
Executor& executor;
};
using handle_t = std::variant<External, Internal>;
// variant index
constexpr static auto EXTERNAL = get_index_v<External, handle_t>;
constexpr static auto INTERNAL = get_index_v<Internal, handle_t>;
public:
/**
@brief constructs a standalone %cudaFlow
A standalone %cudaFlow does not go through any taskflow and
can be run by the caller thread using explicit offload methods
(e.g., tf::cudaFlow::offload).
*/
cudaFlow();
/**
@brief destroys the %cudaFlow and its associated native CUDA graph
and executable graph
*/
~cudaFlow();
/**
@brief queries the emptiness of the graph
*/
bool empty() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief clears the %cudaFlow object
*/
void clear();
/**
@brief dumps the %cudaFlow graph into a DOT format through an
output stream
*/
void dump(std::ostream& os) const;
/**
@brief dumps the native CUDA graph into a DOT format through an
output stream
The native CUDA graph may be different from the upper-level %cudaFlow
graph when flow capture is involved.
*/
void dump_native_graph(std::ostream& os) const;
// ------------------------------------------------------------------------
// Graph building routines
// ------------------------------------------------------------------------
/**
@brief creates a no-operation task
@return a tf::cudaTask handle
An empty node performs no operation during execution,
but can be used for transitive ordering.
For example, a phased execution graph with 2 groups of @c n nodes
with a barrier between them can be represented using an empty node
and @c 2*n dependency edges,
rather than no empty node and @c n^2 dependency edges.
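A sketch of the barrier pattern described above (the two task containers
are placeholders):
@code{.cpp}
tf::cudaTask barrier = cf.noop();
for(auto& t : phase1_tasks) { t.precede(barrier); }
for(auto& t : phase2_tasks) { barrier.precede(t); }
@endcode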
*/
cudaTask noop();
/**
@brief creates a host task that runs a callable on the host
@tparam C callable type
@param callable a callable object with neither arguments nor return
(i.e., constructible from @c std::function<void()>)
@return a tf::cudaTask handle
A host task can only execute CPU-specific functions and cannot do any CUDA calls
(e.g., @c cudaMalloc).
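  A minimal sketch (the message is a placeholder):
  @code{.cpp}
  tf::cudaTask print = cf.host([](){ std::printf("running on the host\n"); });
  @endcode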
*/
template <typename C>
cudaTask host(C&& callable);
/**
@brief updates parameters of a host task
The method is similar to tf::cudaFlow::host but operates on a task
of type tf::cudaTaskType::HOST.
*/
template <typename C>
void host(cudaTask task, C&& callable);
/**
@brief creates a kernel task
@tparam F kernel function type
@tparam ArgsT kernel function parameters type
@param g configured grid
@param b configured block
@param s configured shared memory size in bytes
@param f kernel function
@param args arguments to forward to the kernel function by copy
@return a tf::cudaTask handle
*/
template <typename F, typename... ArgsT>
cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT&&... args);
/**
@brief updates parameters of a kernel task
The method is similar to tf::cudaFlow::kernel but operates on a task
of type tf::cudaTaskType::KERNEL.
The kernel function name must NOT change.
*/
template <typename F, typename... ArgsT>
void kernel(
cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT&&... args
);
/**
@brief creates a memset task that fills untyped data with a byte value
@param dst pointer to the destination device memory area
@param v value to set for each byte of specified memory
@param count size in bytes to set
@return a tf::cudaTask handle
A memset task fills the first @c count bytes of device memory area
pointed by @c dst with the byte value @c v.
*/
cudaTask memset(void* dst, int v, size_t count);
/**
@brief updates parameters of a memset task
The method is similar to tf::cudaFlow::memset but operates on a task
of type tf::cudaTaskType::MEMSET.
The source/destination memory may have different address values but
must be allocated from the same contexts as the original
source/destination memory.
*/
void memset(cudaTask task, void* dst, int ch, size_t count);
/**
@brief creates a memcpy task that copies untyped data in bytes
@param tgt pointer to the target memory block
@param src pointer to the source memory block
@param bytes bytes to copy
@return a tf::cudaTask handle
A memcpy task transfers @c bytes of data from a source location
to a target location. Direction can be arbitrary among CPUs and GPUs.
*/
cudaTask memcpy(void* tgt, const void* src, size_t bytes);
/**
@brief updates parameters of a memcpy task
The method is similar to tf::cudaFlow::memcpy but operates on a task
of type tf::cudaTaskType::MEMCPY.
The source/destination memory may have different address values but
must be allocated from the same contexts as the original
source/destination memory.
*/
void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes);
/**
@brief creates a memset task that sets a typed memory block to zero
@tparam T element type (size of @c T must be either 1, 2, or 4)
@param dst pointer to the destination device memory area
@param count number of elements
@return a tf::cudaTask handle
A zero task zeroes the first @c count elements of type @c T
in a device memory area pointed by @c dst.
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
cudaTask zero(T* dst, size_t count);
/**
@brief updates parameters of a memset task to a zero task
The method is similar to tf::cudaFlow::zero but operates on
a task of type tf::cudaTaskType::MEMSET.
The source/destination memory may have different address values but
must be allocated from the same contexts as the original
source/destination memory.
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
void zero(cudaTask task, T* dst, size_t count);
/**
@brief creates a memset task that fills a typed memory block with a value
@tparam T element type (size of @c T must be either 1, 2, or 4)
@param dst pointer to the destination device memory area
@param value value to fill for each element of type @c T
@param count number of elements
@return a tf::cudaTask handle
A fill task fills the first @c count elements of type @c T with @c value
in a device memory area pointed by @c dst.
The value to fill is interpreted in type @c T rather than byte.
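  For instance, the following sketch fills 64 integers with the value
  @c 1234, which a byte-wise tf::cudaFlow::memset cannot express
  (@c gpu_ints is an assumed device pointer to @c int):
  @code{.cpp}
  cf.fill(gpu_ints, 1234, 64);
  @endcode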
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
cudaTask fill(T* dst, T value, size_t count);
/**
@brief updates parameters of a memset task to a fill task
The method is similar to tf::cudaFlow::fill but operates on a task
of type tf::cudaTaskType::MEMSET.
The source/destination memory may have different address values but
must be allocated from the same contexts as the original
source/destination memory.
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
void fill(cudaTask task, T* dst, T value, size_t count);
/**
@brief creates a memcopy task that copies typed data
@tparam T element type (non-void)
@param tgt pointer to the target memory block
@param src pointer to the source memory block
@param num number of elements to copy
@return a tf::cudaTask handle
A copy task transfers <tt>num*sizeof(T)</tt> bytes of data from a source location
to a target location. Direction can be arbitrary among CPUs and GPUs.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
cudaTask copy(T* tgt, const T* src, size_t num);
/**
@brief updates parameters of a memcpy task to a copy task
The method is similar to tf::cudaFlow::copy but operates on a task
of type tf::cudaTaskType::MEMCPY.
The source/destination memory may have different address values but
must be allocated from the same contexts as the original
source/destination memory.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
void copy(cudaTask task, T* tgt, const T* src, size_t num);
// ------------------------------------------------------------------------
// offload methods
// ------------------------------------------------------------------------
/**
@brief offloads the %cudaFlow onto a GPU and repeatedly runs it until
the predicate becomes true
  @tparam P predicate type (a callable that takes no arguments and returns @c bool)
  @param predicate a predicate (returns @c true to stop the execution)
Immediately offloads the present %cudaFlow onto a GPU and
repeatedly runs it until the predicate returns @c true.
An offloaded %cudaFlow forces the underlying graph to be instantiated.
After the instantiation, you should not modify the graph topology
but update node parameters.
By default, if users do not offload the %cudaFlow,
the executor will offload it once.
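  A minimal sketch that offloads the %cudaFlow three times:
  @code{.cpp}
  cf.offload_until([repeat=3] () mutable { return repeat-- == 0; });
  @endcode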
*/
template <typename P>
void offload_until(P&& predicate);
/**
@brief offloads the %cudaFlow and executes it by the given times
@param N number of executions
*/
void offload_n(size_t N);
/**
@brief offloads the %cudaFlow and executes it once
*/
void offload();
// ------------------------------------------------------------------------
// generic algorithms
// ------------------------------------------------------------------------
/**
@brief runs a callable with only a single kernel thread
@tparam C callable type
@param c callable to run by a single kernel thread
@return a tf::cudaTask handle
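  A minimal sketch (assuming @c gpu_res points to a device-resident integer):
  @code{.cpp}
  tf::cudaTask set100 = cf.single_task([gpu_res] __device__ () {
    *gpu_res = 100;
  });
  @endcode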
*/
template <typename C>
cudaTask single_task(C c);
/**
@brief updates a single-threaded kernel task
This method is similar to cudaFlow::single_task but operates
on an existing task.
*/
template <typename C>
void single_task(cudaTask task, C c);
/**
@brief applies a callable to each dereferenced element of the data array
@tparam I iterator type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable a callable object to apply to the dereferenced iterator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(auto itr = first; itr != last; itr++) {
callable(*itr);
}
@endcode
*/
template <typename I, typename C>
cudaTask for_each(I first, I last, C callable);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::for_each
The type of the iterators and the callable must be the same as
the task created from tf::cudaFlow::for_each.
*/
template <typename I, typename C>
void for_each(cudaTask task, I first, I last, C callable);
/**
@brief applies a callable to each index in the range with the step size
@tparam I index type
@tparam C callable type
@param first beginning index
  @param last ending index (exclusive)
  @param step step size
  @param callable the callable to apply to each index in the range
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
// step is positive [first, last)
for(auto i=first; i<last; i+=step) {
callable(i);
}
// step is negative [first, last)
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
*/
template <typename I, typename C>
cudaTask for_each_index(I first, I last, I step, C callable);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::for_each_index
The type of the iterators and the callable must be the same as
the task created from tf::cudaFlow::for_each_index.
*/
template <typename I, typename C>
void for_each_index(
cudaTask task, I first, I last, I step, C callable
);
/**
@brief applies a callable to a source range and stores the result in a target range
@tparam I input iterator type
@tparam O output iterator type
@tparam C unary operator type
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param op the operator to apply to transform each element in the range
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
    *output++ = op(*first++);
}
@endcode
*/
template <typename I, typename O, typename C>
cudaTask transform(I first, I last, O output, C op);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::transform
The type of the iterators and the callable must be the same as
  the task created from tf::cudaFlow::transform.
*/
template <typename I, typename O, typename C>
void transform(cudaTask task, I first, I last, O output, C c);
/**
@brief creates a task to perform parallel transforms over two ranges of items
@tparam I1 first input iterator type
@tparam I2 second input iterator type
@tparam O output iterator type
  @tparam C binary operator type
@param first1 iterator to the beginning of the input range
@param last1 iterator to the end of the input range
  @param first2 iterator to the beginning of the second input range
@param output iterator to the beginning of the output range
@param op binary operator to apply to transform each pair of items in the
two input ranges
@return cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first1 != last1) {
*output++ = op(*first1++, *first2++);
}
@endcode
*/
template <typename I1, typename I2, typename O, typename C>
cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::transform
The type of the iterators and the callable must be the same as
  the task created from tf::cudaFlow::transform.
*/
template <typename I1, typename I2, typename O, typename C>
void transform(
cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c
);
/**
@brief performs parallel reduction over a range of items
@tparam I input iterator type
@tparam T value type
@tparam B binary operator type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param result pointer to the result with an initialized value
@param bop binary operator to apply to reduce items
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = bop(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename B>
cudaTask reduce(I first, I last, T* result, B bop);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::reduce
The type of the iterators, result, and callable must be the same as
the task created from tf::cudaFlow::reduce.
*/
template <typename I, typename T, typename C>
void reduce(cudaTask task, I first, I last, T* result, C op);
/**
@brief similar to tf::cudaFlow::reduce but does not assume any initial
value to reduce
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
  *result = *first++; // no initial values participate in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename B>
cudaTask uninitialized_reduce(I first, I last, T* result, B bop);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::uninitialized_reduce
The type of the iterators, result, and callable must be the same as
the task created from tf::cudaFlow::uninitialized_reduce.
*/
template <typename I, typename T, typename C>
void uninitialized_reduce(
cudaTask task, I first, I last, T* result, C op
);
/**
@brief performs parallel reduction over a range of transformed items
@tparam I input iterator type
@tparam T value type
@tparam B binary operator type
@tparam U unary operator type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param result pointer to the result with an initialized value
@param bop binary operator to apply to reduce items
@param uop unary operator to transform each item before reduction
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template <typename I, typename T, typename B, typename U>
cudaTask transform_reduce(I first, I last, T* result, B bop, U uop);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::transform_reduce
*/
template <typename I, typename T, typename B, typename U>
void transform_reduce(cudaTask, I first, I last, T* result, B bop, U uop);
/**
@brief similar to tf::cudaFlow::transform_reduce but does not assume any initial
value to reduce
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
  *result = uop(*first++); // no initial values participate in the loop
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template <typename I, typename T, typename B, typename U>
cudaTask transform_uninitialized_reduce(
I first, I last, T* result, B bop, U uop
);
/**
@brief updates parameters of a kernel task created from
tf::cudaFlow::transform_uninitialized_reduce
*/
template <typename I, typename T, typename B, typename U>
void transform_uninitialized_reduce(
cudaTask task, I first, I last, T* result, B bop, U uop
);
/**
@brief creates a task to perform parallel inclusive scan
over a range of items
@tparam I input iterator type
@tparam O output iterator type
@tparam C binary operator type
@param first iterator to the beginning
@param last iterator to the end
@param output iterator to the beginning of the output
@param op binary operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(size_t i=0; i<std::distance(first, last); i++) {
*(output + i) = i ? op(*(first+i), *(output+i-1)) : *(first+i);
}
@endcode
*/
template <typename I, typename O, typename C>
cudaTask inclusive_scan(I first, I last, O output, C op);
/**
@brief updates the parameters of a task created
from tf::cudaFlow::inclusive_scan
This method is similar to tf::cudaFlow::inclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename C>
void inclusive_scan(cudaTask task, I first, I last, O output, C op);
/**
@brief similar to cudaFlow::inclusive_scan but excludes the first value
*/
template <typename I, typename O, typename C>
cudaTask exclusive_scan(I first, I last, O output, C op);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::exclusive_scan
This method is similar to tf::cudaFlow::exclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename C>
void exclusive_scan(cudaTask task, I first, I last, O output, C op);
/**
@brief creates a task to perform parallel inclusive scan
over a range of transformed items
@tparam I input iterator type
@tparam O output iterator type
@tparam B binary operator type
@tparam U unary operator type
@param first iterator to the beginning
@param last iterator to the end
@param output iterator to the beginning of the output
@param bop binary operator
@param uop unary operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
for(size_t i=0; i<std::distance(first, last); i++) {
    *(output + i) = i ? bop(uop(*(first+i)), *(output+i-1)) : uop(*(first+i));
}
@endcode
*/
template <typename I, typename O, typename B, typename U>
cudaTask transform_inclusive_scan(I first, I last, O output, B bop, U uop);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::transform_inclusive_scan
This method is similar to tf::cudaFlow::transform_inclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename B, typename U>
void transform_inclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
);
/**
@brief similar to cudaFlow::transform_inclusive_scan but
excludes the first value
*/
template <typename I, typename O, typename B, typename U>
cudaTask transform_exclusive_scan(I first, I last, O output, B bop, U uop);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::transform_exclusive_scan
This method is similar to tf::cudaFlow::transform_exclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename B, typename U>
void transform_exclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
);
/**
@brief creates a task to perform parallel merge on two sorted arrays
@tparam A iterator type of the first input array
@tparam B iterator type of the second input array
@tparam C iterator type of the output array
@tparam Comp comparator type
@param a_first iterator to the beginning of the first input array
@param a_last iterator to the end of the first input array
@param b_first iterator to the beginning of the second input array
@param b_last iterator to the end of the second input array
@param c_first iterator to the beginning of the output array
@param comp binary comparator
@return a tf::cudaTask handle
Merges two sorted ranges <tt>[a_first, a_last)</tt> and
<tt>[b_first, b_last)</tt> into one sorted range beginning at @c c_first.
  A sequence is said to be sorted with respect to a comparator @c comp
  if for any iterator @c it pointing into the sequence and
  any non-negative integer @c n such that <tt>it + n</tt> is a valid iterator
  pointing to an element of the sequence, <tt>comp(*(it + n), *it)</tt>
  evaluates to @c false.
*/
template <typename A, typename B, typename C, typename Comp>
cudaTask merge(A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::merge
This method is similar to tf::cudaFlow::merge but operates on
an existing task.
*/
template <typename A, typename B, typename C, typename Comp>
void merge(
cudaTask task, A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
);
/**
@brief creates a task to perform parallel sort an array
@tparam I iterator type of the first input array
@tparam C comparator type
@param first iterator to the beginning of the input array
@param last iterator to the end of the input array
@param comp binary comparator
@return a tf::cudaTask handle
Sorts elements in the range <tt>[first, last)</tt>
with the given comparator @c comp.
*/
template <typename I, typename C>
cudaTask sort(I first, I last, C comp);
/**
@brief updates the parameters of the task created from
tf::cudaFlow::sort
This method is similar to tf::cudaFlow::sort but operates on
an existing task.
*/
template <typename I, typename C>
void sort(cudaTask task, I first, I last, C comp);
/**
@brief creates kernels that sort the given array
@tparam K_it iterator type of the key
@tparam V_it iterator type of the value
@tparam C comparator type
@param k_first iterator to the beginning of the key array
@param k_last iterator to the end of the key array
@param v_first iterator to the beginning of the value array
@param comp binary comparator
@return a tf::cudaTask handle
Sorts key-value elements in <tt>[k_first, k_last)</tt> and
<tt>[v_first, v_first + (k_last - k_first))</tt> into ascending key order
using the given comparator @c comp.
If @c i and @c j are any two valid iterators in <tt>[k_first, k_last)</tt>
such that @c i precedes @c j, and @c p and @c q are iterators in
<tt>[v_first, v_first + (k_last - k_first))</tt> corresponding to
@c i and @c j respectively, then <tt>comp(*j, *i)</tt> evaluates to @c false.
For example, assume:
+ @c keys are <tt>{1, 4, 2, 8, 5, 7}</tt>
+ @c values are <tt>{'a', 'b', 'c', 'd', 'e', 'f'}</tt>
After sort:
+ @c keys are <tt>{1, 2, 4, 5, 7, 8}</tt>
+ @c values are <tt>{'a', 'c', 'b', 'e', 'f', 'd'}</tt>
*/
template <typename K_it, typename V_it, typename C>
cudaTask sort_by_key(K_it k_first, K_it k_last, V_it v_first, C comp);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::sort_by_key
This method is similar to tf::cudaFlow::sort_by_key but operates on
an existing task.
*/
template <typename K_it, typename V_it, typename C>
void sort_by_key(
cudaTask task, K_it k_first, K_it k_last, V_it v_first, C comp
);
/**
@brief creates a task to perform parallel key-value merge
@tparam a_keys_it first key iterator type
@tparam a_vals_it first value iterator type
@tparam b_keys_it second key iterator type
@tparam b_vals_it second value iterator type
@tparam c_keys_it output key iterator type
@tparam c_vals_it output value iterator type
@tparam C comparator type
@param a_keys_first iterator to the beginning of the first key range
@param a_keys_last iterator to the end of the first key range
@param a_vals_first iterator to the beginning of the first value range
@param b_keys_first iterator to the beginning of the second key range
@param b_keys_last iterator to the end of the second key range
@param b_vals_first iterator to the beginning of the second value range
@param c_keys_first iterator to the beginning of the output key range
@param c_vals_first iterator to the beginning of the output value range
@param comp comparator
Performs a key-value merge that copies elements from
<tt>[a_keys_first, a_keys_last)</tt> and <tt>[b_keys_first, b_keys_last)</tt>
  into a single range, <tt>[c_keys_first, c_keys_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending key order.
At the same time, the merge copies elements from the two associated ranges
  <tt>[a_vals_first, a_vals_first + (a_keys_last - a_keys_first))</tt> and
  <tt>[b_vals_first, b_vals_first + (b_keys_last - b_keys_first))</tt> into a single range,
<tt>[c_vals_first, c_vals_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending order
implied by each input element's associated key.
For example, assume:
+ @c a_keys = <tt>{8, 1}</tt>
+ @c a_vals = <tt>{1, 2}</tt>
+ @c b_keys = <tt>{3, 7}</tt>
+ @c b_vals = <tt>{3, 4}</tt>
After the merge, we have:
+ @c c_keys = <tt>{1, 3, 7, 8}</tt>
+ @c c_vals = <tt>{2, 3, 4, 1}</tt>
*/
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
cudaTask merge_by_key(
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
);
/**
@brief updates the parameters of a task created from
tf::cudaFlow::merge_by_key
This method is similar to tf::cudaFlow::merge_by_key but operates
on an existing task.
*/
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
void merge_by_key(
cudaTask task,
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
);
/**
@brief creates a task to find the index of the first element in a range
@tparam I input iterator type
@tparam U unary operator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx pointer to the index of the found element
@param op unary operator which returns @c true for the required element
Finds the index @c idx of the first element in the range
<tt>[first, last)</tt> such that <tt>op(*(first+idx))</tt> is true.
This is equivalent to the parallel execution of the following loop:
@code{.cpp}
unsigned idx = 0;
for(; first != last; ++first, ++idx) {
    if (op(*first)) {
return idx;
}
}
return idx;
@endcode
*/
template <typename I, typename U>
cudaTask find_if(I first, I last, unsigned* idx, U op);
/**
@brief updates the parameters of the task created from
tf::cudaFlow::find_if
*/
template <typename I, typename U>
void find_if(cudaTask task, I first, I last, unsigned* idx, U op);
/**
@brief finds the index of the minimum element in a range
@tparam I input iterator type
@tparam O comparator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx solution index of the minimum element
@param op comparison function object
The function launches kernels asynchronously to find
the smallest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto smallest = first;
for (++first; first != last; ++first) {
if (op(*first, *smallest)) {
smallest = first;
}
}
return std::distance(first, smallest);
@endcode
*/
template <typename I, typename O>
cudaTask min_element(I first, I last, unsigned* idx, O op);
/**
@brief updates the parameters of the task created from
tf::cudaFlow::min_element
*/
template <typename I, typename O>
void min_element(cudaTask task, I first, I last, unsigned* idx, O op);
/**
@brief finds the index of the maximum element in a range
@tparam I input iterator type
@tparam O comparator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx solution index of the maximum element
@param op comparison function object
The function launches kernels asynchronously to find
the largest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto largest = first;
for (++first; first != last; ++first) {
if (op(*largest, *first)) {
largest = first;
}
}
return std::distance(first, largest);
@endcode
*/
template <typename I, typename O>
cudaTask max_element(I first, I last, unsigned* idx, O op);
/**
@brief updates the parameters of the task created from
tf::cudaFlow::max_element
*/
template <typename I, typename O>
void max_element(cudaTask task, I first, I last, unsigned* idx, O op);
// ------------------------------------------------------------------------
// subflow
// ------------------------------------------------------------------------
/**
@brief constructs a subflow graph through tf::cudaFlowCapturer
@tparam C callable type constructible from
@c std::function<void(tf::cudaFlowCapturer&)>
@param callable the callable to construct a capture flow
@return a tf::cudaTask handle
A captured subflow forms a sub-graph to the %cudaFlow and can be used to
capture custom (or third-party) kernels that cannot be directly constructed
from the %cudaFlow.
Example usage:
@code{.cpp}
taskflow.emplace([&](tf::cudaFlow& cf){
tf::cudaTask my_kernel = cf.kernel(my_arguments);
// create a flow capturer to capture custom kernels
tf::cudaTask my_subflow = cf.capture([&](tf::cudaFlowCapturer& capturer){
capturer.on([&](cudaStream_t stream){
invoke_custom_kernel_with_stream(stream, custom_arguments);
});
});
my_kernel.precede(my_subflow);
});
@endcode
*/
template <typename C>
cudaTask capture(C&& callable);
/**
@brief updates the captured child graph
The method is similar to tf::cudaFlow::capture but operates on a task
of type tf::cudaTaskType::SUBFLOW.
The new captured graph must be topologically identical to the original
captured graph.
*/
template <typename C>
void capture(cudaTask task, C callable);
private:
handle_t _handle;
cudaGraph& _graph;
cudaGraphExec _exec {nullptr};
cudaFlow(cudaGraph&, Executor&);
template <typename P>
void _offload_until_external(P&&);
template <typename P>
void _offload_until_internal(P&&);
};
// Construct a standalone cudaFlow
inline cudaFlow::cudaFlow() :
_handle {std::in_place_type_t<External>{}},
_graph {std::get_if<External>(&_handle)->graph} {
TF_CHECK_CUDA(
cudaGraphCreate(&_graph._native_handle, 0),
"cudaFlow failed to create a native graph (external mode)"
);
}
// Construct the cudaFlow from executor (internal graph)
inline cudaFlow::cudaFlow(cudaGraph& g, Executor& executor) :
_handle {std::in_place_type_t<Internal>{}, executor},
_graph {g} {
assert(_graph._native_handle == nullptr);
TF_CHECK_CUDA(
cudaGraphCreate(&_graph._native_handle, 0),
"failed to create a native graph (internal mode)"
);
}
// Destructor
inline cudaFlow::~cudaFlow() {
cudaGraphDestroy(_graph._native_handle);
_graph._native_handle = nullptr;
}
// Procedure: clear
inline void cudaFlow::clear() {
_exec.clear();
TF_CHECK_CUDA(
cudaGraphDestroy(_graph._native_handle), "failed to destroy native graph"
);
TF_CHECK_CUDA(
cudaGraphCreate(&_graph._native_handle, 0), "failed to create native graph"
);
_graph._nodes.clear();
}
// Function: empty
inline bool cudaFlow::empty() const {
return _graph._nodes.empty();
}
// Function: num_tasks
inline size_t cudaFlow::num_tasks() const {
return _graph._nodes.size();
}
// Procedure: dump
inline void cudaFlow::dump(std::ostream& os) const {
_graph.dump(os, nullptr, "");
}
// Procedure: dump
inline void cudaFlow::dump_native_graph(std::ostream& os) const {
cuda_dump_graph(os, _graph._native_handle);
}
// ----------------------------------------------------------------------------
// Graph building methods
// ----------------------------------------------------------------------------
// Function: noop
inline cudaTask cudaFlow::noop() {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Empty>{}
);
TF_CHECK_CUDA(
cudaGraphAddEmptyNode(
&node->_native_handle, _graph._native_handle, nullptr, 0
),
"failed to create a no-operation (empty) node"
);
return cudaTask(node);
}
// Function: host
template <typename C>
cudaTask cudaFlow::host(C&& c) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Host>{}, std::forward<C>(c)
);
auto h = std::get_if<cudaNode::Host>(&node->_handle);
cudaHostNodeParams p;
p.fn = cudaNode::Host::callback;
p.userData = h;
TF_CHECK_CUDA(
cudaGraphAddHostNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a host node"
);
return cudaTask(node);
}
// Function: kernel
template <typename F, typename... ArgsT>
cudaTask cudaFlow::kernel(
dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Kernel>{}, (void*)f
);
cudaKernelNodeParams p;
void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
p.func = (void*)f;
p.gridDim = g;
p.blockDim = b;
p.sharedMemBytes = s;
p.kernelParams = arguments;
p.extra = nullptr;
TF_CHECK_CUDA(
cudaGraphAddKernelNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a kernel task"
);
return cudaTask(node);
}
// Function: zero
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
>
cudaTask cudaFlow::zero(T* dst, size_t count) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Memset>{}
);
auto p = cuda_get_zero_parms(dst, count);
TF_CHECK_CUDA(
cudaGraphAddMemsetNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a memset (zero) task"
);
return cudaTask(node);
}
// Function: fill
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
>
cudaTask cudaFlow::fill(T* dst, T value, size_t count) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Memset>{}
);
auto p = cuda_get_fill_parms(dst, value, count);
TF_CHECK_CUDA(
cudaGraphAddMemsetNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a memset (fill) task"
);
return cudaTask(node);
}
// Function: copy
template <
typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
cudaTask cudaFlow::copy(T* tgt, const T* src, size_t num) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Memcpy>{}
);
auto p = cuda_get_copy_parms(tgt, src, num);
TF_CHECK_CUDA(
cudaGraphAddMemcpyNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a memcpy (copy) task"
);
return cudaTask(node);
}
// Function: memset
inline cudaTask cudaFlow::memset(void* dst, int ch, size_t count) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Memset>{}
);
auto p = cuda_get_memset_parms(dst, ch, count);
TF_CHECK_CUDA(
cudaGraphAddMemsetNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a memset task"
);
return cudaTask(node);
}
// Function: memcpy
inline cudaTask cudaFlow::memcpy(void* tgt, const void* src, size_t bytes) {
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Memcpy>{}
);
auto p = cuda_get_memcpy_parms(tgt, src, bytes);
TF_CHECK_CUDA(
cudaGraphAddMemcpyNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, &p
),
"failed to create a memcpy task"
);
return cudaTask(node);
}
// ------------------------------------------------------------------------
// update methods
// ------------------------------------------------------------------------
// Function: host
template <typename C>
void cudaFlow::host(cudaTask task, C&& c) {
if(task.type() != cudaTaskType::HOST) {
TF_THROW(task, " is not a host task");
}
auto h = std::get_if<cudaNode::Host>(&task._node->_handle);
h->func = std::forward<C>(c);
}
// Function: update kernel parameters
template <typename F, typename... ArgsT>
void cudaFlow::kernel(
cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
) {
if(task.type() != cudaTaskType::KERNEL) {
TF_THROW(task, " is not a kernel task");
}
cudaKernelNodeParams p;
void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
p.func = (void*)f;
p.gridDim = g;
p.blockDim = b;
p.sharedMemBytes = s;
p.kernelParams = arguments;
p.extra = nullptr;
TF_CHECK_CUDA(
cudaGraphExecKernelNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update kernel parameters on ", task
);
}
// Function: update copy parameters
template <typename T, std::enable_if_t<!std::is_same_v<T, void>, void>*>
void cudaFlow::copy(cudaTask task, T* tgt, const T* src, size_t num) {
if(task.type() != cudaTaskType::MEMCPY) {
TF_THROW(task, " is not a memcpy task");
}
auto p = cuda_get_copy_parms(tgt, src, num);
TF_CHECK_CUDA(
cudaGraphExecMemcpyNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update memcpy parameters on ", task
);
}
// Function: update memcpy parameters
inline void cudaFlow::memcpy(
cudaTask task, void* tgt, const void* src, size_t bytes
) {
if(task.type() != cudaTaskType::MEMCPY) {
TF_THROW(task, " is not a memcpy task");
}
auto p = cuda_get_memcpy_parms(tgt, src, bytes);
TF_CHECK_CUDA(
cudaGraphExecMemcpyNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update memcpy parameters on ", task
);
}
// Procedure: memset
inline void cudaFlow::memset(cudaTask task, void* dst, int ch, size_t count) {
if(task.type() != cudaTaskType::MEMSET) {
TF_THROW(task, " is not a memset task");
}
auto p = cuda_get_memset_parms(dst, ch, count);
TF_CHECK_CUDA(
cudaGraphExecMemsetNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update memset parameters on ", task
);
}
// Procedure: fill
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
>
void cudaFlow::fill(cudaTask task, T* dst, T value, size_t count) {
if(task.type() != cudaTaskType::MEMSET) {
TF_THROW(task, " is not a memset task");
}
auto p = cuda_get_fill_parms(dst, value, count);
TF_CHECK_CUDA(
cudaGraphExecMemsetNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update memset parameters on ", task
);
}
// Procedure: zero
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
>
void cudaFlow::zero(cudaTask task, T* dst, size_t count) {
if(task.type() != cudaTaskType::MEMSET) {
TF_THROW(task, " is not a memset task");
}
auto p = cuda_get_zero_parms(dst, count);
TF_CHECK_CUDA(
cudaGraphExecMemsetNodeSetParams(_exec, task._node->_native_handle, &p),
"failed to update memset parameters on ", task
);
}
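// Example (illustrative): the update methods above rebind parameters on an
// already instantiated executable graph, avoiding re-instantiation:
//   tf::cudaFlow cf;
//   tf::cudaTask t = cf.zero(d_data, 1000);
//   cf.offload();              // instantiates the graph and runs it once
//   cf.zero(t, d_data, 2000);  // same memset node now clears 2000 elements
//   cf.offload();              // runs the updated executable graph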
// Function: capture
template <typename C>
void cudaFlow::capture(cudaTask task, C c) {
if(task.type() != cudaTaskType::SUBFLOW) {
TF_THROW(task, " is not a subflow task");
}
// insert a subflow node
// construct a captured flow from the callable
auto node_handle = std::get_if<cudaNode::Subflow>(&task._node->_handle);
node_handle->graph.clear();
cudaFlowCapturer capturer(node_handle->graph);
c(capturer);
// obtain the optimized captured graph
auto captured = capturer._capture();
//cuda_dump_graph(std::cout, captured);
TF_CHECK_CUDA(
cudaGraphExecChildGraphNodeSetParams(_exec, task._node->_native_handle, captured),
"failed to update a captured child graph"
);
TF_CHECK_CUDA(cudaGraphDestroy(captured), "failed to destroy captured graph");
}
// ----------------------------------------------------------------------------
// captured flow
// ----------------------------------------------------------------------------
// Function: capture
template <typename C>
cudaTask cudaFlow::capture(C&& c) {
// insert a subflow node
auto node = _graph.emplace_back(
_graph, std::in_place_type_t<cudaNode::Subflow>{}
);
// construct a captured flow from the callable
auto node_handle = std::get_if<cudaNode::Subflow>(&node->_handle);
node_handle->graph.clear();
cudaFlowCapturer capturer(node_handle->graph);
c(capturer);
// obtain the optimized captured graph
auto captured = capturer._capture();
//cuda_dump_graph(std::cout, captured);
TF_CHECK_CUDA(
cudaGraphAddChildGraphNode(
&node->_native_handle, _graph._native_handle, nullptr, 0, captured
),
"failed to add a cudaFlow capturer task"
);
TF_CHECK_CUDA(cudaGraphDestroy(captured), "failed to destroy captured graph");
return cudaTask(node);
}
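// Example (a sketch): capture is useful for stream-based activities that
// have no native CUDA graph node type; the capturer records them into a
// child graph (my_stream_api is a hypothetical library call):
//   cf.capture([&](tf::cudaFlowCapturer& cap){
//     cap.on([&](cudaStream_t s){ my_stream_api(d_data, n, s); });
//   });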
// ----------------------------------------------------------------------------
// Offload methods
// ----------------------------------------------------------------------------
// Procedure: offload_until
template <typename P>
void cudaFlow::offload_until(P&& predicate) {
_offload_until_external(std::forward<P>(predicate));
/*
// turns out the optimized version runs slower...
switch(_handle.index()) {
case EXTERNAL: {
_offload_until_external(std::forward<P>(predicate));
}
break;
case INTERNAL: {
_offload_until_internal(std::forward<P>(predicate));
}
break;
default:
break;
}*/
}
template <typename P>
void cudaFlow::_offload_until_external(P&& predicate) {
if(!_exec) {
_exec.instantiate(_graph._native_handle);
}
cudaStream stream;
while(!predicate()) {
_exec.launch(stream);
stream.synchronize();
}
_graph._state = cudaGraph::OFFLOADED;
}
template <typename P>
void cudaFlow::_offload_until_internal(P&& predicate) {
auto& executor = std::get<Internal>(_handle).executor;
if(!_exec) {
_exec.instantiate(_graph._native_handle);
}
cudaStream stream;
cudaEvent event(cudaEventDisableTiming);
while(!predicate()) {
_exec.launch(stream);
stream.record(event);
executor.loop_until([&event] () -> bool {
return cudaEventQuery(event) == cudaSuccess;
});
}
_graph._state = cudaGraph::OFFLOADED;
}
// Procedure: offload_n
inline void cudaFlow::offload_n(size_t n) {
offload_until([repeat=n] () mutable { return repeat-- == 0; });
}
// Procedure: offload
inline void cudaFlow::offload() {
offload_until([repeat=1] () mutable { return repeat-- == 0; });
}
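// Example (a sketch): offload_n and offload above are thin wrappers over
// offload_until with countdown predicates; any convergence test works:
//   cf.offload_until([&error] () { return error < 1e-6; });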
// ############################################################################
// Forward declaration: FlowBuilder
// ############################################################################
// FlowBuilder::emplace_on
template <typename C, typename D,
std::enable_if_t<is_cudaflow_task_v<C>, void>*
>
Task FlowBuilder::emplace_on(C&& c, D&& d) {
auto n = _graph._emplace_back(
std::in_place_type_t<Node::cudaFlow>{},
[c=std::forward<C>(c), d=std::forward<D>(d)] (Executor& e, Node* p) mutable {
cudaScopedDevice ctx(d);
e._invoke_cudaflow_task_entry(p, c);
},
std::make_unique<cudaGraph>()
);
return Task(n);
}
// FlowBuilder::emplace
template <typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return emplace_on(std::forward<C>(c), tf::cuda_get_device());
}
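// Example (illustrative): emplace_on pins the cudaFlow task to a given GPU
// via cudaScopedDevice, while emplace defaults to tf::cuda_get_device():
//   taskflow.emplace_on([](tf::cudaFlow& cf){ /* build the flow */ }, 1);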
// ############################################################################
// Forward declaration: Executor
// ############################################################################
// Procedure: _invoke_cudaflow_task_entry
template <typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>*>
void Executor::_invoke_cudaflow_task_entry(Node* node, C&& c) {
using T = std::conditional_t<
std::is_invocable_r_v<void, C, cudaFlow&>, cudaFlow, cudaFlowCapturer
>;
auto h = std::get_if<Node::cudaFlow>(&node->_handle);
cudaGraph* g = dynamic_cast<cudaGraph*>(h->graph.get());
g->clear();
T cf(*g, *this);
c(cf);
if(!(g->_state & cudaGraph::OFFLOADED)) {
cf.offload();
}
}
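// Note (illustrative): the std::conditional_t above dispatches on the
// callable's signature, so both task forms are accepted by emplace:
//   taskflow.emplace([](tf::cudaFlow& cf)        { /* native graph */ });
//   taskflow.emplace([](tf::cudaFlowCapturer& c) { /* stream capture */ });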
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_task.hpp | #pragma once
#include "cuda_graph.hpp"
/**
@file cuda_task.hpp
@brief cudaTask include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// cudaTask Types
// ----------------------------------------------------------------------------
/**
@enum cudaTaskType
@brief enumeration of all %cudaTask types
*/
enum class cudaTaskType : int {
/** @brief empty task type */
EMPTY = 0,
/** @brief host task type */
HOST,
/** @brief memory set task type */
MEMSET,
/** @brief memory copy task type */
MEMCPY,
/** @brief kernel task type */
KERNEL,
/** @brief subflow (child graph) task type */
SUBFLOW,
/** @brief capture task type */
CAPTURE,
/** @brief undefined task type */
UNDEFINED
};
/**
@brief converts a cudaTaskType to a human-readable string
*/
constexpr const char* to_string(cudaTaskType type) {
switch(type) {
case cudaTaskType::EMPTY: return "empty";
case cudaTaskType::HOST: return "host";
case cudaTaskType::MEMSET: return "memset";
case cudaTaskType::MEMCPY: return "memcpy";
case cudaTaskType::KERNEL: return "kernel";
case cudaTaskType::SUBFLOW: return "subflow";
case cudaTaskType::CAPTURE: return "capture";
default: return "undefined";
}
}
// ----------------------------------------------------------------------------
// cudaTask
// ----------------------------------------------------------------------------
/**
@class cudaTask
@brief class to create a task handle over an internal node of a %cudaFlow graph
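A %cudaTask is a lightweight, non-owning handle over the node; copying the
handle does not copy the node. A minimal sketch (kernel and buffer names
are illustrative):

@code{.cpp}
tf::cudaFlow cf;
tf::cudaTask h2d = cf.memcpy(d_data, h_data, bytes).name("h2d");
tf::cudaTask knl = cf.kernel(grid, block, 0, my_kernel, d_data).name("kernel");
h2d.precede(knl);  // run the copy before the kernel
@endcode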
*/
class cudaTask {
friend class cudaFlow;
friend class cudaFlowCapturer;
friend class cudaFlowCapturerBase;
friend std::ostream& operator << (std::ostream&, const cudaTask&);
public:
/**
@brief constructs an empty cudaTask
*/
cudaTask() = default;
/**
@brief copy-constructs a cudaTask
*/
cudaTask(const cudaTask&) = default;
/**
@brief copy-assigns a cudaTask
*/
cudaTask& operator = (const cudaTask&) = default;
/**
@brief adds precedence links from this to other tasks
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
cudaTask& precede(Ts&&... tasks);
/**
@brief adds precedence links from other tasks to this
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
cudaTask& succeed(Ts&&... tasks);
/**
@brief assigns a name to the task
@param name a @std_string acceptable string
@return @c *this
*/
cudaTask& name(const std::string& name);
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors
*/
size_t num_successors() const;
/**
@brief queries the number of dependents
*/
size_t num_dependents() const;
/**
@brief queries if the task is empty (not associated with a cudaNode)
*/
bool empty() const;
/**
@brief queries the task type
*/
cudaTaskType type() const;
/**
@brief dumps the task through an output stream
@tparam T output stream type with insertion operator (<<) defined
@param ostream an output stream target
*/
template <typename T>
void dump(T& ostream) const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
private:
cudaTask(cudaNode*);
cudaNode* _node {nullptr};
};
// Constructor
inline cudaTask::cudaTask(cudaNode* node) : _node {node} {
}
// Function: precede
template <typename... Ts>
cudaTask& cudaTask::precede(Ts&&... tasks) {
(_node->_precede(tasks._node), ...);
return *this;
}
// Function: succeed
template <typename... Ts>
cudaTask& cudaTask::succeed(Ts&&... tasks) {
(tasks._node->_precede(_node), ...);
return *this;
}
// Function: empty
inline bool cudaTask::empty() const {
return _node == nullptr;
}
// Function: name
inline cudaTask& cudaTask::name(const std::string& name) {
_node->_name = name;
return *this;
}
// Function: name
inline const std::string& cudaTask::name() const {
return _node->_name;
}
// Function: num_successors
inline size_t cudaTask::num_successors() const {
return _node->_successors.size();
}
// Function: num_dependents
inline size_t cudaTask::num_dependents() const {
return _node->_dependents.size();
}
// Function: type
inline cudaTaskType cudaTask::type() const {
switch(_node->_handle.index()) {
case cudaNode::EMPTY: return cudaTaskType::EMPTY;
case cudaNode::HOST: return cudaTaskType::HOST;
case cudaNode::MEMSET: return cudaTaskType::MEMSET;
case cudaNode::MEMCPY: return cudaTaskType::MEMCPY;
case cudaNode::KERNEL: return cudaTaskType::KERNEL;
case cudaNode::SUBFLOW: return cudaTaskType::SUBFLOW;
case cudaNode::CAPTURE: return cudaTaskType::CAPTURE;
default: return cudaTaskType::UNDEFINED;
}
}
// Procedure: dump
template <typename T>
void cudaTask::dump(T& os) const {
os << "cudaTask ";
if(_node->_name.empty()) os << _node;
else os << _node->_name;
os << " [type=" << to_string(type()) << ']';
}
// Function: for_each_successor
template <typename V>
void cudaTask::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node->_successors.size(); ++i) {
visitor(cudaTask(_node->_successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void cudaTask::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node->_dependents.size(); ++i) {
visitor(cudaTask(_node->_dependents[i]));
}
}
// ----------------------------------------------------------------------------
// global ostream
// ----------------------------------------------------------------------------
/**
@brief overload of ostream inserter operator for cudaTask
*/
inline std::ostream& operator << (std::ostream& os, const cudaTask& ct) {
ct.dump(os);
return os;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_execution_policy.hpp | #pragma once
#include "cuda_error.hpp"
/**
@file cuda_execution_policy.hpp
@brief CUDA execution policy include file
*/
namespace tf {
/**
@class cudaExecutionPolicy
@brief class to define execution policy for CUDA standard algorithms
@tparam NT number of threads per block
@tparam VT number of work units per thread
Execution policy configures the kernel execution parameters in CUDA algorithms.
The first template argument, @c NT, the number of threads per block,
should always be a power of two.
The second template argument, @c VT, the number of work units per thread,
is recommended to be an odd number to avoid bank conflicts.
For details, refer to @ref CUDASTDExecutionPolicy.
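A minimal usage sketch (the stream setup is illustrative):

@code{.cpp}
cudaStream_t stream;
cudaStreamCreate(&stream);
tf::cudaExecutionPolicy<512, 9> policy(stream);
// policy.nv == 4608: each block processes NT*VT = 512*9 elements
cudaStreamDestroy(stream);
@endcode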
*/
template<unsigned NT, unsigned VT>
class cudaExecutionPolicy {
static_assert(is_pow2(NT), "max # threads per block must be a power of two");
public:
/** @brief static constant for getting the number of threads per block */
const static unsigned nt = NT;
/** @brief static constant for getting the number of work units per thread */
const static unsigned vt = VT;
/** @brief static constant for getting the number of elements to process per block */
const static unsigned nv = NT*VT;
/**
@brief constructs an execution policy object with default stream
*/
cudaExecutionPolicy() = default;
/**
@brief constructs an execution policy object with the given stream
*/
explicit cudaExecutionPolicy(cudaStream_t s) : _stream{s} {}
/**
@brief queries the associated stream
*/
cudaStream_t stream() noexcept { return _stream; }
/**
@brief assigns a stream
*/
void stream(cudaStream_t stream) noexcept { _stream = stream; }
private:
cudaStream_t _stream {0};
};
/**
@brief default execution policy
*/
using cudaDefaultExecutionPolicy = cudaExecutionPolicy<512, 9>;
} // end of namespace tf -----------------------------------------------------
| hpp |