repo_name | file_path | content | extention
---|---|---|---|
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_stream.hpp | #pragma once
#include "cuda_pool.hpp"
/**
@file cuda_stream.hpp
@brief CUDA stream utilities include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// cudaStream
// ----------------------------------------------------------------------------
/**
@class cudaStream
@brief class to create an RAII-styled wrapper over a native CUDA stream
A cudaStream object is an RAII-styled wrapper over a native CUDA stream
(@c cudaStream_t).
A cudaStream object is move-only.
*/
class cudaStream {
struct cudaStreamCreator {
cudaStream_t operator () () const {
cudaStream_t stream;
TF_CHECK_CUDA(cudaStreamCreate(&stream), "failed to create a CUDA stream");
return stream;
}
};
struct cudaStreamDeleter {
void operator () (cudaStream_t stream) const {
if(stream) {
cudaStreamDestroy(stream);
}
}
};
public:
/**
@brief constructs an RAII-styled object from the given CUDA stream
Constructs a cudaStream object which owns @c stream.
*/
explicit cudaStream(cudaStream_t stream) : _stream(stream) {
}
/**
@brief constructs an RAII-styled object for a new CUDA stream
Equivalent to calling @c cudaStreamCreate to create a stream.
*/
cudaStream() : _stream{ cudaStreamCreator{}() } {
}
/**
@brief disabled copy constructor
*/
cudaStream(const cudaStream&) = delete;
/**
@brief move constructor
*/
cudaStream(cudaStream&& rhs) : _stream{rhs._stream} {
rhs._stream = nullptr;
}
/**
@brief destructs the CUDA stream
*/
~cudaStream() {
cudaStreamDeleter {} (_stream);
}
/**
@brief disabled copy assignment
*/
cudaStream& operator = (const cudaStream&) = delete;
/**
@brief move assignment
*/
cudaStream& operator = (cudaStream&& rhs) {
cudaStreamDeleter {} (_stream);
_stream = rhs._stream;
rhs._stream = nullptr;
return *this;
}
/**
@brief implicit conversion to the native CUDA stream (cudaStream_t)
Returns the underlying stream of type @c cudaStream_t.
*/
operator cudaStream_t () const {
return _stream;
}
/**
@brief synchronizes the associated stream
Equivalent to calling @c cudaStreamSynchronize to block
until this stream has completed all operations.
*/
void synchronize() const {
TF_CHECK_CUDA(
cudaStreamSynchronize(_stream), "failed to synchronize a CUDA stream"
);
}
/**
@brief begins graph capturing on the stream
When a stream is in capture mode, all operations pushed into the stream
will not be executed, but will instead be captured into a graph,
which will be returned via cudaStream::end_capture.
A thread's mode can be one of the following:
+ @c cudaStreamCaptureModeGlobal: This is the default mode.
If the local thread has an ongoing capture sequence that was not initiated
with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture,
or if any other thread has a concurrent capture sequence initiated with
@c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially
unsafe API calls.
+ @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture
sequence not initiated with @c cudaStreamCaptureModeRelaxed,
it is prohibited from potentially unsafe API calls.
Concurrent capture sequences in other threads are ignored.
+ @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited
from potentially unsafe API calls. Note that the thread is still prohibited
from API calls which necessarily conflict with stream capture, for example,
attempting @c cudaEventQuery on an event that was last recorded
inside a capture sequence.
*/
void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const {
TF_CHECK_CUDA(
cudaStreamBeginCapture(_stream, m),
"failed to begin capture on stream ", _stream, " with thread mode ", m
);
}
/**
@brief ends graph capturing on the stream
Equivalent to calling @c cudaStreamEndCapture to
end capture on the stream and return the captured graph.
Capture must have been initiated on the stream via a call to cudaStream::begin_capture.
If capture was invalidated due to a violation of the rules of stream capture,
a null graph is returned.
*/
cudaGraph_t end_capture() const {
cudaGraph_t native_g;
TF_CHECK_CUDA(
cudaStreamEndCapture(_stream, &native_g),
"failed to end capture on stream ", _stream
);
return native_g;
}
/**
@brief records an event on the stream
Equivalent to calling @c cudaEventRecord to record an event on this stream;
the event and the stream must belong to the same CUDA context.
*/
void record(cudaEvent_t event) const {
TF_CHECK_CUDA(
cudaEventRecord(event, _stream),
"failed to record event ", event, " on stream ", _stream
);
}
/**
@brief waits on an event
Equivalent to calling @c cudaStreamWaitEvent to make all future work
submitted to this stream wait for all work captured in the given event.
*/
void wait(cudaEvent_t event) const {
TF_CHECK_CUDA(
cudaStreamWaitEvent(_stream, event, 0),
"failed to wait for event ", event, " on stream ", _stream
);
}
private:
cudaStream_t _stream {nullptr};
};
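/*
A minimal usage sketch of tf::cudaStream, assuming a valid CUDA device;
@c my_kernel, @c grid, and @c block are illustrative placeholders:

  tf::cudaStream stream;                            // cudaStreamCreate via RAII
  my_kernel<<<grid, block, 0, stream>>>();          // implicit cast to cudaStream_t
  stream.synchronize();                             // cudaStreamSynchronize

  stream.begin_capture(cudaStreamCaptureModeThreadLocal);
  my_kernel<<<grid, block, 0, stream>>>();          // captured, not executed
  cudaGraph_t graph = stream.end_capture();         // caller owns the captured graph
  cudaGraphDestroy(graph);
*/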
// ----------------------------------------------------------------------------
// cudaEvent
// ----------------------------------------------------------------------------
/**
@class cudaEvent
@brief class to create an RAII-styled wrapper over a native CUDA event
A cudaEvent object is an RAII-styled wrapper over a native CUDA event
(@c cudaEvent_t).
A cudaEvent object is move-only.
*/
class cudaEvent {
struct cudaEventCreator {
cudaEvent_t operator () () const {
cudaEvent_t event;
TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event");
return event;
}
cudaEvent_t operator () (unsigned int flag) const {
cudaEvent_t event;
TF_CHECK_CUDA(
cudaEventCreateWithFlags(&event, flag),
"failed to create a CUDA event with flag=", flag
);
return event;
}
};
struct cudaEventDeleter {
void operator () (cudaEvent_t event) const {
cudaEventDestroy(event);
}
};
public:
/**
@brief constructs an RAII-styled CUDA event object from the given CUDA event
*/
explicit cudaEvent(cudaEvent_t event) : _event(event) { }
/**
@brief constructs an RAII-styled CUDA event object
*/
cudaEvent() : _event{ cudaEventCreator{}() } { }
/**
@brief constructs an RAII-styled CUDA event object with the given flag
*/
explicit cudaEvent(unsigned int flag) : _event{ cudaEventCreator{}(flag) } { }
/**
@brief disabled copy constructor
*/
cudaEvent(const cudaEvent&) = delete;
/**
@brief move constructor
*/
cudaEvent(cudaEvent&& rhs) : _event{rhs._event} {
rhs._event = nullptr;
}
/**
@brief destructs the CUDA event
*/
~cudaEvent() {
cudaEventDeleter {} (_event);
}
/**
@brief disabled copy assignment
*/
cudaEvent& operator = (const cudaEvent&) = delete;
/**
@brief move assignment
*/
cudaEvent& operator = (cudaEvent&& rhs) {
cudaEventDeleter {} (_event);
_event = rhs._event;
rhs._event = nullptr;
return *this;
}
/**
@brief implicit conversion to the native CUDA event (cudaEvent_t)
Returns the underlying event of type @c cudaEvent_t.
*/
operator cudaEvent_t () const {
return _event;
}
private:
cudaEvent_t _event {nullptr};
};
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_error.hpp | #pragma once
#include <cuda.h>
#include <iostream>
#include <sstream>
#include <exception>
#include "../utility/stream.hpp"
#define TF_CUDA_EXPAND( x ) x
#define TF_CUDA_REMOVE_FIRST_HELPER(N, ...) __VA_ARGS__
#define TF_CUDA_REMOVE_FIRST(...) TF_CUDA_EXPAND(TF_CUDA_REMOVE_FIRST_HELPER(__VA_ARGS__))
#define TF_CUDA_GET_FIRST_HELPER(N, ...) N
#define TF_CUDA_GET_FIRST(...) TF_CUDA_EXPAND(TF_CUDA_GET_FIRST_HELPER(__VA_ARGS__))
#define TF_CHECK_CUDA(...) \
if(TF_CUDA_GET_FIRST(__VA_ARGS__) != cudaSuccess) { \
std::ostringstream oss; \
auto __ev__ = TF_CUDA_GET_FIRST(__VA_ARGS__); \
oss << "[" << __FILE__ << ":" << __LINE__ << "] " \
<< (cudaGetErrorString(__ev__)) << " (" \
<< (cudaGetErrorName(__ev__)) << ") - "; \
tf::ostreamize(oss, TF_CUDA_REMOVE_FIRST(__VA_ARGS__)); \
throw std::runtime_error(oss.str()); \
}
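/*
TF_CHECK_CUDA takes the CUDA call as its first argument and streams any
remaining arguments into the exception message via tf::ostreamize.
A minimal sketch, assuming a valid CUDA device; @c ptr and @c bytes are
illustrative placeholders:

  void*  ptr   = nullptr;
  size_t bytes = 1024;
  TF_CHECK_CUDA(
    cudaMalloc(&ptr, bytes), "failed to allocate ", bytes, " bytes"
  );
*/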
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_optimizer.hpp | #pragma once
#include "cuda_graph.hpp"
/**
@file cuda_optimizer.hpp
@brief %cudaFlow capturing algorithms include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// cudaCapturingBase
// ----------------------------------------------------------------------------
/**
@private
@brief class to provide helper common methods for optimization algorithms
*/
class cudaCapturingBase {
protected:
std::vector<cudaNode*> _toposort(cudaGraph&);
std::vector<std::vector<cudaNode*>> _levelize(cudaGraph&);
};
// Function: _toposort
inline std::vector<cudaNode*> cudaCapturingBase::_toposort(cudaGraph& graph) {
std::vector<cudaNode*> res;
std::queue<cudaNode*> bfs;
res.reserve(graph._nodes.size());
// insert the first level of nodes into the queue
for(auto& u : graph._nodes) {
auto hu = std::get_if<cudaNode::Capture>(&u->_handle);
hu->level = u->_dependents.size();
if(hu->level == 0) {
bfs.push(u.get());
}
}
// levelize the graph using bfs
while(!bfs.empty()) {
auto u = bfs.front();
bfs.pop();
res.push_back(u);
for(auto v : u->_successors) {
auto hv = std::get_if<cudaNode::Capture>(&v->_handle);
if(--hv->level == 0) {
bfs.push(v);
}
}
}
return res;
}
// Function: _levelize
inline std::vector<std::vector<cudaNode*>>
cudaCapturingBase::_levelize(cudaGraph& graph) {
std::queue<cudaNode*> bfs;
size_t max_level = 0;
// insert the first level of nodes into the queue
for(auto& u : graph._nodes) {
auto hu = std::get_if<cudaNode::Capture>(&u->_handle);
hu->level = u->_dependents.size();
if(hu->level == 0) {
bfs.push(u.get());
}
}
// levelize the graph using bfs
while(!bfs.empty()) {
auto u = bfs.front();
bfs.pop();
auto hu = std::get_if<cudaNode::Capture>(&u->_handle);
for(auto v : u->_successors) {
auto hv = std::get_if<cudaNode::Capture>(&v->_handle);
if(--hv->level == 0) {
hv->level = hu->level + 1;
if(hv->level > max_level) {
max_level = hv->level;
}
bfs.push(v);
}
}
}
// set level_graph and each node's idx
std::vector<std::vector<cudaNode*>> level_graph(max_level+1);
for(auto& u : graph._nodes) {
auto hu = std::get_if<cudaNode::Capture>(&u->_handle);
hu->lid = level_graph[hu->level].size();
level_graph[hu->level].emplace_back(u.get());
//for(auto s : u->_successors) {
// assert(hu.level < std::get_if<cudaNode::Capture>(&s->_handle)->level);
//}
}
return level_graph;
}
// ----------------------------------------------------------------------------
// class definition: cudaSequentialCapturing
// ----------------------------------------------------------------------------
/**
@class cudaSequentialCapturing
@brief class to capture a CUDA graph using a sequential stream
A sequential capturing algorithm finds a topological order of
the described graph and captures dependent GPU tasks using a single stream.
All GPU tasks run sequentially without breaking inter-task dependencies.
*/
class cudaSequentialCapturing : public cudaCapturingBase {
friend class cudaFlowCapturer;
public:
/**
@brief constructs a sequential optimizer
*/
cudaSequentialCapturing() = default;
private:
cudaGraph_t _optimize(cudaGraph& graph);
};
inline cudaGraph_t cudaSequentialCapturing::_optimize(cudaGraph& graph) {
// acquire per-thread stream and turn it into capture mode
// we must use ThreadLocal mode to avoid clashing with CUDA global states
cudaStream stream;
stream.begin_capture(cudaStreamCaptureModeThreadLocal);
auto ordered = _toposort(graph);
for(auto node : ordered) {
std::get_if<cudaNode::Capture>(&node->_handle)->work(stream);
}
return stream.end_capture();
}
// ----------------------------------------------------------------------------
// class definition: cudaLinearCapturing
// ----------------------------------------------------------------------------
/**
@class cudaLinearCapturing
@brief class to capture a linear CUDA graph using a sequential stream
A linear capturing algorithm is a special case of tf::cudaSequentialCapturing
and assumes the input task graph to be a single linear chain of tasks
(i.e., a straight line).
This assumption allows faster optimization during the capturing process.
If the input task graph is not a linear chain, the behavior is undefined.
*/
class cudaLinearCapturing : public cudaCapturingBase {
friend class cudaFlowCapturer;
public:
/**
@brief constructs a linear optimizer
*/
cudaLinearCapturing() = default;
private:
cudaGraph_t _optimize(cudaGraph& graph);
};
inline cudaGraph_t cudaLinearCapturing::_optimize(cudaGraph& graph) {
// acquire per-thread stream and turn it into capture mode
// we must use ThreadLocal mode to avoid clashing with CUDA global states
cudaStream stream;
stream.begin_capture(cudaStreamCaptureModeThreadLocal);
// find the source node
cudaNode* src {nullptr};
for(auto& u : graph._nodes) {
if(u->_dependents.size() == 0) {
src = u.get();
while(src) {
std::get_if<cudaNode::Capture>(&src->_handle)->work(stream);
src = src->_successors.empty() ? nullptr : src->_successors[0];
}
break;
}
// ideally, there should be only one source
}
return stream.end_capture();
}
// ----------------------------------------------------------------------------
// class definition: cudaRoundRobinCapturing
// ----------------------------------------------------------------------------
/**
@class cudaRoundRobinCapturing
@brief class to capture a CUDA graph using a round-robin algorithm
A round-robin capturing algorithm levelizes the user-described graph
and assigns streams to nodes in a round-robin order, level by level.
The algorithm is based on the following paper published in Euro-Par 2021:
+ Dian-Lun Lin and Tsung-Wei Huang, "Efficient GPU Computation using %Task Graph Parallelism," <i>European Conference on Parallel and Distributed Computing (Euro-Par)</i>, 2021
The round-robin optimization algorithm is best suited for large %cudaFlow graphs
that compose hundreds or thousands of GPU operations
(e.g., kernels and memory copies), many of which can run in parallel.
You can configure the number of streams used by the optimizer to adjust the
maximum kernel concurrency in the captured CUDA graph.
*/
class cudaRoundRobinCapturing : public cudaCapturingBase {
friend class cudaFlowCapturer;
public:
/**
@brief constructs a round-robin optimizer with 4 streams by default
*/
cudaRoundRobinCapturing() = default;
/**
@brief constructs a round-robin optimizer with the given number of streams
*/
explicit cudaRoundRobinCapturing(size_t num_streams);
/**
@brief queries the number of streams used by the optimizer
*/
size_t num_streams() const;
/**
@brief sets the number of streams used by the optimizer
*/
void num_streams(size_t n);
private:
size_t _num_streams {4};
cudaGraph_t _optimize(cudaGraph& graph);
void _reset(std::vector<std::vector<cudaNode*>>& graph);
};
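/*
These optimizers are not used directly; a tf::cudaFlowCapturer selects one.
A sketch, assuming the make_optimizer interface of tf::cudaFlowCapturer
(declared outside this header):

  tf::cudaFlowCapturer capturer;
  // ... describe GPU work through the capturer ...
  capturer.make_optimizer<tf::cudaRoundRobinCapturing>(4);  // use four streams
*/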
// Constructor
inline cudaRoundRobinCapturing::cudaRoundRobinCapturing(size_t num_streams) :
_num_streams {num_streams} {
if(num_streams == 0) {
TF_THROW("number of streams must be at least one");
}
}
// Function: num_streams
inline size_t cudaRoundRobinCapturing::num_streams() const {
return _num_streams;
}
// Procedure: num_streams
inline void cudaRoundRobinCapturing::num_streams(size_t n) {
if(n == 0) {
TF_THROW("number of streams must be at least one");
}
_num_streams = n;
}
inline void cudaRoundRobinCapturing::_reset(
std::vector<std::vector<cudaNode*>>& graph
) {
//level == global id
//idx == stream id we want to skip
size_t id{0};
for(auto& each_level: graph) {
for(auto& node: each_level) {
auto hn = std::get_if<cudaNode::Capture>(&node->_handle);
hn->level = id++;
hn->idx = _num_streams;
hn->event = nullptr;
}
}
}
// Function: _optimize
inline cudaGraph_t cudaRoundRobinCapturing::_optimize(cudaGraph& graph) {
// levelize the graph
auto levelized = _levelize(graph);
// initialize the data structure
_reset(levelized);
// begin to capture
std::vector<cudaStream> streams(_num_streams);
streams[0].begin_capture(cudaStreamCaptureModeThreadLocal);
// reserve space for scoped events
std::vector<cudaEvent> events;
events.reserve((_num_streams >> 1) + levelized.size());
// fork
cudaEvent_t fork_event = events.emplace_back();
streams[0].record(fork_event);
for(size_t i = 1; i < streams.size(); ++i) {
streams[i].wait(fork_event);
}
// assign streams to levelized nodes in a round-robin manner
for(auto& each_level: levelized) {
for(auto& node: each_level) {
auto hn = std::get_if<cudaNode::Capture>(&node->_handle);
size_t sid = hn->lid % _num_streams;
//wait events
cudaNode* wait_node{nullptr};
for(auto& pn: node->_dependents) {
auto phn = std::get_if<cudaNode::Capture>(&pn->_handle);
size_t psid = phn->lid % _num_streams;
//level == global id
//idx == stream id we want to skip
if(psid == hn->idx) {
if(wait_node == nullptr ||
std::get_if<cudaNode::Capture>(&wait_node->_handle)->level < phn->level) {
wait_node = pn;
}
}
else if(psid != sid) {
streams[sid].wait(phn->event);
}
}
if(wait_node != nullptr) {
assert(std::get_if<cudaNode::Capture>(&wait_node->_handle)->event);
streams[sid].wait(std::get_if<cudaNode::Capture>(&wait_node->_handle)->event);
}
//capture
hn->work(streams[sid]);
//create/record stream
for(auto& sn: node->_successors) {
auto shn = std::get_if<cudaNode::Capture>(&sn->_handle);
size_t ssid = shn->lid % _num_streams;
if(ssid != sid) {
if(!hn->event) {
hn->event = events.emplace_back();
streams[sid].record(hn->event);
}
//idx == stream id we want to skip
shn->idx = sid;
}
}
}
}
// join
for(size_t i=1; i<_num_streams; ++i) {
cudaEvent_t join_event = events.emplace_back();
streams[i].record(join_event);
streams[0].wait(join_event);
}
return streams[0].end_capture();
}
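/*
The fork-join pattern above maps to the following raw CUDA capture calls
(a sketch with two streams; error checking omitted, @c k1 and @c k2 are
illustrative kernels):

  cudaStreamBeginCapture(s0, cudaStreamCaptureModeThreadLocal);
  cudaEventRecord(fork_event, s0);          // fork: bring s1 into the capture
  cudaStreamWaitEvent(s1, fork_event, 0);
  k1<<<g, b, 0, s0>>>();                    // captured on s0
  k2<<<g, b, 0, s1>>>();                    // captured on s1, may run concurrently
  cudaEventRecord(join_event, s1);          // join: s0 waits for s1 before ending
  cudaStreamWaitEvent(s0, join_event, 0);
  cudaGraph_t graph;
  cudaStreamEndCapture(s0, &graph);
*/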
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_pool.hpp | #pragma once
#include "cuda_error.hpp"
namespace tf {
/**
@brief per-thread object pool to manage CUDA device object
@tparam H object type
@tparam C function object to create a library object
@tparam D function object to delete a library object
A CUDA device object has a lifetime associated with a device,
for example, @c cudaStream_t, @c cublasHandle_t, etc.
Creating a device object is typically expensive (e.g., 10-200 ms)
and destroying it may trigger implicit device synchronization.
For applications that intensively make use of device objects,
it is desirable to reuse them as much as possible.
There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API
and CUcontexts in the CUDA Driver API within a process.
The specific context which the CUDA Runtime API uses for a device
is called the device's primary context.
From the perspective of the CUDA Runtime API,
a device and its primary context are synonymous.
We design the device object pool in a decentralized fashion by keeping
(1) a global pool to keep track of potentially usable objects and
(2) a per-thread pool to footprint objects with shared ownership.
The global pool does not own the object and therefore does not destruct any of them.
The per-thread pool keeps the footprints of objects with shared ownership
and will destruct them if the thread holds the last reference count after it joins.
The motivation of this decentralized control is to avoid destroying device
objects after their context has already been destroyed due to driver shutdown.
*/
template <typename H, typename C, typename D>
class cudaPerThreadDeviceObjectPool {
public:
/**
@brief structure to store a context object
*/
struct Object {
int device;
H value;
Object(int);
~Object();
Object(const Object&) = delete;
Object(Object&&) = delete;
};
private:
// The master thread holds the storage of the pool.
// Due to destruction ordering, the CUDA context may already have been
// destroyed when the master thread destroys the CUDA object.
// Therefore, we use a decentralized approach that lets child threads
// destroy CUDA objects while the master thread only keeps a weak reference
// to those objects for reuse.
struct cudaGlobalDeviceObjectPool {
std::shared_ptr<Object> acquire(int);
void release(int, std::weak_ptr<Object>);
std::mutex mutex;
std::unordered_map<int, std::vector<std::weak_ptr<Object>>> pool;
};
public:
/**
@brief default constructor
*/
cudaPerThreadDeviceObjectPool() = default;
/**
@brief acquires a device object with shared ownership
*/
std::shared_ptr<Object> acquire(int);
/**
@brief releases a device object with moved ownership
*/
void release(std::shared_ptr<Object>&&);
/**
@brief queries the number of device objects with shared ownership
*/
size_t footprint_size() const;
private:
inline static cudaGlobalDeviceObjectPool _shared_pool;
std::unordered_set<std::shared_ptr<Object>> _footprint;
};
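/*
A sketch of how this template is meant to be instantiated, pairing a handle
type with creator/deleter functors (the functor and alias names below are
illustrative, not declared in this header):

  struct streamCreator {
    cudaStream_t operator () () const {
      cudaStream_t s;
      TF_CHECK_CUDA(cudaStreamCreate(&s), "failed to create a CUDA stream");
      return s;
    }
  };
  struct streamDeleter {
    void operator () (cudaStream_t s) const { cudaStreamDestroy(s); }
  };
  using cudaPerThreadStreamPool = cudaPerThreadDeviceObjectPool<
    cudaStream_t, streamCreator, streamDeleter
  >;

  cudaPerThreadStreamPool pool;
  auto obj = pool.acquire(0);     // shared ownership of a stream on device 0
  // ... use obj->value ...
  pool.release(std::move(obj));   // returned to the global pool for reuse
*/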
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool::Object definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
cudaPerThreadDeviceObjectPool<H, C, D>::Object::Object(int d) :
device {d} {
cudaScopedDevice ctx(device);
value = C{}();
}
template <typename H, typename C, typename D>
cudaPerThreadDeviceObjectPool<H, C, D>::Object::~Object() {
cudaScopedDevice ctx(device);
D{}(value);
}
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool::cudaGlobalDeviceObjectPool definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
std::shared_ptr<typename cudaPerThreadDeviceObjectPool<H, C, D>::Object>
cudaPerThreadDeviceObjectPool<H, C, D>::cudaGlobalDeviceObjectPool::acquire(int d) {
std::scoped_lock<std::mutex> lock(mutex);
if(auto itr = pool.find(d); itr != pool.end()) {
while(!itr->second.empty()) {
auto sptr = itr->second.back().lock();
itr->second.pop_back();
if(sptr) {
return sptr;
}
}
}
return nullptr;
}
template <typename H, typename C, typename D>
void cudaPerThreadDeviceObjectPool<H, C, D>::cudaGlobalDeviceObjectPool::release(
int d, std::weak_ptr<Object> ptr
) {
std::scoped_lock<std::mutex> lock(mutex);
pool[d].push_back(ptr);
}
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
std::shared_ptr<typename cudaPerThreadDeviceObjectPool<H, C, D>::Object>
cudaPerThreadDeviceObjectPool<H, C, D>::acquire(int d) {
auto ptr = _shared_pool.acquire(d);
if(!ptr) {
ptr = std::make_shared<Object>(d);
}
return ptr;
}
template <typename H, typename C, typename D>
void cudaPerThreadDeviceObjectPool<H, C, D>::release(
std::shared_ptr<Object>&& ptr
) {
_shared_pool.release(ptr->device, ptr);
_footprint.insert(std::move(ptr));
}
template <typename H, typename C, typename D>
size_t cudaPerThreadDeviceObjectPool<H, C, D>::footprint_size() const {
return _footprint.size();
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_graph.hpp | #pragma once
#include "cuda_memory.hpp"
#include "cuda_stream.hpp"
#include "cuda_meta.hpp"
#include "../utility/traits.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// cudaGraph_t routines
// ----------------------------------------------------------------------------
/**
@brief gets the memcpy node parameter of a copy task
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
cudaMemcpy3DParms cuda_get_copy_parms(T* tgt, const T* src, size_t num) {
using U = std::decay_t<T>;
cudaMemcpy3DParms p;
p.srcArray = nullptr;
p.srcPos = ::make_cudaPos(0, 0, 0);
p.srcPtr = ::make_cudaPitchedPtr(const_cast<T*>(src), num*sizeof(U), num, 1);
p.dstArray = nullptr;
p.dstPos = ::make_cudaPos(0, 0, 0);
p.dstPtr = ::make_cudaPitchedPtr(tgt, num*sizeof(U), num, 1);
p.extent = ::make_cudaExtent(num*sizeof(U), 1, 1);
p.kind = cudaMemcpyDefault;
return p;
}
/**
@brief gets the memcpy node parameter of a memcpy task (untyped)
*/
inline cudaMemcpy3DParms cuda_get_memcpy_parms(
void* tgt, const void* src, size_t bytes
) {
// Parameters in cudaPitchedPtr
// d - Pointer to allocated memory
// p - Pitch of allocated memory in bytes
// xsz - Logical width of allocation in elements
// ysz - Logical height of allocation in elements
cudaMemcpy3DParms p;
p.srcArray = nullptr;
p.srcPos = ::make_cudaPos(0, 0, 0);
p.srcPtr = ::make_cudaPitchedPtr(const_cast<void*>(src), bytes, bytes, 1);
p.dstArray = nullptr;
p.dstPos = ::make_cudaPos(0, 0, 0);
p.dstPtr = ::make_cudaPitchedPtr(tgt, bytes, bytes, 1);
p.extent = ::make_cudaExtent(bytes, 1, 1);
p.kind = cudaMemcpyDefault;
return p;
}
/**
@brief gets the memset node parameter of a memcpy task (untyped)
*/
inline cudaMemsetParams cuda_get_memset_parms(void* dst, int ch, size_t count) {
cudaMemsetParams p;
p.dst = dst;
p.value = ch;
p.pitch = 0;
//p.elementSize = (count & 1) == 0 ? ((count & 3) == 0 ? 4 : 2) : 1;
//p.width = (count & 1) == 0 ? ((count & 3) == 0 ? count >> 2 : count >> 1) : count;
p.elementSize = 1; // either 1, 2, or 4
p.width = count;
p.height = 1;
return p;
}
/**
@brief gets the memset node parameter of a fill task (typed)
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
cudaMemsetParams cuda_get_fill_parms(T* dst, T value, size_t count) {
cudaMemsetParams p;
p.dst = dst;
// perform bit-wise copy
p.value = 0; // crucial
static_assert(sizeof(T) <= sizeof(p.value), "internal error");
std::memcpy(&p.value, &value, sizeof(T));
p.pitch = 0;
p.elementSize = sizeof(T); // either 1, 2, or 4
p.width = count;
p.height = 1;
return p;
}
/**
@brief gets the memset node parameter of a zero task (typed)
*/
template <typename T, std::enable_if_t<
is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
>
cudaMemsetParams cuda_get_zero_parms(T* dst, size_t count) {
cudaMemsetParams p;
p.dst = dst;
p.value = 0;
p.pitch = 0;
p.elementSize = sizeof(T); // either 1, 2, or 4
p.width = count;
p.height = 1;
return p;
}
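/*
These helpers build the parameter structs consumed by the native CUDA graph
node-creation APIs. A minimal sketch, assuming @c graph is a valid
cudaGraph_t and @c d_src, @c d_dst, @c d_data point to @c N floats in device
memory:

  cudaGraphNode_t copy_node, fill_node;

  auto copy_parms = tf::cuda_get_copy_parms(d_dst, d_src, N);
  TF_CHECK_CUDA(
    cudaGraphAddMemcpyNode(&copy_node, graph, nullptr, 0, &copy_parms),
    "failed to add a memcpy node"
  );

  auto fill_parms = tf::cuda_get_fill_parms(d_data, 1.0f, N);
  TF_CHECK_CUDA(
    cudaGraphAddMemsetNode(&fill_node, graph, nullptr, 0, &fill_parms),
    "failed to add a memset node"
  );
*/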
/**
@brief queries the number of root nodes in a native CUDA graph
*/
inline size_t cuda_get_graph_num_root_nodes(cudaGraph_t graph) {
size_t num_nodes;
TF_CHECK_CUDA(
cudaGraphGetRootNodes(graph, nullptr, &num_nodes),
"failed to get native graph root nodes"
);
return num_nodes;
}
/**
@brief queries the number of nodes in a native CUDA graph
*/
inline size_t cuda_get_graph_num_nodes(cudaGraph_t graph) {
size_t num_nodes;
TF_CHECK_CUDA(
cudaGraphGetNodes(graph, nullptr, &num_nodes),
"failed to get native graph nodes"
);
return num_nodes;
}
/**
@brief queries the number of edges in a native CUDA graph
*/
inline size_t cuda_get_graph_num_edges(cudaGraph_t graph) {
size_t num_edges;
TF_CHECK_CUDA(
cudaGraphGetEdges(graph, nullptr, nullptr, &num_edges),
"failed to get native graph edges"
);
return num_edges;
}
/**
@brief acquires the nodes in a native CUDA graph
*/
inline std::vector<cudaGraphNode_t> cuda_get_graph_nodes(cudaGraph_t graph) {
size_t num_nodes = cuda_get_graph_num_nodes(graph);
std::vector<cudaGraphNode_t> nodes(num_nodes);
TF_CHECK_CUDA(
cudaGraphGetNodes(graph, nodes.data(), &num_nodes),
"failed to get native graph nodes"
);
return nodes;
}
/**
@brief acquires the root nodes in a native CUDA graph
*/
inline std::vector<cudaGraphNode_t> cuda_get_graph_root_nodes(cudaGraph_t graph) {
size_t num_nodes = cuda_get_graph_num_root_nodes(graph);
std::vector<cudaGraphNode_t> nodes(num_nodes);
TF_CHECK_CUDA(
cudaGraphGetRootNodes(graph, nodes.data(), &num_nodes),
"failed to get native graph nodes"
);
return nodes;
}
/**
@brief acquires the edges in a native CUDA graph
*/
inline std::vector<std::pair<cudaGraphNode_t, cudaGraphNode_t>>
cuda_get_graph_edges(cudaGraph_t graph) {
size_t num_edges = cuda_get_graph_num_edges(graph);
std::vector<cudaGraphNode_t> froms(num_edges), tos(num_edges);
TF_CHECK_CUDA(
cudaGraphGetEdges(graph, froms.data(), tos.data(), &num_edges),
"failed to get native graph edges"
);
std::vector<std::pair<cudaGraphNode_t, cudaGraphNode_t>> edges(num_edges);
for(size_t i=0; i<num_edges; i++) {
edges[i] = std::make_pair(froms[i], tos[i]);
}
return edges;
}
/**
@brief queries the type of a native CUDA graph node
valid type values are:
+ cudaGraphNodeTypeKernel = 0x00
+ cudaGraphNodeTypeMemcpy = 0x01
+ cudaGraphNodeTypeMemset = 0x02
+ cudaGraphNodeTypeHost = 0x03
+ cudaGraphNodeTypeGraph = 0x04
+ cudaGraphNodeTypeEmpty = 0x05
+ cudaGraphNodeTypeWaitEvent = 0x06
+ cudaGraphNodeTypeEventRecord = 0x07
*/
inline cudaGraphNodeType cuda_get_graph_node_type(cudaGraphNode_t node) {
cudaGraphNodeType type;
TF_CHECK_CUDA(
cudaGraphNodeGetType(node, &type), "failed to get native graph node type"
);
return type;
}
/**
@brief converts the type of a native CUDA graph node to a readable string
*/
inline const char* cuda_graph_node_type_to_string(cudaGraphNodeType type) {
switch(type) {
case cudaGraphNodeTypeKernel : return "kernel";
case cudaGraphNodeTypeMemcpy : return "memcpy";
case cudaGraphNodeTypeMemset : return "memset";
case cudaGraphNodeTypeHost : return "host";
case cudaGraphNodeTypeGraph : return "graph";
case cudaGraphNodeTypeEmpty : return "empty";
case cudaGraphNodeTypeWaitEvent : return "event_wait";
case cudaGraphNodeTypeEventRecord : return "event_record";
default : return "undefined";
}
}
/**
@brief dumps a native CUDA graph and all associated child graphs to a DOT format
@tparam T output stream target
@param os target output stream
@param graph native CUDA graph
*/
template <typename T>
void cuda_dump_graph(T& os, cudaGraph_t graph) {
os << "digraph cudaGraph {\n";
std::stack<std::tuple<cudaGraph_t, cudaGraphNode_t, int>> stack;
stack.push(std::make_tuple(graph, nullptr, 1));
int pl = 0;
while(stack.empty() == false) {
auto [graph, parent, l] = stack.top();
stack.pop();
for(int i=0; i<pl-l+1; i++) {
os << "}\n";
}
os << "subgraph cluster_p" << graph << " {\n"
<< "label=\"cudaGraph-L" << l << "\";\n"
<< "color=\"purple\";\n";
auto nodes = cuda_get_graph_nodes(graph);
auto edges = cuda_get_graph_edges(graph);
for(auto& [from, to] : edges) {
os << 'p' << from << " -> " << 'p' << to << ";\n";
}
for(auto& node : nodes) {
auto type = cuda_get_graph_node_type(node);
if(type == cudaGraphNodeTypeGraph) {
cudaGraph_t graph;
TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &graph), "");
stack.push(std::make_tuple(graph, node, l+1));
os << 'p' << node << "["
<< "shape=folder, style=filled, fontcolor=white, fillcolor=purple, "
<< "label=\"cudaGraph-L" << l+1
<< "\"];\n";
}
else {
os << 'p' << node << "[label=\""
<< cuda_graph_node_type_to_string(type)
<< "\"];\n";
}
}
// connect nodes without successors to the parent
if(parent != nullptr) {
std::unordered_set<cudaGraphNode_t> successors;
for(const auto& p : edges) {
successors.insert(p.first);
}
for(auto node : nodes) {
if(successors.find(node) == successors.end()) {
os << 'p' << node << " -> " << 'p' << parent << ";\n";
}
}
}
// set the previous level
pl = l;
}
for(int i=0; i<=pl; i++) {
os << "}\n";
}
}
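/*
A minimal sketch of dumping a captured graph in GraphViz DOT format
(assumes @c graph is a valid cudaGraph_t, e.g., obtained from
tf::cudaStream::end_capture; requires <fstream>):

  std::ofstream ofs("graph.dot");
  tf::cuda_dump_graph(ofs, graph);
  // render with: dot -Tpng graph.dot -o graph.png
*/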
// ----------------------------------------------------------------------------
// cudaGraphNative
// ----------------------------------------------------------------------------
/**
@class cudaGraphNative
@brief class to create an RAII-styled wrapper over a native CUDA graph
A cudaGraphNative object is an RAII-styled wrapper over
a native CUDA graph (@c cudaGraph_t).
A cudaGraphNative object is move-only.
*/
class cudaGraphNative {
struct cudaGraphNativeCreator {
cudaGraph_t operator () () const {
cudaGraph_t g;
TF_CHECK_CUDA(cudaGraphCreate(&g, 0), "failed to create a CUDA native graph");
return g;
}
};
struct cudaGraphNativeDeleter {
void operator () (cudaGraph_t g) const {
if(g) {
cudaGraphDestroy(g);
}
}
};
public:
/**
@brief constructs an RAII-styled object from the given native CUDA graph
Constructs a cudaGraphNative object which owns @c native.
*/
explicit cudaGraphNative(cudaGraph_t native) : _native(native) {
}
/**
@brief constructs an RAII-styled object for a new native CUDA graph
Equivalent to calling @c cudaGraphCreate to create a graph.
*/
cudaGraphNative() : _native{ cudaGraphNativeCreator{}() } {
}
/**
@brief disabled copy constructor
*/
cudaGraphNative(const cudaGraphNative&) = delete;
/**
@brief move constructor
*/
cudaGraphNative(cudaGraphNative&& rhs) : _native{rhs._native} {
rhs._native = nullptr;
}
/**
@brief destructs the native CUDA graph
*/
~cudaGraphNative() {
cudaGraphNativeDeleter {} (_native);
}
/**
@brief disabled copy assignment
*/
cudaGraphNative& operator = (const cudaGraphNative&) = delete;
/**
@brief move assignment
*/
cudaGraphNative& operator = (cudaGraphNative&& rhs) {
cudaGraphNativeDeleter {} (_native);
_native = rhs._native;
rhs._native = nullptr;
return *this;
}
/**
@brief implicit conversion to the native CUDA graph (cudaGraph_t)
Returns the underlying graph of type @c cudaGraph_t.
*/
operator cudaGraph_t () const {
return _native;
}
private:
cudaGraph_t _native {nullptr};
};
// ----------------------------------------------------------------------------
// cudaGraphExec
// ----------------------------------------------------------------------------
/**
@class cudaGraphExec
@brief class to create an RAII-styled wrapper over a CUDA executable graph
A cudaGraphExec object is an RAII-styled wrapper over
a native CUDA executable graph (@c cudaGraphExec_t).
A cudaGraphExec object is move-only.
*/
class cudaGraphExec {
struct cudaGraphExecCreator {
cudaGraphExec_t operator () () const { return nullptr; }
};
struct cudaGraphExecDeleter {
void operator () (cudaGraphExec_t executable) const {
if(executable) {
cudaGraphExecDestroy(executable);
}
}
};
public:
/**
@brief constructs an RAII-styled object from the given CUDA exec
Constructs a cudaGraphExec object which owns @c exec.
*/
explicit cudaGraphExec(cudaGraphExec_t exec) : _exec(exec) {
}
/**
@brief constructs an RAII-styled object with a null executable graph
The executable graph is initialized to @c nullptr; call cudaGraphExec::instantiate
to create one from a native CUDA graph.
*/
cudaGraphExec() : _exec{ cudaGraphExecCreator{}() } {
}
/**
@brief disabled copy constructor
*/
cudaGraphExec(const cudaGraphExec&) = delete;
/**
@brief move constructor
*/
cudaGraphExec(cudaGraphExec&& rhs) : _exec{rhs._exec} {
rhs._exec = nullptr;
}
/**
@brief destructs the CUDA exec
*/
~cudaGraphExec() {
cudaGraphExecDeleter {} (_exec);
}
/**
@brief disabled copy assignment
*/
cudaGraphExec& operator = (const cudaGraphExec&) = delete;
/**
@brief move assignment
*/
cudaGraphExec& operator = (cudaGraphExec&& rhs) {
cudaGraphExecDeleter {} (_exec);
_exec = rhs._exec;
rhs._exec = nullptr;
return *this;
}
/**
@brief clears the managed executable graph
Destructs the managed executable graph and resets it to @c nullptr.
*/
void clear() {
cudaGraphExecDeleter {} (_exec);
_exec = nullptr;
}
/**
@brief instantiates the executable from the given CUDA graph
*/
void instantiate(cudaGraph_t graph) {
cudaGraphExecDeleter {} (_exec);
TF_CHECK_CUDA(
cudaGraphInstantiate(&_exec, graph, nullptr, nullptr, 0),
"failed to create an executable graph"
);
}
/**
@brief updates the executable from the given CUDA graph
*/
cudaGraphExecUpdateResult update(cudaGraph_t graph) {
cudaGraphNode_t error_node;
cudaGraphExecUpdateResult error_result;
cudaGraphExecUpdate(_exec, graph, &error_node, &error_result);
return error_result;
}
/**
@brief launches the executable graph via the given stream
*/
void launch(cudaStream_t stream) {
TF_CHECK_CUDA(
cudaGraphLaunch(_exec, stream), "failed to launch a CUDA executable graph"
);
}
/**
@brief implicit conversion to the native CUDA exec (cudaGraphExec_t)
Returns the underlying exec of type @c cudaGraphExec_t.
*/
operator cudaGraphExec_t () const {
return _exec;
}
private:
cudaGraphExec_t _exec {nullptr};
};
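/*
A minimal sketch of the instantiate-launch-update cycle, assuming @c graph is
a valid cudaGraph_t (e.g., returned by tf::cudaStream::end_capture):

  tf::cudaGraphExec exec;              // holds a null executable graph
  exec.instantiate(graph);             // cudaGraphInstantiate
  tf::cudaStream stream;
  exec.launch(stream);                 // cudaGraphLaunch on the stream
  stream.synchronize();
  // after changing node parameters of graph without altering its topology:
  if(exec.update(graph) != cudaGraphExecUpdateSuccess) {
    exec.instantiate(graph);           // fall back to re-instantiation
  }
*/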
// ----------------------------------------------------------------------------
// cudaGraph class
// ----------------------------------------------------------------------------
// class: cudaGraph
class cudaGraph : public CustomGraphBase {
friend class cudaNode;
friend class cudaTask;
friend class cudaFlowCapturerBase;
friend class cudaFlowCapturer;
friend class cudaFlow;
friend class cudaCapturingBase;
friend class cudaSequentialCapturing;
friend class cudaLinearCapturing;
friend class cudaRoundRobinCapturing;
friend class Taskflow;
friend class Executor;
constexpr static int OFFLOADED = 0x01;
constexpr static int CHANGED = 0x02;
constexpr static int UPDATED = 0x04;
public:
cudaGraph() = default;
~cudaGraph();
cudaGraph(const cudaGraph&) = delete;
cudaGraph(cudaGraph&&);
cudaGraph& operator = (const cudaGraph&) = delete;
cudaGraph& operator = (cudaGraph&&);
template <typename... ArgsT>
cudaNode* emplace_back(ArgsT&&...);
bool empty() const;
void clear();
void dump(std::ostream&, const void*, const std::string&) const override final;
private:
int _state{CHANGED};
cudaGraph_t _native_handle {nullptr};
std::vector<std::unique_ptr<cudaNode>> _nodes;
//std::vector<cudaNode*> _nodes;
};
// ----------------------------------------------------------------------------
// cudaNode class
// ----------------------------------------------------------------------------
/**
@private
@class: cudaNode
*/
class cudaNode {
friend class cudaGraph;
friend class cudaTask;
friend class cudaFlow;
friend class cudaFlowCapturer;
friend class cudaFlowCapturerBase;
friend class cudaCapturingBase;
friend class cudaSequentialCapturing;
friend class cudaLinearCapturing;
friend class cudaRoundRobinCapturing;
friend class Taskflow;
friend class Executor;
// Empty handle
struct Empty {
};
// Host handle
struct Host {
template <typename C>
Host(C&&);
std::function<void()> func;
static void callback(void*);
};
// Memset handle
struct Memset {
};
// Memcpy handle
struct Memcpy {
};
// Kernel handle
struct Kernel {
template <typename F>
Kernel(F&& f);
void* func {nullptr};
};
// Subflow handle
struct Subflow {
cudaGraph graph;
};
// Capture
struct Capture {
template <typename C>
Capture(C&&);
std::function<void(cudaStream_t)> work;
cudaEvent_t event;
size_t level;
size_t lid;
size_t idx;
};
using handle_t = std::variant<
Empty,
Host,
Memset,
Memcpy,
Kernel,
Subflow,
Capture
>;
public:
// variant index
constexpr static auto EMPTY = get_index_v<Empty, handle_t>;
constexpr static auto HOST = get_index_v<Host, handle_t>;
constexpr static auto MEMSET = get_index_v<Memset, handle_t>;
constexpr static auto MEMCPY = get_index_v<Memcpy, handle_t>;
constexpr static auto KERNEL = get_index_v<Kernel, handle_t>;
constexpr static auto SUBFLOW = get_index_v<Subflow, handle_t>;
constexpr static auto CAPTURE = get_index_v<Capture, handle_t>;
cudaNode() = delete;
template <typename... ArgsT>
cudaNode(cudaGraph&, ArgsT&&...);
private:
cudaGraph& _graph;
std::string _name;
handle_t _handle;
cudaGraphNode_t _native_handle {nullptr};
SmallVector<cudaNode*> _successors;
SmallVector<cudaNode*> _dependents;
void _precede(cudaNode*);
};
// ----------------------------------------------------------------------------
// cudaNode definitions
// ----------------------------------------------------------------------------
// Host handle constructor
template <typename C>
cudaNode::Host::Host(C&& c) : func {std::forward<C>(c)} {
}
// Host callback
inline void cudaNode::Host::callback(void* data) {
static_cast<Host*>(data)->func();
};
// Kernel handle constructor
template <typename F>
cudaNode::Kernel::Kernel(F&& f) :
func {std::forward<F>(f)} {
}
// Capture handle constructor
template <typename C>
cudaNode::Capture::Capture(C&& work) :
work {std::forward<C>(work)} {
}
// Constructor
template <typename... ArgsT>
cudaNode::cudaNode(cudaGraph& graph, ArgsT&&... args) :
_graph {graph},
_handle {std::forward<ArgsT>(args)...} {
}
// Procedure: _precede
inline void cudaNode::_precede(cudaNode* v) {
_graph._state |= cudaGraph::CHANGED;
_successors.push_back(v);
v->_dependents.push_back(this);
// capture node doesn't have the native graph yet
if(_handle.index() != cudaNode::CAPTURE) {
TF_CHECK_CUDA(
cudaGraphAddDependencies(
_graph._native_handle, &_native_handle, &v->_native_handle, 1
),
"failed to add a preceding link ", this, "->", v
);
}
}
//// Procedure: _set_state
//inline void cudaNode::_set_state(int flag) {
// _state |= flag;
//}
//
//// Procedure: _unset_state
//inline void cudaNode::_unset_state(int flag) {
// _state &= ~flag;
//}
//
//// Procedure: _clear_state
//inline void cudaNode::_clear_state() {
// _state = 0;
//}
//
//// Function: _has_state
//inline bool cudaNode::_has_state(int flag) const {
// return _state & flag;
//}
// ----------------------------------------------------------------------------
// cudaGraph definitions
// ----------------------------------------------------------------------------
// Destructor
inline cudaGraph::~cudaGraph() {
//clear();
assert(_native_handle == nullptr);
}
// Move constructor
inline cudaGraph::cudaGraph(cudaGraph&& g) :
_native_handle {g._native_handle},
_nodes {std::move(g._nodes)} {
g._native_handle = nullptr;
assert(g._nodes.empty());
}
// Move assignment
inline cudaGraph& cudaGraph::operator = (cudaGraph&& rhs) {
//clear();
// lhs
_native_handle = rhs._native_handle;
_nodes = std::move(rhs._nodes);
assert(rhs._nodes.empty());
// rhs
rhs._native_handle = nullptr;
return *this;
}
// Function: empty
inline bool cudaGraph::empty() const {
return _nodes.empty();
}
// Procedure: clear
inline void cudaGraph::clear() {
//for(auto n : _nodes) {
// delete n;
//}
_state = cudaGraph::CHANGED;
_nodes.clear();
}
// Function: emplace_back
template <typename... ArgsT>
cudaNode* cudaGraph::emplace_back(ArgsT&&... args) {
_state |= cudaGraph::CHANGED;
auto node = std::make_unique<cudaNode>(std::forward<ArgsT>(args)...);
_nodes.emplace_back(std::move(node));
return _nodes.back().get();
// TODO: use object pool to save memory
//auto node = new cudaNode(std::forward<ArgsT>(args)...);
//_nodes.push_back(node);
//return node;
}
// Procedure: dump the graph to a DOT format
inline void cudaGraph::dump(
std::ostream& os, const void* root, const std::string& root_name
) const {
// recursive dump with stack
std::stack<std::tuple<const cudaGraph*, const cudaNode*, int>> stack;
stack.push(std::make_tuple(this, nullptr, 1));
int pl = 0;
while(!stack.empty()) {
auto [graph, parent, l] = stack.top();
stack.pop();
for(int i=0; i<pl-l+1; i++) {
os << "}\n";
}
if(parent == nullptr) {
if(root) {
os << "subgraph cluster_p" << root << " {\nlabel=\"cudaFlow: ";
if(root_name.empty()) os << 'p' << root;
else os << root_name;
os << "\";\n" << "color=\"purple\"\n";
}
else {
os << "digraph cudaFlow {\n";
}
}
else {
os << "subgraph cluster_p" << parent << " {\nlabel=\"cudaSubflow: ";
if(parent->_name.empty()) os << 'p' << parent;
else os << parent->_name;
os << "\";\n" << "color=\"purple\"\n";
}
for(auto& node : graph->_nodes) {
auto v = node.get();
os << 'p' << v << "[label=\"";
if(v->_name.empty()) {
os << 'p' << v << "\"";
}
else {
os << v->_name << "\"";
}
switch(v->_handle.index()) {
case cudaNode::KERNEL:
os << " style=\"filled\""
<< " color=\"white\" fillcolor=\"black\""
<< " fontcolor=\"white\""
<< " shape=\"box3d\"";
break;
case cudaNode::SUBFLOW:
stack.push(std::make_tuple(
&(std::get_if<cudaNode::Subflow>(&v->_handle)->graph), v, l+1)
);
os << " style=\"filled\""
<< " color=\"black\" fillcolor=\"purple\""
<< " fontcolor=\"white\""
<< " shape=\"folder\"";
break;
default:
break;
}
os << "];\n";
for(const auto s : v->_successors) {
os << 'p' << v << " -> " << 'p' << s << ";\n";
}
if(v->_successors.size() == 0) {
if(parent == nullptr) {
if(root) {
os << 'p' << v << " -> p" << root << ";\n";
}
}
else {
os << 'p' << v << " -> p" << parent << ";\n";
}
}
}
// set the previous level
pl = l;
}
for(int i=0; i<pl; i++) {
os << "}\n";
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_object.hpp | #pragma once
#include "cuda_error.hpp"
namespace tf {
/**
@brief per-thread object pool to manage CUDA device object
@tparam H object type
@tparam C function object to create a library object
@tparam D function object to delete a library object
A CUDA device object has a lifetime associated with a device,
for example, @c cudaStream_t, @c cublasHandle_t, etc.
Creating a device object is typically expensive (e.g., 10-200 ms)
and destroying it may trigger implicit device synchronization.
For applications that intensively make use of device objects,
it is desirable to reuse them as much as possible.
There exists a one-to-one relationship between CUDA devices in the CUDA Runtime API
and CUcontexts in the CUDA Driver API within a process.
The specific context which the CUDA Runtime API uses for a device
is called the device's primary context.
From the perspective of the CUDA Runtime API,
a device and its primary context are synonymous.
We design the device object pool in a decentralized fashion by keeping
(1) a global pool to keep track of potentially usable objects and
(2) a per-thread pool to footprint objects with shared ownership.
The global pool does not own the object and therefore does not destruct any of them.
The per-thread pool keeps the footprints of objects with shared ownership
and will destruct them if the thread holds the last reference count after it joins.
The motivation of this decentralized control is to avoid destroying device
objects after their context has already been destroyed due to driver shutdown.
*/
template <typename H, typename C, typename D>
class cudaPerThreadDeviceObjectPool {
public:
/**
@brief structure to store a context object
*/
struct Object {
int device;
H value;
Object(int);
~Object();
Object(const Object&) = delete;
Object(Object&&) = delete;
};
private:
// The master thread holds the storage of the pool.
// Due to destruction ordering, the CUDA context may already have been
// destroyed when the master thread destroys the CUDA object.
// Therefore, we use a decentralized approach that lets child threads
// destroy CUDA objects while the master thread only keeps a weak reference
// to those objects for reuse.
struct cudaGlobalDeviceObjectPool {
std::shared_ptr<Object> acquire(int);
void release(int, std::weak_ptr<Object>);
std::mutex mutex;
std::unordered_map<int, std::vector<std::weak_ptr<Object>>> pool;
};
public:
/**
@brief default constructor
*/
cudaPerThreadDeviceObjectPool() = default;
/**
@brief acquires a device object with shared ownership
*/
std::shared_ptr<Object> acquire(int);
/**
@brief releases a device object with moved ownership
*/
void release(std::shared_ptr<Object>&&);
/**
@brief queries the number of device objects with shared ownership
*/
size_t footprint_size() const;
private:
inline static cudaGlobalDeviceObjectPool _shared_pool;
std::unordered_set<std::shared_ptr<Object>> _footprint;
};
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool::Object definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
cudaPerThreadDeviceObjectPool<H, C, D>::Object::Object(int d) :
device {d} {
cudaScopedDevice ctx(device);
value = C{}();
}
template <typename H, typename C, typename D>
cudaPerThreadDeviceObjectPool<H, C, D>::Object::~Object() {
cudaScopedDevice ctx(device);
D{}(value);
}
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool::cudaGlobalDeviceObjectPool definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
std::shared_ptr<typename cudaPerThreadDeviceObjectPool<H, C, D>::Object>
cudaPerThreadDeviceObjectPool<H, C, D>::cudaGlobalDeviceObjectPool::acquire(int d) {
std::scoped_lock<std::mutex> lock(mutex);
if(auto itr = pool.find(d); itr != pool.end()) {
while(!itr->second.empty()) {
auto sptr = itr->second.back().lock();
itr->second.pop_back();
if(sptr) {
return sptr;
}
}
}
return nullptr;
}
template <typename H, typename C, typename D>
void cudaPerThreadDeviceObjectPool<H, C, D>::cudaGlobalDeviceObjectPool::release(
int d, std::weak_ptr<Object> ptr
) {
std::scoped_lock<std::mutex> lock(mutex);
pool[d].push_back(ptr);
}
// ----------------------------------------------------------------------------
// cudaPerThreadDeviceObjectPool definition
// ----------------------------------------------------------------------------
template <typename H, typename C, typename D>
std::shared_ptr<typename cudaPerThreadDeviceObjectPool<H, C, D>::Object>
cudaPerThreadDeviceObjectPool<H, C, D>::acquire(int d) {
auto ptr = _shared_pool.acquire(d);
if(!ptr) {
ptr = std::make_shared<Object>(d);
}
return ptr;
}
template <typename H, typename C, typename D>
void cudaPerThreadDeviceObjectPool<H, C, D>::release(
std::shared_ptr<Object>&& ptr
) {
_shared_pool.release(ptr->device, ptr);
_footprint.insert(std::move(ptr));
}
template <typename H, typename C, typename D>
size_t cudaPerThreadDeviceObjectPool<H, C, D>::footprint_size() const {
return _footprint.size();
}
// ----------------------------------------------------------------------------
// cudaObject
// ----------------------------------------------------------------------------
/**
@class cudaObject
@brief class to create an RAII-styled and move-only wrapper for CUDA objects
*/
template <typename T, typename C, typename D>
class cudaObject {
public:
/**
@brief constructs a CUDA object from the given one
*/
explicit cudaObject(T obj) : object(obj) {}
/**
@brief constructs a new CUDA object
*/
cudaObject() : object{ C{}() } {}
/**
@brief disabled copy constructor
*/
cudaObject(const cudaObject&) = delete;
/**
@brief move constructor
*/
cudaObject(cudaObject&& rhs) : object{rhs.object} {
rhs.object = nullptr;
}
/**
@brief destructs the CUDA object
*/
~cudaObject() { D{}(object); }
/**
@brief disabled copy assignment
*/
cudaObject& operator = (const cudaObject&) = delete;
/**
@brief move assignment
*/
cudaObject& operator = (cudaObject&& rhs) {
D {} (object);
object = rhs.object;
rhs.object = nullptr;
return *this;
}
/**
@brief implicit conversion to the underlying native object
Returns the underlying object of type @c T.
*/
operator T () const {
return object;
}
/**
@brief deletes the current CUDA object (if any) and creates a new one
*/
void create() {
D {} (object);
object = C{}();
}
/**
@brief resets this CUDA object to the given one
*/
void reset(T new_obj) {
D {} (object);
object = new_obj;
}
/**
@brief deletes the current CUDA object
*/
void clear() {
reset(nullptr);
}
/**
@brief releases the ownership of the CUDA object
*/
T release() {
auto tmp = object;
object = nullptr;
return tmp;
}
protected:
/**
@brief the CUDA object
*/
T object;
};
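/*
A sketch of wrapping a raw CUDA handle with cudaObject by pairing it with
creator/deleter functors (the functor and alias names below are illustrative):

  struct eventCreator {
    cudaEvent_t operator () () const {
      cudaEvent_t e;
      TF_CHECK_CUDA(cudaEventCreate(&e), "failed to create a CUDA event");
      return e;
    }
  };
  struct eventDeleter {
    void operator () (cudaEvent_t e) const { if(e) cudaEventDestroy(e); }
  };
  using cudaEventObject = cudaObject<cudaEvent_t, eventCreator, eventDeleter>;

  cudaEventObject event;         // creates an event via eventCreator
  cudaEventRecord(event, 0);     // implicit conversion to cudaEvent_t
*/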
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_meta.hpp | #pragma once
#include "cuda_execution_policy.hpp"
namespace tf {
// default warp size
inline constexpr unsigned CUDA_WARP_SIZE = 32;
// empty type
struct cudaEmpty { };
// ----------------------------------------------------------------------------
// iterator unrolling
// ----------------------------------------------------------------------------
// Template unrolled looping construct.
template<unsigned i, unsigned count, bool valid = (i < count)>
struct cudaIterate {
template<typename F>
__device__ static void eval(F f) {
f(i);
cudaIterate<i + 1, count>::eval(f);
}
};
template<unsigned i, unsigned count>
struct cudaIterate<i, count, false> {
template<typename F>
__device__ static void eval(F) { }
};
template<unsigned begin, unsigned end, typename F>
__device__ void cuda_iterate(F f) {
cudaIterate<begin, end>::eval(f);
}
template<unsigned count, typename F>
__device__ void cuda_iterate(F f) {
cuda_iterate<0, count>(f);
}
template<unsigned count, typename T>
__device__ T reduce(const T(&x)[count]) {
T y;
cuda_iterate<count>([&](auto i) { y = i ? x[i] + y : x[i]; });
return y;
}
template<unsigned count, typename T>
__device__ void fill(T(&x)[count], T val) {
cuda_iterate<count>([&](auto i) { x[i] = val; });
}
// Invoke unconditionally.
template<unsigned nt, unsigned vt, typename F>
__device__ void cuda_strided_iterate(F f, unsigned tid) {
cuda_iterate<vt>([=](auto i) { f(i, nt * i + tid); });
}
// Check range.
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename F>
__device__ void cuda_strided_iterate(F f, unsigned tid, unsigned count) {
// Unroll the first vt0 elements of each thread.
if(vt0 > 1 && count >= nt * vt0) {
cuda_strided_iterate<nt, vt0>(f, tid); // No checking
} else {
cuda_iterate<vt0>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
// TODO: seems dummy when vt0 == vt
cuda_iterate<vt0, vt>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
template<unsigned vt, typename F>
__device__ void cuda_thread_iterate(F f, unsigned tid) {
cuda_iterate<vt>([=](auto i) { f(i, vt * tid + i); });
}
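/*
A sketch of how these unrolled helpers are used inside a kernel: each of the
@c nt threads of a block processes @c vt elements of one tile of @c nt*vt
items. The kernel below scales an array in place (illustrative, not declared
in this header):

  template <unsigned nt, unsigned vt, typename T>
  __global__ void scale_kernel(T* data, T alpha, unsigned count) {
    unsigned tid  = threadIdx.x;
    unsigned tile = blockIdx.x * nt * vt;          // first element of this tile
    unsigned rem  = (count > tile) ? count - tile : 0;
    tf::cuda_strided_iterate<nt, vt>([=](auto, auto j) {
      data[tile + j] = alpha * data[tile + j];     // j = nt*i + tid within the tile
    }, tid, rem);
  }
*/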
// ----------------------------------------------------------------------------
// cudaRange
// ----------------------------------------------------------------------------
// cudaRange
struct cudaRange {
unsigned begin, end;
__device__ unsigned size() const { return end - begin; }
__device__ unsigned count() const { return size(); }
__device__ bool valid() const { return end > begin; }
};
inline __device__ cudaRange cuda_get_tile(unsigned b, unsigned nv, unsigned count) {
return cudaRange { nv * b, min(count, nv * (b + 1)) };
}
// ----------------------------------------------------------------------------
// cudaArray
// ----------------------------------------------------------------------------
template<typename T, unsigned size>
struct cudaArray {
T data[size];
__device__ T operator[](unsigned i) const { return data[i]; }
__device__ T& operator[](unsigned i) { return data[i]; }
cudaArray() = default;
cudaArray(const cudaArray&) = default;
cudaArray& operator=(const cudaArray&) = default;
// Fill the array with x.
__device__ cudaArray(T x) {
cuda_iterate<size>([&](unsigned i) { data[i] = x; });
}
};
template<typename T>
struct cudaArray<T, 0> {
__device__ T operator[](unsigned) const { return T(); }
__device__ T& operator[](unsigned) { return *(T*)nullptr; }
};
template<typename T, typename V, unsigned size>
struct cudaKVArray {
cudaArray<T, size> keys;
cudaArray<V, size> vals;
};
// ----------------------------------------------------------------------------
// thread reg <-> global mem
// ----------------------------------------------------------------------------
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I>
__device__ auto cuda_mem_to_reg_strided(I mem, unsigned tid, unsigned count) {
using T = typename std::iterator_traits<I>::value_type;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = mem[j]; }, tid, count
);
return x;
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t>
__device__ void cuda_reg_to_mem_strided(
cudaArray<T, vt> x, unsigned tid, unsigned count, it_t mem) {
cuda_strided_iterate<nt, vt, vt0>(
[=](auto i, auto j) { mem[j] = x[i]; }, tid, count
);
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I, typename O>
__device__ auto cuda_transform_mem_to_reg_strided(
I mem, unsigned tid, unsigned count, O op
) {
using T = std::invoke_result_t<O, typename std::iterator_traits<I>::value_type>;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = op(mem[j]); }, tid, count
);
return x;
}
// ----------------------------------------------------------------------------
// thread reg <-> shared
// ----------------------------------------------------------------------------
template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
__device__ void cuda_reg_to_shared_thread(
cudaArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
) {
static_assert(shared_size >= nt * vt,
"reg_to_shared_thread must have at least nt * vt storage");
cuda_thread_iterate<vt>([&](auto i, auto j) { shared[j] = x[i]; }, tid);
if(sync) __syncthreads();
}
template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
__device__ auto cuda_shared_to_reg_thread(
const T (&shared)[shared_size], unsigned tid, bool sync = true
) {
static_assert(shared_size >= nt * vt,
"reg_to_shared_thread must have at least nt * vt storage");
cudaArray<T, vt> x;
cuda_thread_iterate<vt>([&](auto i, auto j) {
x[i] = shared[j];
}, tid);
if(sync) __syncthreads();
return x;
}
template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
__device__ void cuda_reg_to_shared_strided(
cudaArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
) {
static_assert(shared_size >= nt * vt,
"reg_to_shared_strided must have at least nt * vt storage");
cuda_strided_iterate<nt, vt>(
[&](auto i, auto j) { shared[j] = x[i]; }, tid
);
if(sync) __syncthreads();
}
template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
__device__ auto cuda_shared_to_reg_strided(
const T (&shared)[shared_size], unsigned tid, bool sync = true
) {
static_assert(shared_size >= nt * vt,
"shared_to_reg_strided must have at least nt * vt storage");
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt>([&](auto i, auto j) { x[i] = shared[j]; }, tid);
if(sync) __syncthreads();
return x;
}
template<
unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
unsigned shared_size
>
__device__ void cuda_reg_to_mem_thread(
cudaArray<T, vt> x, unsigned tid,
unsigned count, it_t mem, T (&shared)[shared_size]
) {
cuda_reg_to_shared_thread<nt>(x, tid, shared);
auto y = cuda_shared_to_reg_strided<nt, vt>(shared, tid);
cuda_reg_to_mem_strided<nt, vt, vt0>(y, tid, count, mem);
}
template<
unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
unsigned shared_size
>
__device__ auto cuda_mem_to_reg_thread(
it_t mem, unsigned tid, unsigned count, T (&shared)[shared_size]
) {
auto x = cuda_mem_to_reg_strided<nt, vt, vt0>(mem, tid, count);
cuda_reg_to_shared_strided<nt, vt>(x, tid, shared);
auto y = cuda_shared_to_reg_thread<nt, vt>(shared, tid);
return y;
}
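// The two helpers above combine the strided and thread-order primitives:
// cuda_mem_to_reg_thread loads a tile from global memory in coalesced
// (strided) order, stages it through shared memory, and hands each thread vt
// consecutive elements in thread order; cuda_reg_to_mem_thread is the inverse.
// A minimal sketch (`in`, `out`, and `tile` are assumptions; the shared array
// must hold at least nt * vt elements):
//
//   __shared__ float shm[128 * 4];
//   auto x = cuda_mem_to_reg_thread<128, 4>(in + tile.begin, tid, tile.count(), shm);
//   // x[0..3] are 4 consecutive elements owned by this thread; after a
//   // per-thread computation, write them back in coalesced order:
//   cuda_reg_to_mem_thread<128, 4>(x, tid, tile.count(), out + tile.begin, shm);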
template<unsigned nt, unsigned vt, typename T, unsigned S>
__device__ auto cuda_shared_gather(
const T(&data)[S], cudaArray<unsigned, vt> indices, bool sync = true
) {
static_assert(S >= nt * vt,
"shared_gather must have at least nt * vt storage");
cudaArray<T, vt> x;
cuda_iterate<vt>([&](auto i) { x[i] = data[indices[i]]; });
if(sync) __syncthreads();
return x;
}
// ----------------------------------------------------------------------------
// reg<->reg
// ----------------------------------------------------------------------------
template<unsigned nt, unsigned vt, typename T, unsigned S>
__device__ auto cuda_reg_thread_to_strided(
cudaArray<T, vt> x, unsigned tid, T (&shared)[S]
) {
cuda_reg_to_shared_thread<nt>(x, tid, shared);
return cuda_shared_to_reg_strided<nt, vt>(shared, tid);
}
template<unsigned nt, unsigned vt, typename T, unsigned S>
__device__ auto cuda_reg_strided_to_thread(
cudaArray<T, vt> x, unsigned tid, T (&shared)[S]
) {
cuda_reg_to_shared_strided<nt>(x, tid, shared);
return cuda_shared_to_reg_thread<nt, vt>(shared, tid);
}
// ----------------------------------------------------------------------------
// cudaLoadStoreIterator
// ----------------------------------------------------------------------------
template<typename L, typename S, typename T, typename I>
struct cudaLoadStoreIterator : std::iterator_traits<const T*> {
L load;
S store;
I base;
cudaLoadStoreIterator(L load_, S store_, I base_) :
load(load_), store(store_), base(base_) { }
struct assign_t {
L load;
S store;
I index;
__device__ assign_t& operator=(T rhs) {
static_assert(!std::is_same<S, cudaEmpty>::value,
"load_iterator is being stored to.");
store(rhs, index);
return *this;
}
__device__ operator T() const {
static_assert(!std::is_same<L, cudaEmpty>::value,
"store_iterator is being loaded from.");
return load(index);
}
};
__device__ assign_t operator[](I index) const {
return assign_t { load, store, base + index };
}
__device__ assign_t operator*() const {
return assign_t { load, store, base };
}
__device__ cudaLoadStoreIterator operator+(I offset) const {
cudaLoadStoreIterator cp = *this;
cp += offset;
return cp;
}
__device__ cudaLoadStoreIterator& operator+=(I offset) {
base += offset;
return *this;
}
__device__ cudaLoadStoreIterator operator-(I offset) const {
cudaLoadStoreIterator cp = *this;
cp -= offset;
return cp;
}
__device__ cudaLoadStoreIterator& operator-=(I offset) {
base -= offset;
return *this;
}
};
//template<typename T>
//struct trivial_load_functor {
// template<typename I>
// __device__ T operator()(I index) const {
// return T();
// }
//};
//template<typename T>
//struct trivial_store_functor {
// template<typename I>
// __device__ void operator()(T v, I index) const { }
//};
template <typename T, typename I = unsigned, typename L, typename S>
auto cuda_make_load_store_iterator(L load, S store, I base = 0) {
return cudaLoadStoreIterator<L, S, T, I>(load, store, base);
}
template <typename T, typename I = unsigned, typename L>
auto cuda_make_load_iterator(L load, I base = 0) {
return cuda_make_load_store_iterator<T>(load, cudaEmpty(), base);
}
template <typename T, typename I = unsigned, typename S>
auto cuda_make_store_iterator(S store, I base = 0) {
return cuda_make_load_store_iterator<T>(cudaEmpty(), store, base);
}
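// Example (sketch): wrap a lambda as a read-only iterator so that algorithms
// expecting an iterator can read a strided view of a raw device pointer
// `data` (an assumption for illustration). When created in host code, the
// lambda must be a device lambda (nvcc --extended-lambda):
//
//   auto src = cuda_make_load_iterator<float>(
//     [=] __device__ (unsigned i) { return data[2 * i]; }
//   );
//   // src[k] reads data[2*k] when dereferenced in device code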
// ----------------------------------------------------------------------------
// swap
// ----------------------------------------------------------------------------
template<typename T>
__device__ void cuda_swap(T& a, T& b) {
auto c = a;
a = b;
b = c;
}
// ----------------------------------------------------------------------------
// launch kernel
// ----------------------------------------------------------------------------
template<typename F, typename... args_t>
__global__ void cuda_kernel(F f, args_t... args) {
f(threadIdx.x, blockIdx.x, args...);
}
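// Example (sketch): cuda_kernel forwards (threadIdx.x, blockIdx.x, args...)
// to the given callable, so a device lambda can be launched directly
// (requires nvcc --extended-lambda; `N`, `out`, and `stream` are assumptions):
//
//   cuda_kernel<<<(N + 255) / 256, 256, 0, stream>>>(
//     [=] __device__ (unsigned t, unsigned b, float* p) {
//       auto i = b * 256 + t;
//       if(i < N) p[i] = 0.0f;
//     },
//     out
//   );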
// ----------------------------------------------------------------------------
// operators
// ----------------------------------------------------------------------------
template <class T>
struct cuda_plus{
__device__ T operator()(T a, T b) const { return a + b; }
};
template <class T>
struct cuda_minus{
__device__ T operator()(T a, T b) const { return a - b; }
};
template <class T>
struct cuda_multiplies{
__device__ T operator()(T a, T b) const { return a * b; }
};
template <class T>
struct cuda_maximum{
__device__ T operator()(T a, T b) const { return a > b ? a : b; }
};
template <class T>
struct cuda_minimum{
__device__ T operator()(T a, T b) const { return a < b ? a : b; }
};
template <class T>
struct cuda_less{
  __device__ bool operator()(T a, T b) const { return a < b; }
};
template <class T>
struct cuda_greater{
  __device__ bool operator()(T a, T b) const { return a > b; }
};
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_device.hpp | #pragma once
#include "cuda_error.hpp"
/**
@file cuda_device.hpp
@brief CUDA device utilities include file
*/
namespace tf {
/**
@brief queries the number of available devices
*/
inline size_t cuda_get_num_devices() {
int N = 0;
TF_CHECK_CUDA(cudaGetDeviceCount(&N), "failed to get device count");
return static_cast<size_t>(N);
}
/**
@brief gets the current device associated with the caller thread
*/
inline int cuda_get_device() {
int id;
TF_CHECK_CUDA(cudaGetDevice(&id), "failed to get current device id");
return id;
}
/**
@brief switches to a given device context
*/
inline void cuda_set_device(int id) {
TF_CHECK_CUDA(cudaSetDevice(id), "failed to switch to device ", id);
}
/**
@brief obtains the device property
*/
inline void cuda_get_device_property(int i, cudaDeviceProp& p) {
TF_CHECK_CUDA(
cudaGetDeviceProperties(&p, i), "failed to get property of device ", i
);
}
/**
@brief obtains the device property
*/
inline cudaDeviceProp cuda_get_device_property(int i) {
cudaDeviceProp p;
TF_CHECK_CUDA(
cudaGetDeviceProperties(&p, i), "failed to get property of device ", i
);
return p;
}
/**
@brief dumps the device property
*/
inline void cuda_dump_device_property(std::ostream& os, const cudaDeviceProp& p) {
os << "Major revision number: " << p.major << '\n'
<< "Minor revision number: " << p.minor << '\n'
<< "Name: " << p.name << '\n'
<< "Total global memory: " << p.totalGlobalMem << '\n'
<< "Total shared memory per block: " << p.sharedMemPerBlock << '\n'
<< "Total registers per block: " << p.regsPerBlock << '\n'
<< "Warp size: " << p.warpSize << '\n'
<< "Maximum memory pitch: " << p.memPitch << '\n'
<< "Maximum threads per block: " << p.maxThreadsPerBlock << '\n';
os << "Maximum dimension of block: ";
for (int i = 0; i < 3; ++i) {
if(i) os << 'x';
os << p.maxThreadsDim[i];
}
os << '\n';
os << "Maximum dimenstion of grid: ";
for (int i = 0; i < 3; ++i) {
if(i) os << 'x';
    os << p.maxGridSize[i];
}
os << '\n';
os << "Clock rate: " << p.clockRate << '\n'
<< "Total constant memory: " << p.totalConstMem << '\n'
<< "Texture alignment: " << p.textureAlignment << '\n'
<< "Concurrent copy and execution: " << p.deviceOverlap << '\n'
<< "Number of multiprocessors: " << p.multiProcessorCount << '\n'
<< "Kernel execution timeout: " << p.kernelExecTimeoutEnabled << '\n'
<< "GPU sharing Host Memory: " << p.integrated << '\n'
<< "Host page-locked mem mapping: " << p.canMapHostMemory << '\n'
<< "Alignment for Surfaces: " << p.surfaceAlignment << '\n'
<< "Device has ECC support: " << p.ECCEnabled << '\n'
<< "Unified Addressing (UVA): " << p.unifiedAddressing << '\n';
}
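// Example: dump the properties of every visible device (a usage sketch
// combining the helpers above):
//
//   for(int d = 0; d < static_cast<int>(tf::cuda_get_num_devices()); ++d) {
//     tf::cuda_dump_device_property(std::cout, tf::cuda_get_device_property(d));
//   }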
/**
@brief queries the maximum threads per block on a device
*/
inline size_t cuda_get_device_max_threads_per_block(int d) {
int threads = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&threads, cudaDevAttrMaxThreadsPerBlock, d),
"failed to query the maximum threads per block on device ", d
)
return threads;
}
/**
@brief queries the maximum x-dimension per block on a device
*/
inline size_t cuda_get_device_max_x_dim_per_block(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimX, d),
"failed to query the maximum x-dimension per block on device ", d
)
return dim;
}
/**
@brief queries the maximum y-dimension per block on a device
*/
inline size_t cuda_get_device_max_y_dim_per_block(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimY, d),
"failed to query the maximum y-dimension per block on device ", d
)
return dim;
}
/**
@brief queries the maximum z-dimension per block on a device
*/
inline size_t cuda_get_device_max_z_dim_per_block(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimZ, d),
"failed to query the maximum z-dimension per block on device ", d
)
return dim;
}
/**
@brief queries the maximum x-dimension per grid on a device
*/
inline size_t cuda_get_device_max_x_dim_per_grid(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimX, d),
"failed to query the maximum x-dimension per grid on device ", d
)
return dim;
}
/**
@brief queries the maximum y-dimension per grid on a device
*/
inline size_t cuda_get_device_max_y_dim_per_grid(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimY, d),
"failed to query the maximum y-dimension per grid on device ", d
)
return dim;
}
/**
@brief queries the maximum z-dimension per grid on a device
*/
inline size_t cuda_get_device_max_z_dim_per_grid(int d) {
int dim = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimZ, d),
"failed to query the maximum z-dimension per grid on device ", d
)
return dim;
}
/**
@brief queries the maximum shared memory size in bytes per block on a device
*/
inline size_t cuda_get_device_max_shm_per_block(int d) {
int num = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&num, cudaDevAttrMaxSharedMemoryPerBlock, d),
"failed to query the maximum shared memory per block on device ", d
)
return num;
}
/**
@brief queries the warp size on a device
*/
inline size_t cuda_get_device_warp_size(int d) {
int num = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&num, cudaDevAttrWarpSize, d),
"failed to query the warp size per block on device ", d
)
return num;
}
/**
@brief queries the major number of compute capability of a device
*/
inline int cuda_get_device_compute_capability_major(int d) {
int num = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&num, cudaDevAttrComputeCapabilityMajor, d),
"failed to query the major number of compute capability of device ", d
)
return num;
}
/**
@brief queries the minor number of compute capability of a device
*/
inline int cuda_get_device_compute_capability_minor(int d) {
int num = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&num, cudaDevAttrComputeCapabilityMinor, d),
"failed to query the minor number of compute capability of device ", d
)
return num;
}
/**
@brief queries if the device supports unified addressing
*/
inline bool cuda_get_device_unified_addressing(int d) {
int num = 0;
TF_CHECK_CUDA(
cudaDeviceGetAttribute(&num, cudaDevAttrUnifiedAddressing, d),
"failed to query unified addressing status on device ", d
)
return num;
}
// ----------------------------------------------------------------------------
// CUDA Version
// ----------------------------------------------------------------------------
/**
@brief queries the latest CUDA version (1000 * major + 10 * minor) supported by the driver
*/
inline int cuda_get_driver_version() {
int num = 0;
TF_CHECK_CUDA(
cudaDriverGetVersion(&num),
"failed to query the latest cuda version supported by the driver"
);
return num;
}
/**
@brief queries the CUDA Runtime version (1000 * major + 10 * minor)
*/
inline int cuda_get_runtime_version() {
int num = 0;
TF_CHECK_CUDA(
cudaRuntimeGetVersion(&num), "failed to query cuda runtime version"
);
return num;
}
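// Example: both values encode the version as 1000*major + 10*minor, so they
// can be decoded as follows (sketch):
//
//   int rt    = tf::cuda_get_runtime_version();  // e.g., 11040 for CUDA 11.4
//   int major = rt / 1000;
//   int minor = (rt % 1000) / 10;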
// ----------------------------------------------------------------------------
// cudaScopedDevice
// ----------------------------------------------------------------------------
/** @class cudaScopedDevice
@brief class to create an RAII-styled context switch
Sample usage:
@code{.cpp}
{
tf::cudaScopedDevice device(1); // switch to the device context 1
// create a stream under device context 1
cudaStream_t stream;
cudaStreamCreate(&stream);
} // leaving the scope and goes back to the previous device context
@endcode
%cudaScopedDevice is neither movable nor copyable.
*/
class cudaScopedDevice {
public:
/**
@brief constructs a RAII-styled device switcher
@param device device context to scope in the guard
*/
explicit cudaScopedDevice(int device);
/**
@brief destructs the guard and switches back to the previous device context
*/
~cudaScopedDevice();
private:
cudaScopedDevice() = delete;
cudaScopedDevice(const cudaScopedDevice&) = delete;
cudaScopedDevice(cudaScopedDevice&&) = delete;
int _p;
};
// Constructor
inline cudaScopedDevice::cudaScopedDevice(int dev) {
TF_CHECK_CUDA(cudaGetDevice(&_p), "failed to get current device scope");
if(_p == dev) {
_p = -1;
}
else {
TF_CHECK_CUDA(cudaSetDevice(dev), "failed to scope on device ", dev);
}
}
// Destructor
inline cudaScopedDevice::~cudaScopedDevice() {
if(_p != -1) {
cudaSetDevice(_p);
//TF_CHECK_CUDA(cudaSetDevice(_p), "failed to scope back to device ", _p);
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_memory.hpp | #pragma once
#include "cuda_device.hpp"
/**
@file cuda_memory.hpp
@brief CUDA memory utilities include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// memory
// ----------------------------------------------------------------------------
/**
@brief queries the free memory (expensive call)
*/
inline size_t cuda_get_free_mem(int d) {
cudaScopedDevice ctx(d);
size_t free, total;
TF_CHECK_CUDA(
cudaMemGetInfo(&free, &total), "failed to get mem info on device ", d
);
return free;
}
/**
@brief queries the total available memory (expensive call)
*/
inline size_t cuda_get_total_mem(int d) {
cudaScopedDevice ctx(d);
size_t free, total;
TF_CHECK_CUDA(
cudaMemGetInfo(&free, &total), "failed to get mem info on device ", d
);
return total;
}
/**
@brief allocates memory on the given device for holding @c N elements of type @c T
The function calls @c cudaMalloc to allocate <tt>N*sizeof(T)</tt> bytes of memory
on the given device @c d and returns a pointer to the starting address of
the device memory.
*/
template <typename T>
T* cuda_malloc_device(size_t N, int d) {
cudaScopedDevice ctx(d);
T* ptr {nullptr};
TF_CHECK_CUDA(
cudaMalloc(&ptr, N*sizeof(T)),
"failed to allocate memory (", N*sizeof(T), "bytes) on device ", d
)
return ptr;
}
/**
@brief allocates memory on the current device associated with the caller
The function calls @c cudaMalloc to allocate <tt>N*sizeof(T)</tt> bytes of memory
on the current device associated with the caller and returns a pointer to the
starting address of the device memory.
*/
template <typename T>
T* cuda_malloc_device(size_t N) {
T* ptr {nullptr};
TF_CHECK_CUDA(
cudaMalloc(&ptr, N*sizeof(T)),
"failed to allocate memory (", N*sizeof(T), "bytes)"
)
return ptr;
}
/**
@brief allocates shared memory for holding @c N elements of type @c T
The function calls @c cudaMallocManaged to allocate <tt>N*sizeof(T)</tt> bytes
of memory and returns a pointer to the starting address of the shared memory.
*/
template <typename T>
T* cuda_malloc_shared(size_t N) {
T* ptr {nullptr};
TF_CHECK_CUDA(
cudaMallocManaged(&ptr, N*sizeof(T)),
"failed to allocate shared memory (", N*sizeof(T), "bytes)"
)
return ptr;
}
/**
@brief frees memory on the GPU device
@tparam T pointer type
@param ptr device pointer to memory to free
@param d device context identifier
This method calls @c cudaFree to free the memory space pointed to by @c ptr
using the given device context.
*/
template <typename T>
void cuda_free(T* ptr, int d) {
cudaScopedDevice ctx(d);
TF_CHECK_CUDA(cudaFree(ptr), "failed to free memory ", ptr, " on GPU ", d);
}
/**
@brief frees memory on the GPU device
@tparam T pointer type
@param ptr device pointer to memory to free
This method calls @c cudaFree to free the memory space pointed to by @c ptr
using the current device context of the caller.
*/
template <typename T>
void cuda_free(T* ptr) {
TF_CHECK_CUDA(cudaFree(ptr), "failed to free memory ", ptr);
}
/**
@brief copies data between host and device asynchronously through a stream
@param stream stream identifier
@param dst destination memory address
@param src source memory address
@param count size in bytes to copy
The method calls @c cudaMemcpyAsync with the given @c stream
using @c cudaMemcpyDefault to infer the memory space of the source and
the destination pointers. The memory areas may not overlap.
*/
inline void cuda_memcpy_async(
cudaStream_t stream, void* dst, const void* src, size_t count
) {
TF_CHECK_CUDA(
cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream),
"failed to perform cudaMemcpyAsync"
);
}
/**
@brief initializes or sets GPU memory to the given value byte by byte
@param stream stream identifier
@param devPtr pointer to GPU memory
@param value value to set for each byte of the specified memory
@param count size in bytes to set
The method calls @c cudaMemsetAsync with the given @c stream
to fill the first @c count bytes of the memory area pointed to by @c devPtr
with the constant byte value @c value.
*/
inline void cuda_memset_async(
cudaStream_t stream, void* devPtr, int value, size_t count
){
TF_CHECK_CUDA(
cudaMemsetAsync(devPtr, value, count, stream),
"failed to perform cudaMemsetAsync"
);
}
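// Example (sketch): allocate device memory, clear it asynchronously, copy it
// back to a host buffer, and wait on the stream; `stream`, `host`, and `N`
// are assumptions for illustration.
//
//   float* dev = tf::cuda_malloc_device<float>(N);
//   tf::cuda_memset_async(stream, dev, 0, N * sizeof(float));
//   tf::cuda_memcpy_async(stream, host, dev, N * sizeof(float));
//   TF_CHECK_CUDA(cudaStreamSynchronize(stream), "stream synchronization failed");
//   tf::cuda_free(dev);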
// ----------------------------------------------------------------------------
// Shared Memory
// ----------------------------------------------------------------------------
//
// Because dynamically sized shared memory arrays are declared "extern",
// we can't templatize them directly. To get around this, we declare a
// simple wrapper struct that will declare the extern array with a different
// name depending on the type. This avoids compiler errors about duplicate
// definitions.
//
// To use dynamically allocated shared memory in a templatized __global__ or
// __device__ function, just replace code like this:
//
// template<class T>
// __global__ void
// foo( T* g_idata, T* g_odata)
// {
// // Shared mem size is determined by the host app at run time
// extern __shared__ T sdata[];
// ...
// doStuff(sdata);
// ...
// }
//
// With this:
//
// template<class T>
// __global__ void
// foo( T* g_idata, T* g_odata)
// {
// // Shared mem size is determined by the host app at run time
// cudaSharedMemory<T> smem;
// T* sdata = smem.get();
// ...
// doStuff(sdata);
// ...
// }
// ----------------------------------------------------------------------------
// This is the un-specialized struct. Note that we prevent use of the
// un-specialized version by calling an undefined symbol in the function body,
// so any attempt to use it fails at link time.
/**
@private
*/
template <typename T>
struct cudaSharedMemory
{
// Ensure that we won't compile any un-specialized types
__device__ T *get()
{
extern __device__ void error(void);
error();
return NULL;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long, ulong, bool, float, and double
// One could also specialize it for user-defined types.
/**
@private
*/
template <>
struct cudaSharedMemory <int>
{
__device__ int *get()
{
extern __shared__ int s_int[];
return s_int;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <unsigned int>
{
__device__ unsigned int *get()
{
extern __shared__ unsigned int s_uint[];
return s_uint;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <char>
{
__device__ char *get()
{
extern __shared__ char s_char[];
return s_char;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <unsigned char>
{
__device__ unsigned char *get()
{
extern __shared__ unsigned char s_uchar[];
return s_uchar;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <short>
{
__device__ short *get()
{
extern __shared__ short s_short[];
return s_short;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <unsigned short>
{
__device__ unsigned short *get()
{
extern __shared__ unsigned short s_ushort[];
return s_ushort;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <long>
{
__device__ long *get()
{
extern __shared__ long s_long[];
return s_long;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <unsigned long>
{
__device__ unsigned long *get()
{
extern __shared__ unsigned long s_ulong[];
return s_ulong;
}
};
//template <>
//struct cudaSharedMemory <size_t>
//{
// __device__ size_t *get()
// {
// extern __shared__ size_t s_sizet[];
// return s_sizet;
// }
//};
/**
@private
*/
template <>
struct cudaSharedMemory <bool>
{
__device__ bool *get()
{
extern __shared__ bool s_bool[];
return s_bool;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <float>
{
__device__ float *get()
{
extern __shared__ float s_float[];
return s_float;
}
};
/**
@private
*/
template <>
struct cudaSharedMemory <double>
{
__device__ double *get()
{
extern __shared__ double s_double[];
return s_double;
}
};
// ----------------------------------------------------------------------------
// cudaDeviceAllocator
// ----------------------------------------------------------------------------
/**
@class cudaDeviceAllocator
@brief class to create a CUDA device allocator
@tparam T element type
A %cudaDeviceAllocator enables device-specific allocation for
standard library containers. It is typically passed as template parameter
when declaring standard library containers (e.g. std::vector).
*/
template<typename T>
class cudaDeviceAllocator {
public:
/**
@brief element type
*/
using value_type = T;
/**
@brief element pointer type
*/
using pointer = T*;
/**
@brief element reference type
*/
using reference = T&;
/**
@brief const element pointer type
*/
using const_pointer = const T*;
/**
@brief constant element reference type
*/
using const_reference = const T&;
/**
@brief size type
*/
using size_type = std::size_t;
/**
@brief pointer difference type
*/
using difference_type = std::ptrdiff_t;
/**
@brief its member type @c other is the equivalent allocator type to allocate elements of type @c U
*/
template<typename U>
struct rebind {
/**
@brief allocator of a different data type
*/
using other = cudaDeviceAllocator<U>;
};
/**
@brief Constructs a device allocator object.
*/
cudaDeviceAllocator() noexcept {}
/**
@brief Constructs a device allocator object from another device allocator object.
*/
cudaDeviceAllocator( const cudaDeviceAllocator& ) noexcept {}
/**
@brief Constructs a device allocator object from another device allocator
object with a different element type.
*/
template<typename U>
cudaDeviceAllocator( const cudaDeviceAllocator<U>& ) noexcept {}
/**
@brief Destructs the device allocator object.
*/
~cudaDeviceAllocator() noexcept {}
/**
@brief Returns the address of x.
This effectively means returning &x.
@param x reference to an object
@return a pointer to the object
*/
pointer address( reference x ) { return &x; }
/**
@brief Returns the address of x.
This effectively means returning &x.
@param x reference to an object
@return a pointer to the object
*/
const_pointer address( const_reference x ) const { return &x; }
/**
@brief allocates block of storage.
Attempts to allocate a block of storage with a size large enough to contain
@c n elements of member type, @c value_type, and returns a pointer
to the first element.
The storage is aligned appropriately for object of type @c value_type,
but they are not constructed.
The block of storage is allocated using cudaMalloc and throws std::bad_alloc
if it cannot allocate the total amount of storage requested.
@param n number of elements (each of size sizeof(value_type)) to be allocated
@return a pointer to the initial element in the block of storage.
*/
pointer allocate( size_type n, std::allocator<void>::const_pointer = 0 )
{
void* ptr = NULL;
TF_CHECK_CUDA(
cudaMalloc( &ptr, n*sizeof(T) ),
"failed to allocate ", n, " elements (", n*sizeof(T), "bytes)"
)
return static_cast<pointer>(ptr);
}
/**
@brief Releases a block of storage previously allocated with member allocate and not yet released
The elements in the array are not destroyed by a call to this member function.
@param ptr pointer to a block of storage previously allocated with allocate
*/
void deallocate( pointer ptr, size_type )
{
if(ptr){
cudaFree(ptr);
}
}
/**
@brief returns the maximum number of elements that could potentially
be allocated by this allocator
A call to member allocate with the value returned by this function
can still fail to allocate the requested storage.
@return the maximum number of elements that might be allocated
by a call to member allocate
*/
  size_type max_size() const noexcept { return static_cast<size_type>(-1); }
/**
@brief ignored to avoid de-referencing device pointer from the host
*/
void construct( pointer, const_reference) { }
/**
@brief ignored to avoid de-referencing device pointer from the host
*/
void destroy( pointer) { }
/**
@brief compares two allocator of different types using @c ==
Device allocators of different types are always equal to each other
because the storage allocated by the allocator @c a1 can be deallocated
through @c a2.
*/
template <typename U>
bool operator == (const cudaDeviceAllocator<U>&) const noexcept {
return true;
}
/**
@brief compares two allocator of different types using @c !=
Device allocators of different types are always equal to each other
because the storage allocated by the allocator @c a1 can be deallocated
through @c a2.
*/
template <typename U>
bool operator != (const cudaDeviceAllocator<U>&) const noexcept {
return false;
}
};
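// Example (sketch): the allocator can also be used directly; note that
// construct/destroy are intentionally no-ops, so elements of device memory
// are never touched from the host.
//
//   tf::cudaDeviceAllocator<float> alloc;
//   float* dev = alloc.allocate(1024);   // 1024 floats of device memory
//   // ... use dev in kernels or with cudaMemcpy ...
//   alloc.deallocate(dev, 1024);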
// ----------------------------------------------------------------------------
// cudaUSMAllocator
// ----------------------------------------------------------------------------
/**
@class cudaUSMAllocator
@brief class to create a unified shared memory (USM) allocator
@tparam T element type
A %cudaUSMAllocator enables using unified shared memory (USM) allocation for
standard library containers. It is typically passed as template parameter
when declaring standard library containers (e.g. std::vector).
*/
template<typename T>
class cudaUSMAllocator {
public:
/**
@brief element type
*/
using value_type = T;
/**
@brief element pointer type
*/
using pointer = T*;
/**
@brief element reference type
*/
using reference = T&;
/**
@brief const element pointer type
*/
using const_pointer = const T*;
/**
@brief constant element reference type
*/
using const_reference = const T&;
/**
@brief size type
*/
using size_type = std::size_t;
/**
@brief pointer difference type
*/
using difference_type = std::ptrdiff_t;
/**
@brief its member type @c other is the equivalent allocator type to allocate elements of type @c U
*/
template<typename U>
struct rebind {
/**
@brief allocator of a different data type
*/
using other = cudaUSMAllocator<U>;
};
/**
@brief Constructs a device allocator object.
*/
cudaUSMAllocator() noexcept {}
/**
@brief Constructs a device allocator object from another device allocator object.
*/
cudaUSMAllocator( const cudaUSMAllocator& ) noexcept {}
/**
@brief Constructs a device allocator object from another device allocator
object with a different element type.
*/
template<typename U>
cudaUSMAllocator( const cudaUSMAllocator<U>& ) noexcept {}
/**
@brief Destructs the device allocator object.
*/
~cudaUSMAllocator() noexcept {}
/**
@brief Returns the address of x.
This effectively means returning &x.
@param x reference to an object
@return a pointer to the object
*/
pointer address( reference x ) { return &x; }
/**
@brief Returns the address of x.
This effectively means returning &x.
@param x reference to an object
@return a pointer to the object
*/
const_pointer address( const_reference x ) const { return &x; }
/**
@brief allocates block of storage.
Attempts to allocate a block of storage with a size large enough to contain
@c n elements of member type, @c value_type, and returns a pointer
to the first element.
The storage is aligned appropriately for object of type @c value_type,
but they are not constructed.
The block of storage is allocated using cudaMallocManaged and throws std::bad_alloc
if it cannot allocate the total amount of storage requested.
@param n number of elements (each of size sizeof(value_type)) to be allocated
@return a pointer to the initial element in the block of storage.
*/
pointer allocate( size_type n, std::allocator<void>::const_pointer = 0 )
{
void* ptr {nullptr};
TF_CHECK_CUDA(
cudaMallocManaged( &ptr, n*sizeof(T) ),
"failed to allocate ", n, " elements (", n*sizeof(T), "bytes)"
)
return static_cast<pointer>(ptr);
}
/**
@brief Releases a block of storage previously allocated with member allocate and not yet released
The elements in the array are not destroyed by a call to this member function.
@param ptr pointer to a block of storage previously allocated with allocate
*/
void deallocate( pointer ptr, size_type )
{
if(ptr){
cudaFree(ptr);
}
}
/**
@brief returns the maximum number of elements that could potentially
be allocated by this allocator
A call to member allocate with the value returned by this function
can still fail to allocate the requested storage.
@return the maximum number of elements that might be allocated
by a call to member allocate
*/
  size_type max_size() const noexcept { return static_cast<size_type>(-1); }
/**
@brief Constructs an element object on the location pointed by ptr.
@param ptr pointer to a location with enough storage space to contain
an element of type @c value_type
@param val value to initialize the constructed element to
*/
void construct( pointer ptr, const_reference val ) {
new ((void*)ptr) value_type(val);
}
/**
@brief destroys in-place the object pointed by @c ptr
Notice that this does not deallocate the storage for the element but calls
its destructor.
@param ptr pointer to the object to be destroyed
*/
void destroy( pointer ptr ) {
ptr->~value_type();
}
/**
@brief compares two allocator of different types using @c ==
USM allocators of different types are always equal to each other
because the storage allocated by the allocator @c a1 can be deallocated
through @c a2.
*/
template <typename U>
bool operator == (const cudaUSMAllocator<U>&) const noexcept {
return true;
}
/**
@brief compares two allocator of different types using @c !=
USM allocators of different types are always equal to each other
because the storage allocated by the allocator @c a1 can be deallocated
through @c a2.
*/
template <typename U>
bool operator != (const cudaUSMAllocator<U>&) const noexcept {
return false;
}
};
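// Example (sketch): because unified shared (managed) memory is accessible from
// both host and device, the allocator composes naturally with standard
// containers; `my_kernel` is a hypothetical kernel shown for illustration only.
//
//   std::vector<int, tf::cudaUSMAllocator<int>> v(1024, 0);
//   my_kernel<<<4, 256>>>(v.data(), v.size());
//   cudaDeviceSynchronize();   // wait before reading v on the host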
// ----------------------------------------------------------------------------
// GPU vector object
// ----------------------------------------------------------------------------
//template <typename T>
//using cudaDeviceVector = std::vector<NoInit<T>, cudaDeviceAllocator<NoInit<T>>>;
//template <typename T>
//using cudaUSMVector = std::vector<T, cudaUSMAllocator<T>>;
/**
@private
*/
template <typename T>
class cudaDeviceVector {
public:
cudaDeviceVector() = default;
cudaDeviceVector(size_t N) : _N {N} {
if(N) {
TF_CHECK_CUDA(
cudaMalloc(&_data, N*sizeof(T)),
"failed to allocate device memory (", N*sizeof(T), " bytes)"
);
}
}
cudaDeviceVector(cudaDeviceVector&& rhs) :
_data{rhs._data}, _N {rhs._N} {
rhs._data = nullptr;
rhs._N = 0;
}
~cudaDeviceVector() {
if(_data) {
cudaFree(_data);
}
}
cudaDeviceVector& operator = (cudaDeviceVector&& rhs) {
if(_data) {
cudaFree(_data);
}
_data = rhs._data;
_N = rhs._N;
rhs._data = nullptr;
rhs._N = 0;
return *this;
}
size_t size() const { return _N; }
T* data() { return _data; }
const T* data() const { return _data; }
cudaDeviceVector(const cudaDeviceVector&) = delete;
cudaDeviceVector& operator = (const cudaDeviceVector&) = delete;
private:
T* _data {nullptr};
size_t _N {0};
};
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/cuda_capturer.hpp | #pragma once
#include "cuda_task.hpp"
#include "cuda_optimizer.hpp"
/**
@file cuda_capturer.hpp
@brief %cudaFlow capturer include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// class definition: cudaFlowCapturer
// ----------------------------------------------------------------------------
/**
@class cudaFlowCapturer
@brief class to create a %cudaFlow graph using stream capture
The usage of tf::cudaFlowCapturer is similar to tf::cudaFlow, except users can
call the method tf::cudaFlowCapturer::on to capture a sequence of asynchronous
CUDA operations through the given stream.
The following example creates a CUDA graph that captures two kernel tasks,
@c task_1 and @c task_2, where @c task_1 runs before @c task_2.
@code{.cpp}
taskflow.emplace([](tf::cudaFlowCapturer& capturer){
// capture my_kernel_1 through the given stream managed by the capturer
auto task_1 = capturer.on([&](cudaStream_t stream){
my_kernel_1<<<grid_1, block_1, shm_size_1, stream>>>(my_parameters_1);
});
// capture my_kernel_2 through the given stream managed by the capturer
auto task_2 = capturer.on([&](cudaStream_t stream){
my_kernel_2<<<grid_2, block_2, shm_size_2, stream>>>(my_parameters_2);
});
task_1.precede(task_2);
});
@endcode
Similar to tf::cudaFlow, a %cudaFlowCapturer is a task (tf::Task)
created from tf::Taskflow
and will be run by @em one worker thread in the executor.
That is, the callable that describes a %cudaFlowCapturer
will be executed sequentially.
Inside a %cudaFlow capturer task, different GPU tasks (tf::cudaTask) may run
in parallel depending on the selected optimization algorithm.
By default, we use tf::cudaRoundRobinCapturing to transform a user-level
graph into a native CUDA graph.
Please refer to @ref GPUTaskingcudaFlowCapturer for details.
*/
class cudaFlowCapturer {
friend class cudaFlow;
friend class Executor;
// created by user
struct External {
cudaGraph graph;
};
// created from executor
struct Internal {
Internal(Executor& e) : executor{e} {}
Executor& executor;
};
// created from cudaFlow
struct Proxy {
};
using handle_t = std::variant<External, Internal, Proxy>;
using Optimizer = std::variant<
cudaRoundRobinCapturing,
cudaSequentialCapturing,
cudaLinearCapturing
>;
public:
/**
@brief constructs a standalone cudaFlowCapturer
A standalone %cudaFlow capturer does not go through any taskflow and
can be run by the caller thread using explicit offload methods
(e.g., tf::cudaFlowCapturer::offload).
*/
cudaFlowCapturer();
/**
@brief destructs the cudaFlowCapturer
*/
virtual ~cudaFlowCapturer();
/**
@brief queries the emptiness of the graph
*/
bool empty() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief clear this %cudaFlow capturer
*/
void clear();
/**
@brief dumps the capture graph into a DOT format through an
output stream
*/
void dump(std::ostream& os) const;
/**
@brief selects a different optimization algorithm
@tparam OPT optimizer type
@tparam ArgsT arguments types
@param args arguments to forward to construct the optimizer
@return a reference to the optimizer
We currently support the following optimization algorithms to capture
a user-described %cudaFlow:
+ tf::cudaSequentialCapturing
+ tf::cudaRoundRobinCapturing
+ tf::cudaLinearCapturing
By default, tf::cudaFlowCapturer uses the round-robin optimization
algorithm with four streams to transform a user-level graph into
a native CUDA graph.
*/
template <typename OPT, typename... ArgsT>
OPT& make_optimizer(ArgsT&&... args);
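  // A minimal usage sketch: switch this capturer to the linear optimizer
  // before capturing tasks (the optimizer types are those listed above).
  //
  //   taskflow.emplace([](tf::cudaFlowCapturer& capturer){
  //     capturer.make_optimizer<tf::cudaLinearCapturing>();
  //     // ... capture tasks as usual ...
  //   });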
// ------------------------------------------------------------------------
// basic methods
// ------------------------------------------------------------------------
/**
@brief captures a sequential CUDA operations from the given callable
@tparam C callable type constructible with @c std::function<void(cudaStream_t)>
@param callable a callable to capture CUDA operations with the stream
This method applies a stream created by the capturer to capture
a sequence of CUDA operations defined in the callable.
*/
template <typename C, std::enable_if_t<
std::is_invocable_r_v<void, C, cudaStream_t>, void>* = nullptr
>
cudaTask on(C&& callable);
/**
@brief updates a capture task to another sequential CUDA operations
The method is similar to cudaFlowCapturer::on but operates
on an existing task.
*/
template <typename C, std::enable_if_t<
std::is_invocable_r_v<void, C, cudaStream_t>, void>* = nullptr
>
void on(cudaTask task, C&& callable);
/**
@brief captures a no-operation task
@return a tf::cudaTask handle
An empty node performs no operation during execution,
but can be used for transitive ordering.
For example, a phased execution graph with 2 groups of @c n nodes
with a barrier between them can be represented using an empty node
and @c 2*n dependency edges,
rather than no empty node and @c n^2 dependency edges.
*/
cudaTask noop();
/**
@brief updates a task to a no-operation task
The method is similar to tf::cudaFlowCapturer::noop but
operates on an existing task.
*/
void noop(cudaTask task);
/**
@brief copies data between host and device asynchronously through a stream
@param dst destination memory address
@param src source memory address
@param count size in bytes to copy
The method captures a @c cudaMemcpyAsync operation through an
internal stream.
*/
cudaTask memcpy(void* dst, const void* src, size_t count);
/**
@brief updates a capture task to a memcpy operation
The method is similar to cudaFlowCapturer::memcpy but operates on an
existing task.
*/
void memcpy(cudaTask task, void* dst, const void* src, size_t count);
/**
@brief captures a copy task of typed data
@tparam T element type (non-void)
@param tgt pointer to the target memory block
@param src pointer to the source memory block
@param num number of elements to copy
@return cudaTask handle
A copy task transfers <tt>num*sizeof(T)</tt> bytes of data from a source location
to a target location. Direction can be arbitrary among CPUs and GPUs.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
cudaTask copy(T* tgt, const T* src, size_t num);
/**
@brief updates a capture task to a copy operation
The method is similar to cudaFlowCapturer::copy but operates on
an existing task.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
void copy(cudaTask task, T* tgt, const T* src, size_t num);
/**
@brief initializes or sets GPU memory to the given value byte by byte
@param ptr pointer to GPU memory
@param v value to set for each byte of the specified memory
@param n size in bytes to set
The method captures a @c cudaMemsetAsync operation through an
internal stream to fill the first @c n bytes of the memory area
pointed to by @c ptr with the constant byte value @c v.
*/
cudaTask memset(void* ptr, int v, size_t n);
/**
@brief updates a capture task to a memset operation
The method is similar to cudaFlowCapturer::memset but operates on
an existing task.
*/
void memset(cudaTask task, void* ptr, int value, size_t n);
/**
@brief captures a kernel
@tparam F kernel function type
@tparam ArgsT kernel function parameters type
@param g configured grid
@param b configured block
@param s configured shared memory size in bytes
@param f kernel function
@param args arguments to forward to the kernel function by copy
@return cudaTask handle
*/
template <typename F, typename... ArgsT>
cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT&&... args);
/**
@brief updates a capture task to a kernel operation
The method is similar to cudaFlowCapturer::kernel but operates on
an existing task.
*/
template <typename F, typename... ArgsT>
void kernel(
cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
);
// ------------------------------------------------------------------------
// generic algorithms
// ------------------------------------------------------------------------
/**
@brief captures a kernel that runs the given callable with only one thread
@tparam C callable type
@param c callable to run by a single kernel thread
*/
template <typename C>
cudaTask single_task(C c);
/**
@brief updates a capture task to a single-threaded kernel
This method is similar to cudaFlowCapturer::single_task but operates
on an existing task.
*/
template <typename C>
void single_task(cudaTask task, C c);
/**
@brief captures a kernel that applies a callable to each dereferenced element
of the data array
@tparam I iterator type
@tparam C callable type
@param first iterator to the beginning
@param last iterator to the end
@param callable a callable object to apply to the dereferenced iterator
@return cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(auto itr = first; itr != last; itr++) {
callable(*itr);
}
@endcode
*/
template <typename I, typename C>
cudaTask for_each(I first, I last, C callable);
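  // A minimal usage sketch (assumes `gpu_data` is a device pointer to N
  // floats and that extended device lambdas are enabled):
  //
  //   capturer.for_each(
  //     gpu_data, gpu_data + N, [] __device__ (float& x) { x = 0.0f; }
  //   );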
/**
@brief updates a capture task to a for-each kernel task
This method is similar to cudaFlowCapturer::for_each but operates
on an existing task.
*/
template <typename I, typename C>
void for_each(cudaTask task, I first, I last, C callable);
/**
@brief captures a kernel that applies a callable to each index in the range
with the step size
@tparam I index type
@tparam C callable type
@param first beginning index
@param last last index
@param step step size
@param callable the callable to apply to each element in the data array
@return cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
// step is positive [first, last)
for(auto i=first; i<last; i+=step) {
callable(i);
}
// step is negative [first, last)
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
*/
template <typename I, typename C>
cudaTask for_each_index(I first, I last, I step, C callable);
/**
@brief updates a capture task to a for-each-index kernel task
This method is similar to cudaFlowCapturer::for_each_index but operates
on an existing task.
*/
template <typename I, typename C>
void for_each_index(
cudaTask task, I first, I last, I step, C callable
);
/**
@brief captures a kernel that transforms an input range to an output range
@tparam I input iterator type
@tparam O output iterator type
@tparam C unary operator type
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param op unary operator to apply to transform each item in the range
@return cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*output++ = op(*first++);
}
@endcode
*/
template <typename I, typename O, typename C>
cudaTask transform(I first, I last, O output, C op);
/**
@brief updates a capture task to a transform kernel task
This method is similar to cudaFlowCapturer::transform but operates
on an existing task.
*/
template <typename I, typename O, typename C>
void transform(cudaTask task, I first, I last, O output, C op);
/**
@brief captures a kernel that transforms two input ranges to an output range
@tparam I1 first input iterator type
@tparam I2 second input iterator type
@tparam O output iterator type
@tparam C unary operator type
@param first1 iterator to the beginning of the input range
@param last1 iterator to the end of the input range
@param first2 iterator to the beginning of the second input range
@param output iterator to the beginning of the output range
@param op binary operator to apply to transform each pair of items in the
two input ranges
@return cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first1 != last1) {
*output++ = op(*first1++, *first2++);
}
@endcode
*/
template <typename I1, typename I2, typename O, typename C>
cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op);
/**
@brief updates a capture task to a transform kernel task
This method is similar to cudaFlowCapturer::transform but operates
on an existing task.
*/
template <typename I1, typename I2, typename O, typename C>
void transform(
cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op
);
/**
@brief captures kernels that perform parallel reduction over a range of items
@tparam I input iterator type
@tparam T value type
@tparam C binary operator type
@param first iterator to the beginning
@param last iterator to the end
@param result pointer to the result with an initialized value
@param op binary reduction operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
cudaTask reduce(I first, I last, T* result, C op);
/**
@brief updates a capture task to a reduction task
This method is similar to cudaFlowCapturer::reduce but operates
on an existing task.
*/
template <typename I, typename T, typename C>
void reduce(cudaTask task, I first, I last, T* result, C op);
/**
@brief similar to tf::cudaFlowCapturer::reduce but does not assume
any initial value to reduce
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
*result = *first++;  // the initial value does not participate in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
cudaTask uninitialized_reduce(I first, I last, T* result, C op);
/**
@brief updates a capture task to an uninitialized-reduction task
This method is similar to cudaFlowCapturer::uninitialized_reduce
but operates on an existing task.
*/
template <typename I, typename T, typename C>
void uninitialized_reduce(
cudaTask task, I first, I last, T* result, C op
);
/**
@brief captures kernels that perform parallel reduction over a range of
transformed items
@tparam I input iterator type
@tparam T value type
@tparam C binary operator type
@tparam U unary operator type
@param first iterator to the beginning
@param last iterator to the end
@param result pointer to the result with an initialized value
@param bop binary reduce operator
@param uop unary transform operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template <typename I, typename T, typename C, typename U>
cudaTask transform_reduce(I first, I last, T* result, C bop, U uop);
/**
@brief updates a capture task to a transform-reduce task
This method is similar to cudaFlowCapturer::transform_reduce but
operates on an existing task.
*/
template <typename I, typename T, typename C, typename U>
void transform_reduce(
cudaTask task, I first, I last, T* result, C bop, U uop
);
/**
@brief similar to tf::cudaFlowCapturer::transform_reduce but does not assume
any initial value to reduce
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
*result = uop(*first++);  // the initial value does not participate in the loop
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template <typename I, typename T, typename C, typename U>
cudaTask transform_uninitialized_reduce(I first, I last, T* result, C bop, U uop);
/**
@brief updates a capture task to a transform-reduce task of no initialized value
This method is similar to cudaFlowCapturer::transform_uninitialized_reduce
but operates on an existing task.
*/
template <typename I, typename T, typename C, typename U>
void transform_uninitialized_reduce(
cudaTask task, I first, I last, T* result, C bop, U uop
);
/**
@brief captures kernels that perform parallel inclusive scan
over a range of items
@tparam I input iterator type
@tparam O output iterator type
@tparam C binary operator type
@param first iterator to the beginning
@param last iterator to the end
@param output iterator to the beginning of the output
@param op binary operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(size_t i=0; i<std::distance(first, last); i++) {
*(output + i) = i ? op(*(first+i), *(output+i-1)) : *(first+i);
}
@endcode
*/
template <typename I, typename O, typename C>
cudaTask inclusive_scan(I first, I last, O output, C op);
/**
@brief updates a capture task to an inclusive scan task
This method is similar to cudaFlowCapturer::inclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename C>
void inclusive_scan(cudaTask task, I first, I last, O output, C op);
/**
@brief similar to cudaFlowCapturer::inclusive_scan
but excludes the first value
*/
template <typename I, typename O, typename C>
cudaTask exclusive_scan(I first, I last, O output, C op);
/**
@brief updates a capture task to an exclusive scan task
This method is similar to cudaFlowCapturer::exclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename C>
void exclusive_scan(cudaTask task, I first, I last, O output, C op);
/**
@brief captures kernels that perform parallel inclusive scan
over a range of transformed items
@tparam I input iterator type
@tparam O output iterator type
@tparam B binary operator type
@tparam U unary operator type
@param first iterator to the beginning
@param last iterator to the end
@param output iterator to the beginning of the output
@param bop binary operator
@param uop unary operator
@return a tf::cudaTask handle
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
for(size_t i=0; i<std::distance(first, last); i++) {
*(output + i) = i ? bop(uop(*(first+i)), *(output+i-1)) : uop(*(first+i));
}
@endcode
*/
template <typename I, typename O, typename B, typename U>
cudaTask transform_inclusive_scan(I first, I last, O output, B bop, U uop);
/**
@brief updates a capture task to a transform-inclusive scan task
This method is similar to cudaFlowCapturer::transform_inclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename B, typename U>
void transform_inclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
);
/**
@brief similar to cudaFlowCapturer::transform_inclusive_scan but
excludes the first value
*/
template <typename I, typename O, typename B, typename U>
cudaTask transform_exclusive_scan(I first, I last, O output, B bop, U uop);
/**
@brief updates a capture task to a transform-exclusive scan task
This method is similar to cudaFlowCapturer::transform_exclusive_scan
but operates on an existing task.
*/
template <typename I, typename O, typename B, typename U>
void transform_exclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
);
/**
@brief captures kernels that perform parallel merge on two sorted arrays
@tparam A iterator type of the first input array
@tparam B iterator type of the second input array
@tparam C iterator type of the output array
@tparam Comp comparator type
@param a_first iterator to the beginning of the first input array
@param a_last iterator to the end of the first input array
@param b_first iterator to the beginning of the second input array
@param b_last iterator to the end of the second input array
@param c_first iterator to the beginning of the output array
@param comp binary comparator
@return a tf::cudaTask handle
Merges two sorted ranges <tt>[a_first, a_last)</tt> and
<tt>[b_first, b_last)</tt> into one sorted range beginning at @c c_first.
A sequence is said to be sorted with respect to a comparator @c comp
if for any iterator it pointing to the sequence and
any non-negative integer @c n such that <tt>it + n</tt> is a valid iterator
pointing to an element of the sequence, <tt>comp(*(it + n), *it)</tt>
evaluates to @c false.
*/
template <typename A, typename B, typename C, typename Comp>
cudaTask merge(A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp);
/**
@brief updates a capture task to a merge task
This method is similar to cudaFlowCapturer::merge but operates
on an existing task.
*/
template <typename A, typename B, typename C, typename Comp>
void merge(
cudaTask task, A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
);
/**
@brief captures kernels that perform parallel key-value merge
@tparam a_keys_it first key iterator type
@tparam a_vals_it first value iterator type
@tparam b_keys_it second key iterator type
@tparam b_vals_it second value iterator type
@tparam c_keys_it output key iterator type
@tparam c_vals_it output value iterator type
@tparam C comparator type
@param a_keys_first iterator to the beginning of the first key range
@param a_keys_last iterator to the end of the first key range
@param a_vals_first iterator to the beginning of the first value range
@param b_keys_first iterator to the beginning of the second key range
@param b_keys_last iterator to the end of the second key range
@param b_vals_first iterator to the beginning of the second value range
@param c_keys_first iterator to the beginning of the output key range
@param c_vals_first iterator to the beginning of the output value range
@param comp comparator
Performs a key-value merge that copies elements from
<tt>[a_keys_first, a_keys_last)</tt> and <tt>[b_keys_first, b_keys_last)</tt>
into a single range, <tt>[c_keys_first, c_keys_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending key order.
At the same time, the merge copies elements from the two associated ranges
<tt>[a_vals_first, a_vals_first + (a_keys_last - a_keys_first))</tt> and
<tt>[b_vals_first, b_vals_first + (b_keys_last - b_keys_first))</tt> into a single range,
<tt>[c_vals_first, c_vals_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending order
implied by each input element's associated key.
For example, assume:
+ @c a_keys = <tt>{8, 1}</tt>
+ @c a_vals = <tt>{1, 2}</tt>
+ @c b_keys = <tt>{3, 7}</tt>
+ @c b_vals = <tt>{3, 4}</tt>
After the merge, we have:
+ @c c_keys = <tt>{1, 3, 7, 8}</tt>
+ @c c_vals = <tt>{2, 3, 4, 1}</tt>
*/
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
cudaTask merge_by_key(
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
);
/**
@brief updates a capture task to a key-value merge task
This method is similar to tf::cudaFlowCapturer::merge_by_key but operates
on an existing task.
*/
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
void merge_by_key(
cudaTask task,
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
);
/**
@brief captures kernels that sort the given array
@tparam I iterator type of the first input array
@tparam C comparator type
@param first iterator to the beginning of the input array
@param last iterator to the end of the input array
@param comp binary comparator
@return a tf::cudaTask handle
Sorts elements in the range <tt>[first, last)</tt>
with the given comparator.
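A minimal capture sketch (illustrative; @c data is assumed to be a
device-accessible array of @c N integers):
@code{.cpp}
tf::cudaFlowCapturer capturer;
capturer.sort(
  data, data + N,
  [] __device__ (int x, int y) { return x < y; }   // ascending order
);
capturer.offload();
@endcode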
*/
template <typename I, typename C>
cudaTask sort(I first, I last, C comp);
/**
@brief updates a capture task to a sort task
This method is similar to cudaFlowCapturer::sort but operates on
an existing task.
*/
template <typename I, typename C>
void sort(cudaTask task, I first, I last, C comp);
/**
@brief captures kernels that sort the given array
@tparam K_it iterator type of the key
@tparam V_it iterator type of the value
@tparam C comparator type
@param k_first iterator to the beginning of the key array
@param k_last iterator to the end of the key array
@param v_first iterator to the beginning of the value array
@param comp binary comparator
@return a tf::cudaTask handle
Sorts key-value elements in <tt>[k_first, k_last)</tt> and
<tt>[v_first, v_first + (k_last - k_first))</tt> into ascending key order
using the given comparator @c comp.
If @c i and @c j are any two valid iterators in <tt>[k_first, k_last)</tt>
such that @c i precedes @c j, and @c p and @c q are iterators in
<tt>[v_first, v_first + (k_last - k_first))</tt> corresponding to
@c i and @c j respectively, then <tt>comp(*j, *i)</tt> evaluates to @c false.
For example, assume:
+ @c keys are <tt>{1, 4, 2, 8, 5, 7}</tt>
+ @c values are <tt>{'a', 'b', 'c', 'd', 'e', 'f'}</tt>
After sort:
+ @c keys are <tt>{1, 2, 4, 5, 7, 8}</tt>
+ @c values are <tt>{'a', 'c', 'b', 'e', 'f', 'd'}</tt>
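A possible capture sketch for the example above (@c keys and @c values are
assumed to be device-accessible arrays of length 6):
@code{.cpp}
tf::cudaFlowCapturer capturer;
capturer.sort_by_key(
  keys, keys + 6, values,
  [] __device__ (int x, int y) { return x < y; }   // ascending key order
);
capturer.offload();
@endcode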
*/
template <typename K_it, typename V_it, typename C>
cudaTask sort_by_key(K_it k_first, K_it k_last, V_it v_first, C comp);
/**
@brief updates a capture task to a key-value sort task
This method is similar to tf::cudaFlowCapturer::sort_by_key
but operates on an existing task.
*/
template <typename K_it, typename V_it, typename C>
void sort_by_key(
cudaTask task, K_it k_first, K_it k_last, V_it v_first, C comp
);
/**
@brief creates a task to find the index of the first element in a range
@tparam I input iterator type
@tparam U unary operator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx pointer to the index of the found element
@param op unary operator which returns @c true for the required element
Finds the index @c idx of the first element in the range
<tt>[first, last)</tt> such that <tt>op(*(first+idx))</tt> is true.
This is equivalent to the parallel execution of the following loop:
@code{.cpp}
unsigned idx = 0;
for(; first != last; ++first, ++idx) {
if (op(*first)) {
return idx;
}
}
return idx;
@endcode
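A minimal capture sketch (illustrative; @c data is assumed to be a
device-accessible array of @c N integers and @c idx a device-accessible
<tt>unsigned*</tt>, e.g., allocated with @c cudaMallocManaged):
@code{.cpp}
tf::cudaFlowCapturer capturer;
capturer.find_if(
  data, data + N, idx,
  [] __device__ (int x) { return x == 42; }   // first element equal to 42
);
capturer.offload();   // afterwards, *idx holds the found index (N if none satisfies the predicate)
@endcode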
*/
template <typename I, typename U>
cudaTask find_if(I first, I last, unsigned* idx, U op);
/**
@brief updates the parameters of a find-if task
This method is similar to tf::cudaFlowCapturer::find_if but operates
on an existing task.
*/
template <typename I, typename U>
void find_if(cudaTask task, I first, I last, unsigned* idx, U op);
/**
@brief finds the index of the minimum element in a range
@tparam I input iterator type
@tparam O comparator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx pointer to the index of the minimum element
@param op comparison function object
The function launches kernels asynchronously to find
the smallest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto smallest = first;
for (auto itr = first + 1; itr != last; ++itr) {
  if (op(*itr, *smallest)) {
    smallest = itr;
  }
}
return std::distance(first, smallest);
@endcode
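A minimal capture sketch (illustrative; @c data is assumed to be a
device-accessible array of @c N floats and @c idx a device-accessible
<tt>unsigned*</tt>):
@code{.cpp}
tf::cudaFlowCapturer capturer;
capturer.min_element(
  data, data + N, idx,
  [] __device__ (float x, float y) { return x < y; }
);
capturer.offload();
@endcode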
*/
template <typename I, typename O>
cudaTask min_element(I first, I last, unsigned* idx, O op);
/**
@brief updates the parameters of a min-element task
This method is similar to cudaFlowCapturer::min_element but operates
on an existing task.
*/
template <typename I, typename O>
void min_element(cudaTask task, I first, I last, unsigned* idx, O op);
/**
@brief finds the index of the maximum element in a range
@tparam I input iterator type
@tparam O comparator type
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx pointer to the index of the maximum element
@param op comparison function object
The function launches kernels asynchronously to find
the largest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto largest = first;
for (auto itr = first + 1; itr != last; ++itr) {
  if (op(*largest, *itr)) {
    largest = itr;
  }
}
return std::distance(first, largest);
@endcode
*/
template <typename I, typename O>
cudaTask max_element(I first, I last, unsigned* idx, O op);
/**
@brief updates the parameters of a max-element task
This method is similar to cudaFlowCapturer::max_element but operates
on an existing task.
*/
template <typename I, typename O>
void max_element(cudaTask task, I first, I last, unsigned* idx, O op);
// ------------------------------------------------------------------------
// offload methods
// ------------------------------------------------------------------------
/**
@brief offloads the captured %cudaFlow onto a GPU and repeatedly runs it until
the predicate becomes true
@tparam P predicate type (a callable that takes no arguments and returns @c bool)
@param predicate predicate evaluated before each run; returning @c true stops the offloading
Immediately offloads the %cudaFlow captured so far onto a GPU and
repeatedly runs it until the predicate returns @c true.
By default, if users do not offload the %cudaFlow capturer,
the executor will offload it once.
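For example, the following sketch offloads the captured graph 100 times,
which is equivalent to calling cudaFlowCapturer::offload_n with 100:
@code{.cpp}
tf::cudaFlowCapturer capturer;
// ... capture GPU tasks ...
capturer.offload_until([i = 0] () mutable { return i++ == 100; });
@endcode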
*/
template <typename P>
void offload_until(P&& predicate);
/**
@brief offloads the captured %cudaFlow and executes it the given number of times
@param n number of executions
*/
void offload_n(size_t n);
/**
@brief offloads the captured %cudaFlow and executes it once
*/
void offload();
private:
handle_t _handle;
cudaGraph& _graph;
Optimizer _optimizer;
cudaGraphExec _exec {nullptr};
cudaFlowCapturer(cudaGraph&, Executor& executor);
cudaFlowCapturer(cudaGraph&);
cudaGraph_t _capture();
};
// constructs a cudaFlow capturer from a taskflow
inline cudaFlowCapturer::cudaFlowCapturer(cudaGraph& g) :
_handle {std::in_place_type_t<Proxy>{}},
_graph {g} {
}
// constructs a cudaFlow capturer from a taskflow
inline cudaFlowCapturer::cudaFlowCapturer(cudaGraph& g, Executor& e) :
_handle {std::in_place_type_t<Internal>{}, e},
_graph {g} {
}
// constructs a standalone cudaFlow capturer
inline cudaFlowCapturer::cudaFlowCapturer() :
_handle {std::in_place_type_t<External>{}},
_graph {std::get_if<External>(&_handle)->graph} {
}
inline cudaFlowCapturer::~cudaFlowCapturer() {
}
// Function: empty
inline bool cudaFlowCapturer::empty() const {
return _graph.empty();
}
// Function: num_tasks
inline size_t cudaFlowCapturer::num_tasks() const {
return _graph._nodes.size();
}
// Procedure: clear
inline void cudaFlowCapturer::clear() {
_exec.clear();
_graph._nodes.clear();
}
// Procedure: dump
inline void cudaFlowCapturer::dump(std::ostream& os) const {
_graph.dump(os, nullptr, "");
}
// Function: on
template <typename C, std::enable_if_t<
std::is_invocable_r_v<void, C, cudaStream_t>, void>*
>
cudaTask cudaFlowCapturer::on(C&& callable) {
auto node = _graph.emplace_back(_graph,
std::in_place_type_t<cudaNode::Capture>{}, std::forward<C>(callable)
);
return cudaTask(node);
}
// Function: noop
inline cudaTask cudaFlowCapturer::noop() {
return on([](cudaStream_t){});
}
// Function: noop
inline void cudaFlowCapturer::noop(cudaTask task) {
on(task, [](cudaStream_t){});
}
// Function: memcpy
inline cudaTask cudaFlowCapturer::memcpy(
void* dst, const void* src, size_t count
) {
return on([dst, src, count] (cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream),
"failed to capture memcpy"
);
});
}
// Function: copy
template <typename T, std::enable_if_t<!std::is_same_v<T, void>, void>*>
cudaTask cudaFlowCapturer::copy(T* tgt, const T* src, size_t num) {
return on([tgt, src, num] (cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream),
"failed to capture copy"
);
});
}
// Function: memset
inline cudaTask cudaFlowCapturer::memset(void* ptr, int v, size_t n) {
return on([ptr, v, n] (cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset"
);
});
}
// Function: kernel
template <typename F, typename... ArgsT>
cudaTask cudaFlowCapturer::kernel(
dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
) {
return on([g, b, s, f, args...] (cudaStream_t stream) mutable {
f<<<g, b, s, stream>>>(args...);
});
}
// Function: _capture
inline cudaGraph_t cudaFlowCapturer::_capture() {
return std::visit(
[this](auto&& opt){ return opt._optimize(_graph); }, _optimizer
);
}
// Procedure: offload_until
template <typename P>
void cudaFlowCapturer::offload_until(P&& predicate) {
// If the topology got changed, we need to destroy the executable
// and create a new one
if(_graph._state & cudaGraph::CHANGED) {
// TODO: store the native graph?
cudaGraphNative g(_capture());
_exec.instantiate(g);
}
// if the graph is just updated (i.e., topology does not change),
// we can skip part of the optimization and just update the executable
// with the new captured graph
else if(_graph._state & cudaGraph::UPDATED) {
// TODO: skip part of the optimization (e.g., levelization)
cudaGraphNative g(_capture());
if(_exec.update(g) != cudaGraphExecUpdateSuccess) {
_exec.instantiate(g);
}
// TODO: store the native graph?
}
// offload the executable
if(_exec) {
cudaStream s;
while(!predicate()) {
_exec.launch(s);
s.synchronize();
}
}
_graph._state = cudaGraph::OFFLOADED;
}
// Procedure: offload_n
inline void cudaFlowCapturer::offload_n(size_t n) {
offload_until([repeat=n] () mutable { return repeat-- == 0; });
}
// Procedure: offload
inline void cudaFlowCapturer::offload() {
offload_until([repeat=1] () mutable { return repeat-- == 0; });
}
// Function: on
template <typename C, std::enable_if_t<
std::is_invocable_r_v<void, C, cudaStream_t>, void>*
>
void cudaFlowCapturer::on(cudaTask task, C&& callable) {
if(task.type() != cudaTaskType::CAPTURE) {
TF_THROW("invalid cudaTask type (must be CAPTURE)");
}
_graph._state |= cudaGraph::UPDATED;
std::get_if<cudaNode::Capture>(&task._node->_handle)->work =
std::forward<C>(callable);
}
// Function: memcpy
inline void cudaFlowCapturer::memcpy(
cudaTask task, void* dst, const void* src, size_t count
) {
on(task, [dst, src, count](cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream),
"failed to capture memcpy"
);
});
}
// Function: copy
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
void cudaFlowCapturer::copy(
cudaTask task, T* tgt, const T* src, size_t num
) {
on(task, [tgt, src, num] (cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream),
"failed to capture copy"
);
});
}
// Function: memset
inline void cudaFlowCapturer::memset(
cudaTask task, void* ptr, int v, size_t n
) {
on(task, [ptr, v, n] (cudaStream_t stream) mutable {
TF_CHECK_CUDA(
cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset"
);
});
}
// Function: kernel
template <typename F, typename... ArgsT>
void cudaFlowCapturer::kernel(
cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
) {
on(task, [g, b, s, f, args...] (cudaStream_t stream) mutable {
f<<<g, b, s, stream>>>(args...);
});
}
// Function: make_optimizer
template <typename OPT, typename ...ArgsT>
OPT& cudaFlowCapturer::make_optimizer(ArgsT&&... args) {
return _optimizer.emplace<OPT>(std::forward<ArgsT>(args)...);
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/sort.hpp | #pragma once
#include "merge.hpp"
/**
@file taskflow/cuda/algorithm/sort.hpp
@brief CUDA sort algorithm include file
*/
namespace tf::detail {
// ----------------------------------------------------------------------------
// odd-even sort in register
// ----------------------------------------------------------------------------
/**
@private
@brief counts the number of leading zeros starting from the most significant bit
*/
constexpr int cuda_clz(int x) {
for(int i = 31; i >= 0; --i) {
if((1<< i) & x) {
return 31 - i;
}
}
return 32;
}
/**
@private
@brief finds log2(x) and optionally rounds up to the next integer logarithm.
*/
constexpr int cuda_find_log2(int x, bool round_up = false) {
int a = 31 - cuda_clz(x);
if(round_up) {
a += !is_pow2(x);
}
return a;
}
/** @private */
template<typename T, unsigned vt, typename C>
__device__ auto cuda_odd_even_sort(
cudaArray<T, vt> x, C comp, int flags = 0
) {
cuda_iterate<vt>([&](auto I) {
#pragma unroll
for(auto i = 1 & I; i < vt - 1; i += 2) {
if((0 == ((2<< i) & flags)) && comp(x[i + 1], x[i]))
cuda_swap(x[i], x[i + 1]);
}
});
return x;
}
/** @private */
template<typename K, typename V, unsigned vt, typename C>
__device__ auto cuda_odd_even_sort(
cudaKVArray<K, V, vt> x, C comp, int flags = 0
) {
cuda_iterate<vt>([&](auto I) {
#pragma unroll
for(auto i = 1 & I; i < vt - 1; i += 2) {
if((0 == ((2<< i) & flags)) && comp(x.keys[i + 1], x.keys[i])) {
cuda_swap(x.keys[i], x.keys[i + 1]);
cuda_swap(x.vals[i], x.vals[i + 1]);
}
}
});
return x;
}
// ----------------------------------------------------------------------------
// range check
// ----------------------------------------------------------------------------
/** @private */
__device__ inline int cuda_out_of_range_flags(int first, int vt, int count) {
int out_of_range = min(vt, first + vt - count);
int head_flags = 0;
if(out_of_range > 0) {
const int mask = (1<< vt) - 1;
head_flags = mask & (~mask>> out_of_range);
}
return head_flags;
}
/** @private */
__device__ inline auto cuda_compute_merge_sort_frame(
unsigned partition, unsigned coop, unsigned spacing
) {
unsigned size = spacing * (coop / 2);
unsigned start = ~(coop - 1) & partition;
unsigned a_begin = spacing * start;
unsigned b_begin = spacing * start + size;
return cudaMergeRange {
a_begin,
a_begin + size,
b_begin,
b_begin + size
};
}
/** @private */
__device__ inline auto cuda_compute_merge_sort_range(
unsigned count, unsigned partition, unsigned coop, unsigned spacing
) {
auto frame = cuda_compute_merge_sort_frame(partition, coop, spacing);
return cudaMergeRange {
frame.a_begin,
min(count, frame.a_end),
min(count, frame.b_begin),
min(count, frame.b_end)
};
}
/** @private */
__device__ inline auto cuda_compute_merge_sort_range(
unsigned count, unsigned partition, unsigned coop, unsigned spacing,
unsigned mp0, unsigned mp1
) {
auto range = cuda_compute_merge_sort_range(count, partition, coop, spacing);
// Locate the diagonal from the start of the A sublist.
unsigned diag = spacing * partition - range.a_begin;
// The end partition of the last cta for each merge operation is computed
// and stored as the begin partition for the subsequent merge. i.e. it is
// the same partition but in the wrong coordinate system, so it is 0 when it
// should be listSize. Correct that by checking if this is the last cta
// in this merge operation.
if(coop - 1 != ((coop - 1) & partition)) {
range.a_end = range.a_begin + mp1;
range.b_end = min(count, range.b_begin + diag + spacing - mp1);
}
range.a_begin = range.a_begin + mp0;
range.b_begin = min(count, range.b_begin + diag - mp0);
return range;
}
/** @private */
template<unsigned nt, unsigned vt, typename K, typename V>
struct cudaBlockSort {
static constexpr bool has_values = !std::is_same<V, cudaEmpty>::value;
static constexpr unsigned num_passes = log2(nt);
/** @private */
union Storage {
K keys[nt * vt + 1];
V vals[nt * vt];
};
static_assert(is_pow2(nt), "cudaBlockSort requires pow2 number of threads");
template<typename C>
__device__ auto merge_pass(
cudaKVArray<K, V, vt> x,
unsigned tid, unsigned count, unsigned pass,
C comp, Storage& storage
) const {
// Divide the CTA's keys into lists.
unsigned coop = 2 << pass;
auto range = cuda_compute_merge_sort_range(count, tid, coop, vt);
unsigned diag = vt * tid - range.a_begin;
// Store the keys into shared memory for searching.
cuda_reg_to_shared_thread<nt, vt>(x.keys, tid, storage.keys);
// Search for the merge path for this thread within its list.
auto mp = cuda_merge_path<cudaMergeBoundType::LOWER>(
storage.keys, range, diag, comp
);
// Run a serial merge and return.
auto merge = cuda_serial_merge<cudaMergeBoundType::LOWER, vt>(
storage.keys, range.partition(mp, diag), comp
);
x.keys = merge.keys;
if(has_values) {
// Reorder values through shared memory.
cuda_reg_to_shared_thread<nt, vt>(x.vals, tid, storage.vals);
x.vals = cuda_shared_gather<nt, vt>(storage.vals, merge.indices);
}
return x;
}
template<typename C>
__device__ auto block_sort(cudaKVArray<K, V, vt> x,
unsigned tid, unsigned count, C comp, Storage& storage
) const {
// Sort the inputs within each thread. If any threads have fewer than
// vt items, use the segmented sort network to prevent out-of-range
// elements from contaminating the sort.
if(count < nt * vt) {
auto head_flags = cuda_out_of_range_flags(vt * tid, vt, count);
x = cuda_odd_even_sort(x, comp, head_flags);
} else {
x = cuda_odd_even_sort(x, comp);
}
// Merge threads starting with a pair until all values are merged.
for(unsigned pass = 0; pass < num_passes; ++pass) {
x = merge_pass(x, tid, count, pass, comp, storage);
}
return x;
}
};
/** @private */
template<typename P, typename K, typename C>
void cuda_merge_sort_partitions(
P&& p, K keys, unsigned count,
unsigned coop, unsigned spacing, C comp, unsigned* buf
) {
// buffer size is num_partitions + 1
unsigned num_partitions = (count + spacing - 1) / spacing + 1;
const unsigned nt = 128;
const unsigned vt = 1;
const unsigned nv = nt * vt;
unsigned B = (num_partitions + nv - 1) / nv; // nt = 128, vt = 1
cuda_kernel<<<B, nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
auto range = cuda_get_tile(bid, nt * vt, num_partitions);
cuda_strided_iterate<nt, vt>([=](auto, auto j) {
auto index = j + range.begin;
auto range = cuda_compute_merge_sort_range(count, index, coop, spacing);
auto diag = min(spacing * index, count) - range.a_begin;
buf[index] = cuda_merge_path<cudaMergeBoundType::LOWER>(
keys + range.a_begin, range.a_count(),
keys + range.b_begin, range.b_count(),
diag, comp
);
}, tid, range.count());
});
}
/** @private */
template<typename P, typename K_it, typename V_it, typename C>
void merge_sort_loop(
P&& p, K_it keys_input, V_it vals_input, unsigned count, C comp, void* buf
) {
using K = typename std::iterator_traits<K_it>::value_type;
using V = typename std::iterator_traits<V_it>::value_type;
using E = std::decay_t<P>;
const bool has_values = !std::is_same<V, cudaEmpty>::value;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned R = cuda_find_log2(B, true);
K* keys_output {nullptr};
V* vals_output {nullptr};
unsigned *mp_data {nullptr};
if(R) {
keys_output = (K*)(buf);
if(has_values) {
vals_output = (V*)(keys_output + count);
mp_data = (unsigned*)(vals_output + count);
}
else {
mp_data = (unsigned*)(keys_output + count);
}
}
//cudaDeviceVector<K> keys_temp(R ? count : 0);
//auto keys_output = keys_temp.data();
////std::cout << "keys_output = " << keys_temp.size()*sizeof(K) << std::endl;
//cudaDeviceVector<V> vals_temp((has_values && R) ? count : 0);
//auto vals_output = vals_temp.data();
//std::cout << "vals_output = " << vals_temp.size()*sizeof(V) << std::endl;
auto keys_blocksort = (1 & R) ? keys_output : keys_input;
auto vals_blocksort = (1 & R) ? vals_output : vals_input;
//printf("B=%u, R=%u\n", B, R);
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
using sort_t = cudaBlockSort<E::nt, E::vt, K, V>;
__shared__ union {
typename sort_t::Storage sort;
K keys[E::nv];
V vals[E::nv];
} shared;
auto tile = cuda_get_tile(bid, E::nv, count);
// Load the keys and values.
cudaKVArray<K, V, E::vt> unsorted;
unsorted.keys = cuda_mem_to_reg_thread<E::nt, E::vt>(
keys_input + tile.begin, tid, tile.count(), shared.keys
);
if(has_values) {
unsorted.vals = cuda_mem_to_reg_thread<E::nt, E::vt>(
vals_input + tile.begin, tid, tile.count(), shared.vals
);
}
// Blocksort.
auto sorted = sort_t().block_sort(unsorted, tid, tile.count(), comp, shared.sort);
// Store the keys and values.
cuda_reg_to_mem_thread<E::nt, E::vt>(
sorted.keys, tid, tile.count(), keys_blocksort + tile.begin, shared.keys
);
if(has_values) {
cuda_reg_to_mem_thread<E::nt, E::vt>(
sorted.vals, tid, tile.count(), vals_blocksort + tile.begin, shared.vals
);
}
});
if(R == 0) {
return;
}
// merge passes
if(1 & R) {
std::swap(keys_input, keys_output);
std::swap(vals_input, vals_output);
}
// number of partitions
//unsigned num_partitions = B + 1;
//cudaDeviceVector<unsigned> mem(num_partitions);
//auto mp_data = mem.data();
//std::cout << "num_partitions = " << (B+1)*sizeof(unsigned) << std::endl;
for(unsigned pass = 0; pass < R; ++pass) {
unsigned coop = 2 << pass;
cuda_merge_sort_partitions(
p, keys_input, count, coop, E::nv, comp, mp_data
);
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=]__device__(auto tid, auto bid) {
__shared__ union {
K keys[E::nv + 1];
unsigned indices[E::nv];
} shared;
auto tile = cuda_get_tile(bid, E::nv, count);
// Load the range for this CTA and merge the values into register.
auto range = cuda_compute_merge_sort_range(
count, bid, coop, E::nv, mp_data[bid + 0], mp_data[bid + 1]
);
auto merge = block_merge_from_mem<cudaMergeBoundType::LOWER, E::nt, E::vt>(
keys_input, keys_input, range, tid, comp, shared.keys
);
// Store merged values back out.
cuda_reg_to_mem_thread<E::nt>(
merge.keys, tid, tile.count(), keys_output + tile.begin, shared.keys
);
if(has_values) {
// Transpose the indices from thread order to strided order.
auto indices = cuda_reg_thread_to_strided<E::nt>(
merge.indices, tid, shared.indices
);
// Gather the input values and merge into the output values.
cuda_transfer_two_streams_strided<E::nt>(
vals_input + range.a_begin, range.a_count(),
vals_input + range.b_begin, range.b_count(),
indices, tid, vals_output + tile.begin
);
}
});
std::swap(keys_input, keys_output);
std::swap(vals_input, vals_output);
}
}
} // end of namespace tf::detail ---------------------------------------------
namespace tf {
/**
@brief queries the buffer size in bytes needed to call sort kernels
for the given number of elements
@tparam P execution policy type
@tparam K key type
@tparam V value type (default tf::cudaEmpty)
@param count number of keys/values to sort
The function is used to allocate a buffer for calling tf::cuda_sort.
*/
template <typename P, typename K, typename V = cudaEmpty>
unsigned cuda_sort_buffer_size(unsigned count) {
using E = std::decay_t<P>;
const bool has_values = !std::is_same<V, cudaEmpty>::value;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned R = detail::cuda_find_log2(B, true);
return R ? (count * sizeof(K) + (has_values ? count*sizeof(V) : 0) +
(B+1)*sizeof(unsigned)) : 0;
}
// ----------------------------------------------------------------------------
// key-value sort
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous key-value sort on a range of items
@tparam P execution policy type
@tparam K_it key iterator type
@tparam V_it value iterator type
@tparam C comparator type
@param p execution policy
@param k_first iterator to the beginning of the key range
@param k_last iterator to the end of the key range
@param v_first iterator to the beginning of the value range
@param comp binary comparator
@param buf pointer to the temporary buffer
Sorts key-value elements in <tt>[k_first, k_last)</tt> and
<tt>[v_first, v_first + (k_last - k_first))</tt> into ascending key order
using the given comparator @c comp.
If @c i and @c j are any two valid iterators in <tt>[k_first, k_last)</tt>
such that @c i precedes @c j, and @c p and @c q are iterators in
<tt>[v_first, v_first + (k_last - k_first))</tt> corresponding to
@c i and @c j respectively, then <tt>comp(*j, *i)</tt> evaluates to @c false.
For example, assume:
+ @c keys are <tt>{1, 4, 2, 8, 5, 7}</tt>
+ @c values are <tt>{'a', 'b', 'c', 'd', 'e', 'f'}</tt>
After sort:
+ @c keys are <tt>{1, 2, 4, 5, 7, 8}</tt>
+ @c values are <tt>{'a', 'c', 'b', 'e', 'f', 'd'}</tt>
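A possible host-side sketch (illustrative; @c keys, @c values, and @c N are
assumptions, and the temporary buffer is sized with tf::cuda_sort_buffer_size):
@code{.cpp}
tf::cudaStream stream;
tf::cudaDefaultExecutionPolicy policy(stream);
// query and allocate the temporary buffer (may be zero bytes for small inputs)
auto bytes = tf::cuda_sort_buffer_size<tf::cudaDefaultExecutionPolicy, int, char>(N);
void* buf = nullptr;
cudaMalloc(&buf, bytes);
tf::cuda_sort_by_key(
  policy, keys, keys + N, values,
  [] __device__ (int x, int y) { return x < y; }, buf
);
stream.synchronize();   // the sort runs asynchronously through the stream
cudaFree(buf);
@endcode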
*/
template<typename P, typename K_it, typename V_it, typename C>
void cuda_sort_by_key(
P&& p, K_it k_first, K_it k_last, V_it v_first, C comp, void* buf
) {
unsigned N = std::distance(k_first, k_last);
if(N <= 1) {
return;
}
detail::merge_sort_loop(p, k_first, v_first, N, comp, buf);
}
// ----------------------------------------------------------------------------
// key sort
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous key-only sort on a range of items
@tparam P execution policy type
@tparam K_it key iterator type
@tparam C comparator type
@param p execution policy
@param k_first iterator to the beginning of the key range
@param k_last iterator to the end of the key range
@param comp binary comparator
@param buf pointer to the temporary buffer
This method is equivalent to tf::cuda_sort_by_key without values.
*/
template<typename P, typename K_it, typename C>
void cuda_sort(P&& p, K_it k_first, K_it k_last, C comp, void* buf) {
cuda_sort_by_key(p, k_first, k_last, (cudaEmpty*)nullptr, comp, buf);
}
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: sort
template <typename I, typename C>
cudaTask cudaFlow::sort(I first, I last, C comp) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.sort(first, last, comp);
});
}
// Function: sort
template <typename I, typename C>
void cudaFlow::sort(cudaTask task, I first, I last, C comp) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.sort(first, last, comp);
});
}
// Function: sort_by_key
template <typename K_it, typename V_it, typename C>
cudaTask cudaFlow::sort_by_key(K_it k_first, K_it k_last, V_it v_first, C comp) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.sort_by_key(k_first, k_last, v_first, comp);
});
}
// Function: sort_by_key
template <typename K_it, typename V_it, typename C>
void cudaFlow::sort_by_key(
cudaTask task, K_it k_first, K_it k_last, V_it v_first, C comp
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.sort_by_key(k_first, k_last, v_first, comp);
});
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: sort
template <typename I, typename C>
cudaTask cudaFlowCapturer::sort(I first, I last, C comp) {
using K = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_sort_buffer_size<cudaDefaultExecutionPolicy, K>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_sort(
cudaDefaultExecutionPolicy{stream}, first, last, comp, buf.get().data()
);
});
}
// Function: sort
template <typename I, typename C>
void cudaFlowCapturer::sort(cudaTask task, I first, I last, C comp) {
using K = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_sort_buffer_size<cudaDefaultExecutionPolicy, K>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_sort(
cudaDefaultExecutionPolicy{stream}, first, last, comp, buf.get().data()
);
});
}
// Function: sort_by_key
template <typename K_it, typename V_it, typename C>
cudaTask cudaFlowCapturer::sort_by_key(
K_it k_first, K_it k_last, V_it v_first, C comp
) {
using K = typename std::iterator_traits<K_it>::value_type;
using V = typename std::iterator_traits<V_it>::value_type;
auto bufsz = cuda_sort_buffer_size<cudaDefaultExecutionPolicy, K, V>(
std::distance(k_first, k_last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_sort_by_key(cudaDefaultExecutionPolicy{stream},
k_first, k_last, v_first, comp, buf.get().data()
);
});
}
// Function: sort_by_key
template <typename K_it, typename V_it, typename C>
void cudaFlowCapturer::sort_by_key(
cudaTask task, K_it k_first, K_it k_last, V_it v_first, C comp
) {
using K = typename std::iterator_traits<K_it>::value_type;
using V = typename std::iterator_traits<V_it>::value_type;
auto bufsz = cuda_sort_buffer_size<cudaDefaultExecutionPolicy, K, V>(
std::distance(k_first, k_last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_sort_by_key(cudaDefaultExecutionPolicy{stream},
k_first, k_last, v_first, comp, buf.get().data()
);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/for_each.hpp | #pragma once
#include "../cudaflow.hpp"
/**
@file taskflow/cuda/algorithm/for_each.hpp
@brief cuda parallel-iteration algorithms include file
*/
namespace tf {
namespace detail {
/** @private */
template <typename P, typename I, typename C>
void cuda_for_each_loop(P&& p, I first, unsigned count, C c) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>(
[=] __device__ (auto tid, auto bid) {
auto tile = cuda_get_tile(bid, E::nv, count);
cuda_strided_iterate<E::nt, E::vt>([=](auto, auto j) {
c(*(first + tile.begin + j));
}, tid, tile.count());
});
}
/** @private */
template <typename P, typename I, typename C>
void cuda_for_each_index_loop(
P&& p, I first, I inc, unsigned count, C c
) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>(
[=]__device__(auto tid, auto bid) {
auto tile = cuda_get_tile(bid, E::nv, count);
cuda_strided_iterate<E::nt, E::vt>([=]__device__(auto, auto j) {
c(first + inc*(tile.begin+j));
}, tid, tile.count());
});
}
} // end of namespace detail -------------------------------------------------
// ----------------------------------------------------------------------------
// cuda standard algorithms: single_task/for_each/for_each_index
// ----------------------------------------------------------------------------
/**
@brief runs a callable asynchronously using one kernel thread
@tparam P execution policy type
@tparam C closure type
@param p execution policy
@param c closure to run by one kernel thread
The function launches a single kernel thread to run the given callable
through the stream in the execution policy object.
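A minimal sketch (illustrative; @c res is assumed to be a device-accessible
<tt>int*</tt>):
@code{.cpp}
tf::cudaStream stream;
tf::cudaDefaultExecutionPolicy policy(stream);
tf::cuda_single_task(policy, [=] __device__ () { *res = 42; });
stream.synchronize();
@endcode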
*/
template <typename P, typename C>
void cuda_single_task(P&& p, C c) {
cuda_kernel<<<1, 1, 0, p.stream()>>>(
[=]__device__(auto, auto) mutable { c(); }
);
}
/**
@brief performs asynchronous parallel iterations over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam C unary operator type
@param p execution policy object
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param c unary operator to apply to each dereferenced iterator
This function is equivalent to a parallel execution of the following loop
on a GPU:
@code{.cpp}
for(auto itr = first; itr != last; itr++) {
c(*itr);
}
@endcode
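A minimal sketch (illustrative; @c data is assumed to be a device-accessible
array of @c N floats):
@code{.cpp}
tf::cudaStream stream;
tf::cudaDefaultExecutionPolicy policy(stream);
tf::cuda_for_each(
  policy, data, data + N,
  [] __device__ (float& x) { x *= 2.0f; }   // double every element in place
);
stream.synchronize();
@endcode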
*/
template <typename P, typename I, typename C>
void cuda_for_each(P&& p, I first, I last, C c) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
detail::cuda_for_each_loop(p, first, count, c);
}
/**
@brief performs asynchronous parallel iterations over
an index-based range of items
@tparam P execution policy type
@tparam I input index type
@tparam C unary operator type
@param p execution policy object
@param first index to the beginning of the range
@param last index to the end of the range
@param inc step size between successive iterations
@param c unary operator to apply to each index
This function is equivalent to a parallel execution of
the following loop on a GPU:
@code{.cpp}
// inc is positive: [first, last)
for(auto i=first; i<last; i+=inc) {
  c(i);
}
// inc is negative: [first, last)
for(auto i=first; i>last; i+=inc) {
  c(i);
}
@endcode
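A minimal sketch (illustrative; @c data is assumed to be a device-accessible
array and @c N an @c int index count):
@code{.cpp}
tf::cudaStream stream;
tf::cudaDefaultExecutionPolicy policy(stream);
// zero every even index in [0, N)
tf::cuda_for_each_index(
  policy, 0, N, 2,
  [=] __device__ (int i) { data[i] = 0; }
);
stream.synchronize();
@endcode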
*/
template <typename P, typename I, typename C>
void cuda_for_each_index(P&& p, I first, I last, I inc, C c) {
if(is_range_invalid(first, last, inc)) {
TF_THROW("invalid range [", first, ", ", last, ") with inc size ", inc);
}
unsigned count = distance(first, last, inc);
if(count == 0) {
return;
}
detail::cuda_for_each_index_loop(p, first, inc, count, c);
}
// ----------------------------------------------------------------------------
// single_task
// ----------------------------------------------------------------------------
/** @private */
template <typename C>
__global__ void cuda_single_task(C callable) {
callable();
}
// Function: single_task
template <typename C>
cudaTask cudaFlow::single_task(C c) {
return kernel(1, 1, 0, cuda_single_task<C>, c);
}
// Function: single_task
template <typename C>
void cudaFlow::single_task(cudaTask task, C c) {
return kernel(task, 1, 1, 0, cuda_single_task<C>, c);
}
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
cudaTask cudaFlow::for_each(I first, I last, C c) {
return capture([=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.for_each(first, last, c);
});
}
// Function: for_each_index
template <typename I, typename C>
cudaTask cudaFlow::for_each_index(I first, I last, I inc, C c) {
return capture([=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.for_each_index(first, last, inc, c);
});
}
// Function: for_each
template <typename I, typename C>
void cudaFlow::for_each(cudaTask task, I first, I last, C c) {
capture(task, [=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.for_each(first, last, c);
});
}
// Function: for_each_index
template <typename I, typename C>
void cudaFlow::for_each_index(cudaTask task, I first, I last, I inc, C c) {
capture(task, [=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.for_each_index(first, last, inc, c);
});
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
cudaTask cudaFlowCapturer::for_each(I first, I last, C c) {
return on([=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_for_each(p, first, last, c);
});
}
// Function: for_each_index
template <typename I, typename C>
cudaTask cudaFlowCapturer::for_each_index(I beg, I end, I inc, C c) {
return on([=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_for_each_index(p, beg, end, inc, c);
});
}
// Function: for_each
template <typename I, typename C>
void cudaFlowCapturer::for_each(cudaTask task, I first, I last, C c) {
on(task, [=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_for_each(p, first, last, c);
});
}
// Function: for_each_index
template <typename I, typename C>
void cudaFlowCapturer::for_each_index(
cudaTask task, I beg, I end, I inc, C c
) {
on(task, [=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_for_each_index(p, beg, end, inc, c);
});
}
// Function: single_task
template <typename C>
cudaTask cudaFlowCapturer::single_task(C callable) {
return on([=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_single_task(p, callable);
});
}
// Function: single_task
template <typename C>
void cudaFlowCapturer::single_task(cudaTask task, C callable) {
on(task, [=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_single_task(p, callable);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/transpose.hpp | #pragma once
#include "../cuda_error.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// row-wise matrix transpose
// ----------------------------------------------------------------------------
//
template <typename T>
__global__ void cuda_transpose(
const T* d_in,
T* d_out,
size_t rows,
size_t cols
) {
__shared__ T tile[32][32];
size_t x = blockIdx.x * 32 + threadIdx.x;
size_t y = blockIdx.y * 32 + threadIdx.y;
for(size_t i = 0; i < 32; i += 8) {
if(x < cols && (y + i) < rows) {
tile[threadIdx.y + i][threadIdx.x] = d_in[(y + i) * cols + x];
}
}
__syncthreads();
x = blockIdx.y * 32 + threadIdx.x;
y = blockIdx.x * 32 + threadIdx.y;
for(size_t i = 0; i < 32; i += 8) {
if(x < rows && (y + i) < cols) {
d_out[(y + i) * rows + x] = tile[threadIdx.x][threadIdx.y + i];
}
}
}
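// A possible launch configuration (illustrative, not part of the original
// interface): the kernel reads and writes 32x32 tiles using 32x8 thread
// blocks, so a rows-by-cols matrix can be transposed as follows, assuming
// d_in, d_out, rows, cols, and stream are prepared by the caller:
//
//   dim3 grid((cols + 31) / 32, (rows + 31) / 32);
//   dim3 block(32, 8);
//   cuda_transpose<<<grid, block, 0, stream>>>(d_in, d_out, rows, cols);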
} // end of namespace --------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/reduce.hpp | #pragma once
#include "../cudaflow.hpp"
/**
@file taskflow/cuda/algorithm/reduce.hpp
@brief cuda reduce algorithms include file
*/
namespace tf::detail {
// ----------------------------------------------------------------------------
// reduction helper functions
// ----------------------------------------------------------------------------
/** @private */
template<unsigned nt, typename T>
struct cudaBlockReduce {
static const unsigned group_size = std::min(nt, CUDA_WARP_SIZE);
static const unsigned num_passes = log2(group_size);
static const unsigned num_items = nt / group_size;
static_assert(
nt && (0 == nt % CUDA_WARP_SIZE),
"cudaBlockReduce requires num threads to be a multiple of warp_size (32)"
);
/** @private */
struct Storage {
T data[std::max(nt, 2 * group_size)];
};
template<typename op_t>
__device__ T operator()(unsigned, T, Storage&, unsigned, op_t, bool = true) const;
};
// function: reduce to be called from a block
template<unsigned nt, typename T>
template<typename op_t>
__device__ T cudaBlockReduce<nt, T>::operator ()(
unsigned tid, T x, Storage& storage, unsigned count, op_t op, bool ret
) const {
// Store your data into shared memory.
storage.data[tid] = x;
__syncthreads();
if(tid < group_size) {
// Each thread scans within its lane.
cuda_strided_iterate<group_size, num_items>([&](auto i, auto j) {
if(i > 0) {
x = op(x, storage.data[j]);
}
}, tid, count);
storage.data[tid] = x;
}
__syncthreads();
auto count2 = count < group_size ? count : group_size;
auto first = (1 & num_passes) ? group_size : 0;
if(tid < group_size) {
storage.data[first + tid] = x;
}
__syncthreads();
cuda_iterate<num_passes>([&](auto pass) {
if(tid < group_size) {
if(auto offset = 1 << pass; tid + offset < count2) {
x = op(x, storage.data[first + offset + tid]);
}
first = group_size - first;
storage.data[first + tid] = x;
}
__syncthreads();
});
if(ret) {
x = storage.data[0];
__syncthreads();
}
return x;
}
/** @private */
template <typename P, typename I, typename T, typename O>
void cuda_reduce_loop(
P&& p, I input, unsigned count, T* res, O op, void* ptr
) {
using U = typename std::iterator_traits<I>::value_type;
using E = std::decay_t<P>;
auto buf = static_cast<U*>(ptr);
auto B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
__shared__ typename cudaBlockReduce<E::nt, U>::Storage shm;
auto tile = cuda_get_tile(bid, E::nv, count);
auto x = cuda_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
// reduce multiple values per thread into a scalar.
U s;
cuda_strided_iterate<E::nt, E::vt>(
[&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count()
);
// reduce to a scalar per block.
s = cudaBlockReduce<E::nt, U>()(
tid, s, shm, (tile.count() < E::nt ? tile.count() : E::nt), op, false
);
if(!tid) {
(1 == B) ? *res = op(*res, s) : buf[bid] = s;
}
});
if(B > 1) {
cuda_reduce_loop(p, buf, B, res, op, buf+B);
}
}
/** @private */
template <typename P, typename I, typename T, typename O>
void cuda_uninitialized_reduce_loop(
P&& p, I input, unsigned count, T* res, O op, void* ptr
) {
using U = typename std::iterator_traits<I>::value_type;
using E = std::decay_t<P>;
auto buf = static_cast<U*>(ptr);
auto B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
__shared__ typename cudaBlockReduce<E::nt, U>::Storage shm;
auto tile = cuda_get_tile(bid, E::nv, count);
auto x = cuda_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
// reduce multiple values per thread into a scalar.
U s;
cuda_strided_iterate<E::nt, E::vt>(
[&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count()
);
// reduce to a scalar per block.
s = cudaBlockReduce<E::nt, U>()(
tid, s, shm, (tile.count() < E::nt ? tile.count() : E::nt), op, false
);
if(!tid) {
(1 == B) ? *res = s : buf[bid] = s;
}
});
if(B > 1) {
cuda_uninitialized_reduce_loop(p, buf, B, res, op, buf+B);
}
}
} // namespace tf::detail ----------------------------------------------------
namespace tf {
/**
@brief queries the buffer size in bytes needed to call reduce kernels
@tparam P execution policy type
@tparam T value type
@param count number of elements to reduce
The function is used to allocate a buffer for calling tf::cuda_reduce,
tf::cuda_uninitialized_reduce, tf::cuda_transform_reduce, and
tf::cuda_transform_uninitialized_reduce.
*/
template <typename P, typename T>
unsigned cuda_reduce_buffer_size(unsigned count) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned n = 0;
for(auto b=B; b>1; n += (b=(b+E::nv-1)/E::nv));
return n*sizeof(T);
}
// ----------------------------------------------------------------------------
// cuda_reduce
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous parallel reduction over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
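A possible host-side sketch (illustrative; @c data, @c N, and @c res are
assumptions, with @c res pointing to device-accessible memory that already
holds the initial value):
@code{.cpp}
tf::cudaStream stream;
tf::cudaDefaultExecutionPolicy policy(stream);
// query and allocate the temporary buffer (may be zero bytes for small inputs)
auto bytes = tf::cuda_reduce_buffer_size<tf::cudaDefaultExecutionPolicy, float>(N);
void* buf = nullptr;
cudaMalloc(&buf, bytes);
tf::cuda_reduce(
  policy, data, data + N, res,
  [] __device__ (float a, float b) { return a + b; }, buf
);
stream.synchronize();
cudaFree(buf);
@endcode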
*/
template <typename P, typename I, typename T, typename O>
void cuda_reduce(
P&& p, I first, I last, T* res, O op, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
detail::cuda_reduce_loop(p, first, count, res, op, buf);
}
// ----------------------------------------------------------------------------
// cuda_uninitialized_reduce
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous parallel reduction over a range of items without
an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
*result = *first++; // no initial value participates in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename P, typename I, typename T, typename O>
void cuda_uninitialized_reduce(
P&& p, I first, I last, T* res, O op, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
detail::cuda_uninitialized_reduce_loop(p, first, count, res, op, buf);
}
// ----------------------------------------------------------------------------
// transform_reduce
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous parallel reduction over a range of transformed items
with an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@tparam U unary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param bop binary operator to apply to reduce elements
@param uop unary operator to apply to transform elements
@param buf pointer to the temporary buffer
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template<typename P, typename I, typename T, typename O, typename U>
void cuda_transform_reduce(
P&& p, I first, I last, T* res, O bop, U uop, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// reduction loop
detail::cuda_reduce_loop(p,
cuda_make_load_iterator<T>([=]__device__(auto i){
return uop(*(first+i));
}),
count, res, bop, buf
);
}
// ----------------------------------------------------------------------------
// transform_uninitialized_reduce
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous parallel reduction over a range of transformed items
without an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@tparam U unary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param bop binary operator to apply to reduce elements
@param uop unary operator to apply to transform elements
@param buf pointer to the temporary buffer
This method is equivalent to the parallel execution of the following loop
on a GPU:
@code{.cpp}
*result = uop(*first++); // no initial value participates in the loop
while (first != last) {
*result = bop(*result, uop(*first++));
}
@endcode
*/
template<typename P, typename I, typename T, typename O, typename U>
void cuda_transform_uninitialized_reduce(
P&& p, I first, I last, T* res, O bop, U uop, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// reduction loop
//detail::cuda_transform_reduce_loop(
// p, first, count, res, bop, uop, false, s, buf
//);
detail::cuda_uninitialized_reduce_loop(p,
cuda_make_load_iterator<T>([=]__device__(auto i){ return uop(*(first+i)); }),
count, res, bop, buf
);
}
// ----------------------------------------------------------------------------
//template <typename T, typename C>
//__device__ void cuda_warp_reduce(
// volatile T* shm, size_t N, size_t tid, C op
//) {
// if(tid + 32 < N) shm[tid] = op(shm[tid], shm[tid+32]);
// if(tid + 16 < N) shm[tid] = op(shm[tid], shm[tid+16]);
// if(tid + 8 < N) shm[tid] = op(shm[tid], shm[tid+8]);
// if(tid + 4 < N) shm[tid] = op(shm[tid], shm[tid+4]);
// if(tid + 2 < N) shm[tid] = op(shm[tid], shm[tid+2]);
// if(tid + 1 < N) shm[tid] = op(shm[tid], shm[tid+1]);
//}
//
//template <typename I, typename T, typename C, bool uninitialized>
//__global__ void cuda_reduce(I first, size_t N, T* res, C op) {
//
// size_t tid = threadIdx.x;
//
// if(tid >= N) {
// return;
// }
//
// cudaSharedMemory<T> shared_memory;
// T* shm = shared_memory.get();
//
// shm[tid] = *(first+tid);
//
// for(size_t i=tid+blockDim.x; i<N; i+=blockDim.x) {
// shm[tid] = op(shm[tid], *(first+i));
// }
//
// __syncthreads();
//
// for(size_t s = blockDim.x / 2; s > 32; s >>= 1) {
// if(tid < s && tid + s < N) {
// shm[tid] = op(shm[tid], shm[tid+s]);
// }
// __syncthreads();
// }
//
// if(tid < 32) {
// cuda_warp_reduce(shm, N, tid, op);
// }
//
// if(tid == 0) {
// if constexpr (uninitialized) {
// *res = shm[0];
// }
// else {
// *res = op(*res, shm[0]);
// }
// }
//}
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: reduce
template <typename I, typename T, typename C>
cudaTask cudaFlowCapturer::reduce(I first, I last, T* result, C c) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_reduce(p, first, last, result, c, buf.get().data());
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename C>
cudaTask cudaFlowCapturer::uninitialized_reduce(I first, I last, T* result, C c) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_uninitialized_reduce(p, first, last, result, c, buf.get().data());
});
}
// Function: transform_reduce
template <typename I, typename T, typename C, typename U>
cudaTask cudaFlowCapturer::transform_reduce(
I first, I last, T* result, C bop, U uop
) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_reduce(
p, first, last, result, bop, uop, buf.get().data()
);
});
}
// Function: transform_uninitialized_reduce
template <typename I, typename T, typename C, typename U>
cudaTask cudaFlowCapturer::transform_uninitialized_reduce(
I first, I last, T* result, C bop, U uop) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_uninitialized_reduce(
p, first, last, result, bop, uop, buf.get().data()
);
});
}
// Function: reduce
template <typename I, typename T, typename C>
void cudaFlowCapturer::reduce(
cudaTask task, I first, I last, T* result, C c
) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_reduce(p, first, last, result, c, buf.get().data());
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename C>
void cudaFlowCapturer::uninitialized_reduce(
cudaTask task, I first, I last, T* result, C c
) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_uninitialized_reduce(p, first, last, result, c, buf.get().data());
});
}
// Function: transform_reduce
template <typename I, typename T, typename C, typename U>
void cudaFlowCapturer::transform_reduce(
cudaTask task, I first, I last, T* result, C bop, U uop
) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_reduce(
p, first, last, result, bop, uop, buf.get().data()
);
});
}
// Function: transform_uninitialized_reduce
template <typename I, typename T, typename C, typename U>
void cudaFlowCapturer::transform_uninitialized_reduce(
cudaTask task, I first, I last, T* result, C bop, U uop
) {
// TODO
auto bufsz = cuda_reduce_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_uninitialized_reduce(
p, first, last, result, bop, uop, buf.get().data()
);
});
}
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: reduce
template <typename I, typename T, typename B>
cudaTask cudaFlow::reduce(I first, I last, T* result, B bop) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.reduce(first, last, result, bop);
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename B>
cudaTask cudaFlow::uninitialized_reduce(I first, I last, T* result, B bop) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.uninitialized_reduce(first, last, result, bop);
});
}
// Function: transform_reduce
template <typename I, typename T, typename B, typename U>
cudaTask cudaFlow::transform_reduce(I first, I last, T* result, B bop, U uop) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_reduce(first, last, result, bop, uop);
});
}
// Function: transform_uninitialized_reduce
template <typename I, typename T, typename B, typename U>
cudaTask cudaFlow::transform_uninitialized_reduce(
I first, I last, T* result, B bop, U uop
) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_uninitialized_reduce(first, last, result, bop, uop);
});
}
// Function: reduce
template <typename I, typename T, typename C>
void cudaFlow::reduce(cudaTask task, I first, I last, T* result, C op) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.reduce(first, last, result, op);
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename C>
void cudaFlow::uninitialized_reduce(
cudaTask task, I first, I last, T* result, C op
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.uninitialized_reduce(first, last, result, op);
});
}
// Function: transform_reduce
template <typename I, typename T, typename B, typename U>
void cudaFlow::transform_reduce(
cudaTask task, I first, I last, T* result, B bop, U uop
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_reduce(first, last, result, bop, uop);
});
}
// Function: transform_uninitialized_reduce
template <typename I, typename T, typename B, typename U>
void cudaFlow::transform_uninitialized_reduce(
cudaTask task, I first, I last, T* result, B bop, U uop
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_uninitialized_reduce(first, last, result, bop, uop);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/matmul.hpp | #pragma once
#include "../cudaflow.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// row-major matrix multiplication
// ----------------------------------------------------------------------------
template <typename T>
__global__ void cuda_matmul(
const T* A,
const T* B,
T* C,
size_t M,
size_t K,
size_t N
) {
__shared__ T A_tile[32][32];
__shared__ T B_tile[32][32];
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
T res = 0;
for(size_t k = 0; k < K; k += 32) {
if((threadIdx.x + k) < K && y < M) {
A_tile[threadIdx.y][threadIdx.x] = A[y * K + threadIdx.x + k];
}
else{
A_tile[threadIdx.y][threadIdx.x] = 0;
}
if((threadIdx.y + k) < K && x < N) {
B_tile[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k) * N + x];
}
else{
B_tile[threadIdx.y][threadIdx.x] = 0;
}
__syncthreads();
for(size_t i = 0; i < 32; ++i) {
res += A_tile[threadIdx.y][i] * B_tile[i][threadIdx.x];
}
__syncthreads();
}
if(x < N && y < M) {
C[y * N + x] = res;
}
}
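// A minimal launch sketch for the 32x32-tiled kernel above, assuming row-major
// device buffers d_A (M x K), d_B (K x N), and d_C (M x N) that the caller has
// already allocated and initialized; all names here are illustrative.
//
//   dim3 block(32, 32);
//   dim3 grid((N + 31) / 32, (M + 31) / 32);
//   tf::cuda_matmul<float><<<grid, block, 0, stream>>>(d_A, d_B, d_C, M, K, N);
//   cudaStreamSynchronize(stream);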
} // end of namespace tf ---------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/scan.hpp | #pragma once
#include "reduce.hpp"
/**
@file taskflow/cuda/algorithm/scan.hpp
@brief CUDA scan algorithm include file
*/
namespace tf::detail {
// ----------------------------------------------------------------------------
// scan
// ----------------------------------------------------------------------------
/** @private */
inline constexpr unsigned cudaScanRecursionThreshold = 8;
/** @private */
enum class cudaScanType : int {
EXCLUSIVE = 1,
INCLUSIVE
};
/** @private */
template<typename T, unsigned vt = 0, bool is_array = (vt > 0)>
struct cudaScanResult {
T scan;
T reduction;
};
/** @private */
template<typename T, unsigned vt>
struct cudaScanResult<T, vt, true> {
cudaArray<T, vt> scan;
T reduction;
};
//-----------------------------------------------------------------------------
/** @private */
template<unsigned nt, typename T>
struct cudaBlockScan {
const static unsigned num_warps = nt / CUDA_WARP_SIZE;
const static unsigned num_passes = log2(nt);
const static unsigned capacity = nt + num_warps;
/** @private */
union storage_t {
T data[2 * nt];
struct { T threads[nt], warps[num_warps]; };
};
// standard scan
template<typename op_t>
__device__ cudaScanResult<T> operator ()(
unsigned tid,
T x,
storage_t& storage,
unsigned count = nt,
op_t op = op_t(),
T init = T(),
cudaScanType type = cudaScanType::EXCLUSIVE
) const;
// vectorized scan. accepts multiple values per thread and adds in
// optional global carry-in
template<unsigned vt, typename op_t>
__device__ cudaScanResult<T, vt> operator()(
unsigned tid,
cudaArray<T, vt> x,
storage_t& storage,
T carry_in = T(),
bool use_carry_in = false,
unsigned count = nt,
op_t op = op_t(),
T init = T(),
cudaScanType type = cudaScanType::EXCLUSIVE
) const;
};
// standard scan
template <unsigned nt, typename T>
template<typename op_t>
__device__ cudaScanResult<T> cudaBlockScan<nt, T>::operator () (
unsigned tid, T x, storage_t& storage, unsigned count, op_t op,
T init, cudaScanType type
) const {
unsigned first = 0;
storage.data[first + tid] = x;
__syncthreads();
cuda_iterate<num_passes>([&](auto pass) {
if(auto offset = 1<<pass; tid >= offset) {
x = op(storage.data[first + tid - offset], x);
}
first = nt - first;
storage.data[first + tid] = x;
__syncthreads();
});
cudaScanResult<T> result;
result.reduction = storage.data[first + count - 1];
result.scan = (tid < count) ?
(cudaScanType::INCLUSIVE == type ? x :
(tid ? storage.data[first + tid - 1] : init)) :
result.reduction;
__syncthreads();
return result;
}
// vectorized scan block
template <unsigned nt, typename T>
template<unsigned vt, typename op_t>
__device__ cudaScanResult<T, vt> cudaBlockScan<nt, T>::operator()(
unsigned tid,
cudaArray<T, vt> x,
storage_t& storage,
T carry_in,
bool use_carry_in,
unsigned count, op_t op,
T init,
cudaScanType type
) const {
// Start with an inclusive scan of the in-range elements.
if(count >= nt * vt) {
cuda_iterate<vt>([&](auto i) {
x[i] = i ? op(x[i], x[i - 1]) : x[i];
});
} else {
cuda_iterate<vt>([&](auto i) {
auto index = vt * tid + i;
x[i] = i ?
((index < count) ? op(x[i], x[i - 1]) : x[i - 1]) :
(x[i] = (index < count) ? x[i] : init);
});
}
// Scan the thread-local reductions for a carry-in for each thread.
auto result = operator()(
tid, x[vt - 1], storage,
(count + vt - 1) / vt, op, init, cudaScanType::EXCLUSIVE
);
// Perform the scan downsweep and add both the global carry-in and the
// thread carry-in to the values.
if(use_carry_in) {
result.reduction = op(carry_in, result.reduction);
result.scan = tid ? op(carry_in, result.scan) : carry_in;
} else {
use_carry_in = tid > 0;
}
cudaArray<T, vt> y;
cuda_iterate<vt>([&](auto i) {
if(cudaScanType::EXCLUSIVE == type) {
y[i] = i ? x[i - 1] : result.scan;
if(use_carry_in && i > 0) y[i] = op(result.scan, y[i]);
} else {
y[i] = use_carry_in ? op(x[i], result.scan) : x[i];
}
});
return cudaScanResult<T, vt> { y, result.reduction };
}
/**
@private
@brief single-pass scan for small input
*/
template <typename P, typename I, typename O, typename C>
void cuda_single_pass_scan(
P&& p,
cudaScanType scan_type,
I input,
unsigned count,
O output,
C op
//reduction_it reduction,
) {
using T = typename std::iterator_traits<O>::value_type;
using E = std::decay_t<P>;
// Small input specialization. This is the non-recursive branch.
cuda_kernel<<<1, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
using scan_t = cudaBlockScan<E::nt, T>;
__shared__ union {
typename scan_t::storage_t scan;
T values[E::nv];
} shared;
auto carry_in = T();
for(unsigned cur = 0; cur < count; cur += E::nv) {
// Cooperatively load values into register.
auto count2 = min(count - cur, E::nv);
auto x = cuda_mem_to_reg_thread<E::nt, E::vt>(input + cur,
tid, count2, shared.values);
auto result = scan_t()(tid, x, shared.scan,
carry_in, cur > 0, count2, op, T(), scan_type);
// Store the scanned values back to global memory.
cuda_reg_to_mem_thread<E::nt, E::vt>(result.scan, tid, count2,
output + cur, shared.values);
// Roll the reduction into carry_in.
carry_in = result.reduction;
}
// Store the carry-out to the reduction pointer. This may be a
// discard_iterator_t if no reduction is wanted.
//if(!tid) *reduction = carry_in;
});
}
/**
@private
@brief main scan loop
*/
template<typename P, typename I, typename O, typename C>
void cuda_scan_loop(
P&& p,
cudaScanType scan_type,
I input,
unsigned count,
O output,
C op,
//reduction_it reduction,
void* ptr
) {
using E = std::decay_t<P>;
using T = typename std::iterator_traits<O>::value_type;
T* buffer = static_cast<T*>(ptr);
//launch_t::cta_dim(context).B(count);
unsigned B = (count + E::nv - 1) / E::nv;
if(B > cudaScanRecursionThreshold) {
//cudaDeviceVector<T> partials(B);
//auto buffer = partials.data();
// upsweep phase
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
__shared__ typename cudaBlockReduce<E::nt, T>::Storage shm;
// Load the tile's data into register.
auto tile = cuda_get_tile(bid, E::nv, count);
auto x = cuda_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
// Reduce the thread's values into a scalar.
T scalar;
cuda_strided_iterate<E::nt, E::vt>(
[&] (auto i, auto j) { scalar = i ? op(scalar, x[i]) : x[0]; },
tid, tile.count()
);
// Reduce across all threads.
auto all_reduce = cudaBlockReduce<E::nt, T>()(
tid, scalar, shm, tile.count(), op
);
// Store the final reduction to the partials.
if(!tid) {
buffer[bid] = all_reduce;
}
});
// recursively call scan
//cuda_scan_loop(p, cudaScanType::EXCLUSIVE, buffer, B, buffer, op, S);
cuda_scan_loop(
p, cudaScanType::EXCLUSIVE, buffer, B, buffer, op, buffer+B
);
// downsweep: perform an intra-tile scan and add the scan of the partials
// as carry-in
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
using scan_t = cudaBlockScan<E::nt, T>;
__shared__ union {
typename scan_t::storage_t scan;
T values[E::nv];
} shared;
// Load a tile to register in thread order.
auto tile = cuda_get_tile(bid, E::nv, count);
auto x = cuda_mem_to_reg_thread<E::nt, E::vt>(
input + tile.begin, tid, tile.count(), shared.values
);
// Scan the array with carry-in from the partials.
auto y = scan_t()(tid, x, shared.scan,
buffer[bid], bid > 0, tile.count(), op, T(),
scan_type).scan;
// Store the scanned values to the output.
cuda_reg_to_mem_thread<E::nt, E::vt>(
y, tid, tile.count(), output + tile.begin, shared.values
);
});
}
// Small input specialization. This is the non-recursive branch.
else {
cuda_single_pass_scan(p, scan_type, input, count, output, op);
}
}
} // namespace tf::detail ----------------------------------------------------
namespace tf {
/**
@brief queries the buffer size in bytes needed to call scan kernels
@tparam P execution policy type
@tparam T value type
@param count number of elements to scan
The function is used to allocate a buffer for calling
tf::cuda_inclusive_scan, tf::cuda_exclusive_scan,
tf::cuda_transform_inclusive_scan, and tf::cuda_transform_exclusive_scan.
*/
template <typename P, typename T>
unsigned cuda_scan_buffer_size(unsigned count) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned n = 0;
for(auto b=B; b>detail::cudaScanRecursionThreshold; b=(b+E::nv-1)/E::nv) {
n += b;
}
return n*sizeof(T);
}
// ----------------------------------------------------------------------------
// inclusive scan
// ----------------------------------------------------------------------------
//template<typename P, typename I, typename O, typename C>
//void cuda_inclusive_scan(P&& p, I first, I last, O output, C op) {
//
// unsigned count = std::distance(first, last);
//
// if(count == 0) {
// return;
// }
//
// using T = typename std::iterator_traits<O>::value_type;
//
// // allocate temporary buffer
// cudaDeviceVector<std::byte> temp(cuda_scan_buffer_size<P, T>(count));
//
// // launch the scan loop
// detail::cuda_scan_loop(
// p, detail::cudaScanType::INCLUSIVE, first, count, output, op, temp.data()
// );
//
// // synchronize the execution
// p.synchronize();
//}
/**
@brief performs asynchronous inclusive scan over a range of items
@tparam P execution policy type
@tparam I input iterator
@tparam O output iterator
@tparam C binary operator type
@param p execution policy
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param op binary operator to apply to scan
@param buf pointer to the temporary buffer
*/
template<typename P, typename I, typename O, typename C>
void cuda_inclusive_scan(
P&& p, I first, I last, O output, C op, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// launch the scan loop
detail::cuda_scan_loop(
p, detail::cudaScanType::INCLUSIVE, first, count, output, op, buf
);
}
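// A minimal sketch of calling tf::cuda_inclusive_scan with a caller-managed
// temporary buffer. It assumes pre-allocated device arrays d_in and d_out of
// N ints (illustrative names); the buffer is sized with
// tf::cuda_scan_buffer_size and must stay alive until the stream finishes.
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cudaDeviceVector<std::byte> temp(
//     tf::cuda_scan_buffer_size<tf::cudaDefaultExecutionPolicy, int>(N)
//   );
//   tf::cuda_inclusive_scan(
//     policy, d_in, d_in + N, d_out,
//     [] __device__ (int a, int b) { return a + b; },
//     temp.data()
//   );
//   stream.synchronize();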
// ----------------------------------------------------------------------------
// transform inclusive_scan
// ----------------------------------------------------------------------------
//template<typename P, typename I, typename O, typename C, typename U>
//void cuda_transform_inclusive_scan(
// P&& p, I first, I last, O output, C bop, U uop
//) {
//
// unsigned count = std::distance(first, last);
//
// if(count == 0) {
// return;
// }
//
// using T = typename std::iterator_traits<O>::value_type;
//
// // allocate temporary buffer
// cudaDeviceVector<std::byte> temp(cuda_scan_buffer_size<P, T>(count));
// auto buf = temp.data();
//
// // launch the scan loop
// detail::cuda_scan_loop(
// p, detail::cudaScanType::INCLUSIVE,
// cuda_make_load_iterator<T>([=]__device__(auto i){ return uop(*(first+i)); }),
// count, output, bop, buf
// );
//
// // synchronize the execution
// p.synchronize();
//}
/**
@brief performs asynchronous inclusive scan over a range of transformed items
@tparam P execution policy type
@tparam I input iterator
@tparam O output iterator
@tparam C binary operator type
@tparam U unary operator type
@param p execution policy
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param bop binary operator to apply to scan
@param uop unary operator to apply to transform each item before scan
@param buf pointer to the temporary buffer
*/
template<typename P, typename I, typename O, typename C, typename U>
void cuda_transform_inclusive_scan(
P&& p, I first, I last, O output, C bop, U uop, void* buf
) {
using T = typename std::iterator_traits<O>::value_type;
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// launch the scan loop
detail::cuda_scan_loop(
p, detail::cudaScanType::INCLUSIVE,
cuda_make_load_iterator<T>([=]__device__(auto i){ return uop(*(first+i)); }),
count, output, bop, buf
);
}
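// A minimal sketch of tf::cuda_transform_inclusive_scan that squares each
// element before scanning, under the same assumptions as the example above
// (device arrays d_in/d_out of N ints, caller-managed temporary buffer).
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cudaDeviceVector<std::byte> temp(
//     tf::cuda_scan_buffer_size<tf::cudaDefaultExecutionPolicy, int>(N)
//   );
//   tf::cuda_transform_inclusive_scan(
//     policy, d_in, d_in + N, d_out,
//     [] __device__ (int a, int b) { return a + b; },  // scan operator
//     [] __device__ (int x) { return x * x; },         // per-element transform
//     temp.data()
//   );
//   stream.synchronize();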
// ----------------------------------------------------------------------------
// exclusive scan
// ----------------------------------------------------------------------------
//template<typename P, typename I, typename O, typename C>
//void cuda_exclusive_scan(P&& p, I first, I last, O output, C op) {
//
// unsigned count = std::distance(first, last);
//
// if(count == 0) {
// return;
// }
//
// using T = typename std::iterator_traits<O>::value_type;
//
// // allocate temporary buffer
// cudaDeviceVector<std::byte> temp(cuda_scan_buffer_size<P, T>(count));
// auto buf = temp.data();
//
// // launch the scan loop
// detail::cuda_scan_loop(
// p, detail::cudaScanType::EXCLUSIVE, first, count, output, op, buf
// );
//
// // synchronize the execution
// p.synchronize();
//}
/**
@brief performs asynchronous exclusive scan over a range of items
@tparam P execution policy type
@tparam I input iterator
@tparam O output iterator
@tparam C binary operator type
@param p execution policy
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param op binary operator to apply to scan
@param buf pointer to the temporary buffer
*/
template<typename P, typename I, typename O, typename C>
void cuda_exclusive_scan(
P&& p, I first, I last, O output, C op, void* buf
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// launch the scan loop
detail::cuda_scan_loop(
p, detail::cudaScanType::EXCLUSIVE, first, count, output, op, buf
);
}
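// A minimal sketch of tf::cuda_exclusive_scan under the same assumptions as
// the inclusive example above. The exclusive variant shifts the results by one
// position and writes a value-initialized T() (here, 0 for int) to the first
// output element.
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cudaDeviceVector<std::byte> temp(
//     tf::cuda_scan_buffer_size<tf::cudaDefaultExecutionPolicy, int>(N)
//   );
//   tf::cuda_exclusive_scan(
//     policy, d_in, d_in + N, d_out,
//     [] __device__ (int a, int b) { return a + b; },
//     temp.data()
//   );
//   stream.synchronize();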
// ----------------------------------------------------------------------------
// transform exclusive scan
// ----------------------------------------------------------------------------
//template<typename P, typename I, typename O, typename C, typename U>
//void cuda_transform_exclusive_scan(
// P&& p, I first, I last, O output, C bop, U uop
//) {
//
// unsigned count = std::distance(first, last);
//
// if(count == 0) {
// return;
// }
//
// using T = typename std::iterator_traits<O>::value_type;
//
// // allocate temporary buffer
// cudaDeviceVector<std::byte> temp(cuda_scan_buffer_size<P, T>(count));
// auto buf = temp.data();
//
// // launch the scan loop
// detail::cuda_scan_loop(
// p, detail::cudaScanType::EXCLUSIVE,
// cuda_make_load_iterator<T>([=]__device__(auto i){ return uop(*(first+i)); }),
// count, output, bop, buf
// );
//
// // synchronize the execution
// p.synchronize();
//}
/**
@brief performs asynchronous exclusive scan over a range of items
@tparam P execution policy type
@tparam I input iterator
@tparam O output iterator
@tparam C binary operator type
@tparam U unary operator type
@param p execution policy
@param first iterator to the beginning of the input range
@param last iterator to the end of the input range
@param output iterator to the beginning of the output range
@param bop binary operator to apply to scan
@param uop unary operator to apply to transform each item before scan
@param buf pointer to the temporary buffer
*/
template<typename P, typename I, typename O, typename C, typename U>
void cuda_transform_exclusive_scan(
P&& p, I first, I last, O output, C bop, U uop, void* buf
) {
using T = typename std::iterator_traits<O>::value_type;
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// launch the scan loop
detail::cuda_scan_loop(
p, detail::cudaScanType::EXCLUSIVE,
cuda_make_load_iterator<T>([=]__device__(auto i){ return uop(*(first+i)); }),
count, output, bop, buf
);
}
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: inclusive_scan
template <typename I, typename O, typename C>
cudaTask cudaFlow::inclusive_scan(I first, I last, O output, C op) {
return capture([=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.inclusive_scan(first, last, output, op);
});
}
// Function: inclusive_scan
template <typename I, typename O, typename C>
void cudaFlow::inclusive_scan(cudaTask task, I first, I last, O output, C op) {
capture(task, [=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.inclusive_scan(first, last, output, op);
});
}
// Function: exclusive_scan
template <typename I, typename O, typename C>
cudaTask cudaFlow::exclusive_scan(I first, I last, O output, C op) {
return capture([=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.exclusive_scan(first, last, output, op);
});
}
// Function: exclusive_scan
template <typename I, typename O, typename C>
void cudaFlow::exclusive_scan(cudaTask task, I first, I last, O output, C op) {
capture(task, [=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.exclusive_scan(first, last, output, op);
});
}
// Function: transform_inclusive_scan
template <typename I, typename O, typename B, typename U>
cudaTask cudaFlow::transform_inclusive_scan(
I first, I last, O output, B bop, U uop
) {
return capture([=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_inclusive_scan(first, last, output, bop, uop);
});
}
// Function: transform_inclusive_scan
template <typename I, typename O, typename B, typename U>
void cudaFlow::transform_inclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
) {
capture(task, [=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_inclusive_scan(first, last, output, bop, uop);
});
}
// Function: transform_exclusive_scan
template <typename I, typename O, typename B, typename U>
cudaTask cudaFlow::transform_exclusive_scan(
I first, I last, O output, B bop, U uop
) {
return capture([=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_exclusive_scan(first, last, output, bop, uop);
});
}
// Function: transform_exclusive_scan
template <typename I, typename O, typename B, typename U>
void cudaFlow::transform_exclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
) {
capture(task, [=](cudaFlowCapturer& cap) {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform_exclusive_scan(first, last, output, bop, uop);
});
}
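// A minimal host-side sketch (not part of the original header) of composing
// the cudaFlow scan methods above inside a taskflow, assuming device arrays
// d_in and d_out of N ints (illustrative names). The temporary scan buffer is
// allocated internally by the underlying capturer methods shown below.
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   taskflow.emplace([=](tf::cudaFlow& cf) {
//     cf.inclusive_scan(d_in, d_in + N, d_out,
//       [] __device__ (int a, int b) { return a + b; }
//     );
//   });
//   executor.run(taskflow).wait();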
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: inclusive_scan
template <typename I, typename O, typename C>
cudaTask cudaFlowCapturer::inclusive_scan(I first, I last, O output, C op) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_inclusive_scan(p, first, last, output, op, buf.get().data());
});
}
// Function: inclusive_scan
template <typename I, typename O, typename C>
void cudaFlowCapturer::inclusive_scan(
cudaTask task, I first, I last, O output, C op
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_inclusive_scan(p, first, last, output, op, buf.get().data());
});
}
// Function: exclusive_scan
template <typename I, typename O, typename C>
cudaTask cudaFlowCapturer::exclusive_scan(I first, I last, O output, C op) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_exclusive_scan(p, first, last, output, op, buf.get().data());
});
}
// Function: exclusive_scan
template <typename I, typename O, typename C>
void cudaFlowCapturer::exclusive_scan(
cudaTask task, I first, I last, O output, C op
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_exclusive_scan(p, first, last, output, op, buf.get().data());
});
}
// Function: transform_inclusive_scan
template <typename I, typename O, typename B, typename U>
cudaTask cudaFlowCapturer::transform_inclusive_scan(
I first, I last, O output, B bop, U uop
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_inclusive_scan(
p, first, last, output, bop, uop, buf.get().data()
);
});
}
// Function: transform_inclusive_scan
template <typename I, typename O, typename B, typename U>
void cudaFlowCapturer::transform_inclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_inclusive_scan(
p, first, last, output, bop, uop, buf.get().data()
);
});
}
// Function: transform_exclusive_scan
template <typename I, typename O, typename B, typename U>
cudaTask cudaFlowCapturer::transform_exclusive_scan(
I first, I last, O output, B bop, U uop
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_exclusive_scan(
p, first, last, output, bop, uop, buf.get().data()
);
});
}
// Function: transform_exclusive_scan
template <typename I, typename O, typename B, typename U>
void cudaFlowCapturer::transform_exclusive_scan(
cudaTask task, I first, I last, O output, B bop, U uop
) {
using T = typename std::iterator_traits<O>::value_type;
auto bufsz = cuda_scan_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform_exclusive_scan(
p, first, last, output, bop, uop, buf.get().data()
);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/find.hpp | #pragma once
#include "for_each.hpp"
#include "reduce.hpp"
/**
@file taskflow/cuda/algorithm/find.hpp
@brief CUDA find algorithms include file
*/
namespace tf::detail {
/** @private */
template <typename T>
struct cudaFindPair {
T key;
unsigned index;
__device__ operator unsigned () const { return index; }
};
/** @private */
template <typename P, typename I, typename U>
void cuda_find_if_loop(P&& p, I input, unsigned count, unsigned* idx, U pred) {
if(count == 0) {
cuda_single_task(p, [=] __device__ () { *idx = 0; });
return;
}
using E = std::decay_t<P>;
auto B = (count + E::nv - 1) / E::nv;
// set the index to the maximum
cuda_single_task(p, [=] __device__ () { *idx = count; });
// launch the kernel to atomic-find the minimum
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
__shared__ unsigned shm_id;
if(!tid) {
shm_id = count;
}
__syncthreads();
auto tile = cuda_get_tile(bid, E::nv, count);
auto x = cuda_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
auto id = count;
for(unsigned i=0; i<E::vt; i++) {
auto j = E::nt*i + tid;
if(j < tile.count() && pred(x[i])) {
id = j + tile.begin;
break;
}
}
// Note: the reduce version is not faster though
// reduce to a scalar per block.
//__shared__ typename cudaBlockReduce<E::nt, unsigned>::Storage shm;
//id = cudaBlockReduce<E::nt, unsigned>()(
// tid,
// id,
// shm,
// (tile.count() < E::nt ? tile.count() : E::nt),
// cuda_minimum<unsigned>{},
// false
//);
// only need the minimum id
atomicMin(&shm_id, id);
__syncthreads();
// reduce all to the global memory
if(!tid) {
atomicMin(idx, shm_id);
//atomicMin(idx, id);
}
});
}
/** @private */
template <typename P, typename I, typename O>
void cuda_min_element_loop(
P&& p, I input, unsigned count, unsigned* idx, O op, void* ptr
) {
if(count == 0) {
cuda_single_task(p, [=] __device__ () { *idx = 0; });
return;
}
using T = cudaFindPair<typename std::iterator_traits<I>::value_type>;
cuda_uninitialized_reduce_loop(p,
cuda_make_load_iterator<T>([=]__device__(auto i){
return T{*(input+i), i};
}),
count,
idx,
[=] __device__ (const auto& a, const auto& b) {
return op(a.key, b.key) ? a : b;
},
ptr
);
}
/** @private */
template <typename P, typename I, typename O>
void cuda_max_element_loop(
P&& p, I input, unsigned count, unsigned* idx, O op, void* ptr
) {
if(count == 0) {
cuda_single_task(p, [=] __device__ () { *idx = 0; });
return;
}
using T = cudaFindPair<typename std::iterator_traits<I>::value_type>;
cuda_uninitialized_reduce_loop(p,
cuda_make_load_iterator<T>([=]__device__(auto i){
return T{*(input+i), i};
}),
count,
idx,
[=] __device__ (const auto& a, const auto& b) {
return op(a.key, b.key) ? b : a;
},
ptr
);
}
} // end of namespace tf::detail ---------------------------------------------
namespace tf {
// ----------------------------------------------------------------------------
// cuda_find_if
// ----------------------------------------------------------------------------
/**
@brief finds the index of the first element that satisfies the given criteria
@tparam P execution policy type
@tparam I input iterator type
@tparam U unary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx pointer to the index of the found element
@param op unary operator which returns @c true for the required element
The function launches kernels asynchronously to find the index @c idx of the
first element in the range <tt>[first, last)</tt>
such that <tt>op(*(first+idx))</tt> is true.
This is equivalent to the parallel execution of the following loop:
@code{.cpp}
unsigned idx = 0;
for(; first != last; ++first, ++idx) {
if (op(*first)) {
return idx;
}
}
return idx;
@endcode
*/
template <typename P, typename I, typename U>
void cuda_find_if(
P&& p, I first, I last, unsigned* idx, U op
) {
detail::cuda_find_if_loop(p, first, std::distance(first, last), idx, op);
}
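// A minimal sketch of tf::cuda_find_if, assuming a device array d_vec of N
// floats and a device-accessible result slot d_idx (e.g., allocated with
// cudaMallocManaged); both names are illustrative. After synchronization,
// *d_idx equals N when no element satisfies the predicate.
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cuda_find_if(
//     policy, d_vec, d_vec + N, d_idx,
//     [] __device__ (float x) { return x > 0.5f; }
//   );
//   stream.synchronize();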
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: find_if
template <typename I, typename U>
cudaTask cudaFlow::find_if(I first, I last, unsigned* idx, U op) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.find_if(first, last, idx, op);
});
}
// Function: find_if
template <typename I, typename U>
void cudaFlow::find_if(cudaTask task, I first, I last, unsigned* idx, U op) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.find_if(first, last, idx, op);
});
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: find_if
template <typename I, typename U>
cudaTask cudaFlowCapturer::find_if(I first, I last, unsigned* idx, U op) {
return on([=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_find_if(p, first, last, idx, op);
});
}
// Function: find_if
template <typename I, typename U>
void cudaFlowCapturer::find_if(
cudaTask task, I first, I last, unsigned* idx, U op
) {
on(task, [=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_find_if(p, first, last, idx, op);
});
}
// ----------------------------------------------------------------------------
// cuda_min_element
// ----------------------------------------------------------------------------
/**
@brief queries the buffer size in bytes needed to call tf::cuda_min_element
@tparam P execution policy type
@tparam T value type
@param count number of elements to search
The function is used to decide the buffer size in bytes for calling
tf::cuda_min_element.
*/
template <typename P, typename T>
unsigned cuda_min_element_buffer_size(unsigned count) {
return cuda_reduce_buffer_size<P, detail::cudaFindPair<T>>(count);
}
/**
@brief finds the index of the minimum element in a range
@tparam P execution policy type
@tparam I input iterator type
@tparam O comparator type
@param p execution policy object
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx solution index of the minimum element
@param op comparison function object
@param buf pointer to the buffer
The function launches kernels asynchronously to find
the smallest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
You need to provide a buffer that holds at least
tf::cuda_min_element_buffer_size bytes for internal use.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto smallest = first;
for (auto it = std::next(first); it != last; ++it) {
if (op(*it, *smallest)) {
smallest = it;
}
}
return std::distance(first, smallest);
@endcode
*/
template <typename P, typename I, typename O>
void cuda_min_element(P&& p, I first, I last, unsigned* idx, O op, void* buf) {
detail::cuda_min_element_loop(
p, first, std::distance(first, last), idx, op, buf
);
}
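// A minimal sketch of tf::cuda_min_element with a caller-managed buffer, under
// the same assumptions as the find_if example above (device array d_vec of N
// floats, device-accessible unsigned slot d_idx; illustrative names).
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cudaDeviceVector<std::byte> temp(
//     tf::cuda_min_element_buffer_size<tf::cudaDefaultExecutionPolicy, float>(N)
//   );
//   tf::cuda_min_element(
//     policy, d_vec, d_vec + N, d_idx,
//     [] __device__ (float a, float b) { return a < b; },
//     temp.data()
//   );
//   stream.synchronize();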
// ----------------------------------------------------------------------------
// cudaFlowCapturer::min_element
// ----------------------------------------------------------------------------
// Function: min_element
template <typename I, typename O>
cudaTask cudaFlowCapturer::min_element(I first, I last, unsigned* idx, O op) {
using T = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_min_element_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_min_element(p, first, last, idx, op, buf.get().data());
});
}
// Function: min_element
template <typename I, typename O>
void cudaFlowCapturer::min_element(
cudaTask task, I first, I last, unsigned* idx, O op
) {
using T = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_min_element_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_min_element(p, first, last, idx, op, buf.get().data());
});
}
// ----------------------------------------------------------------------------
// cudaFlow::min_element
// ----------------------------------------------------------------------------
// Function: min_element
template <typename I, typename O>
cudaTask cudaFlow::min_element(I first, I last, unsigned* idx, O op) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.min_element(first, last, idx, op);
});
}
// Function: min_element
template <typename I, typename O>
void cudaFlow::min_element(
cudaTask task, I first, I last, unsigned* idx, O op
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.min_element(first, last, idx, op);
});
}
// ----------------------------------------------------------------------------
// cuda_max_element
// ----------------------------------------------------------------------------
/**
@brief queries the buffer size in bytes needed to call tf::cuda_max_element
@tparam P execution policy type
@tparam T value type
@param count number of elements to search
The function is used to decide the buffer size in bytes for calling
tf::cuda_max_element.
*/
template <typename P, typename T>
unsigned cuda_max_element_buffer_size(unsigned count) {
return cuda_reduce_buffer_size<P, detail::cudaFindPair<T>>(count);
}
/**
@brief finds the index of the maximum element in a range
@tparam P execution policy type
@tparam I input iterator type
@tparam O comparator type
@param p execution policy object
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param idx solution index of the maximum element
@param op comparison function object
@param buf pointer to the buffer
The function launches kernels asynchronously to find
the largest element in the range <tt>[first, last)</tt>
using the given comparator @c op.
You need to provide a buffer that holds at least
tf::cuda_max_element_buffer_size bytes for internal use.
The function is equivalent to a parallel execution of the following loop:
@code{.cpp}
if(first == last) {
return 0;
}
auto largest = first;
for (auto it = std::next(first); it != last; ++it) {
if (op(*largest, *it)) {
largest = it;
}
}
return std::distance(first, largest);
@endcode
*/
template <typename P, typename I, typename O>
void cuda_max_element(P&& p, I first, I last, unsigned* idx, O op, void* buf) {
detail::cuda_max_element_loop(
p, first, std::distance(first, last), idx, op, buf
);
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer::max_element
// ----------------------------------------------------------------------------
// Function: max_element
template <typename I, typename O>
cudaTask cudaFlowCapturer::max_element(I first, I last, unsigned* idx, O op) {
using T = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_max_element_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_max_element(p, first, last, idx, op, buf.get().data());
});
}
// Function: max_element
template <typename I, typename O>
void cudaFlowCapturer::max_element(
cudaTask task, I first, I last, unsigned* idx, O op
) {
using T = typename std::iterator_traits<I>::value_type;
auto bufsz = cuda_max_element_buffer_size<cudaDefaultExecutionPolicy, T>(
std::distance(first, last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_max_element(p, first, last, idx, op, buf.get().data());
});
}
// ----------------------------------------------------------------------------
// cudaFlow::max_element
// ----------------------------------------------------------------------------
// Function: max_element
template <typename I, typename O>
cudaTask cudaFlow::max_element(I first, I last, unsigned* idx, O op) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.max_element(first, last, idx, op);
});
}
// Function: max_element
template <typename I, typename O>
void cudaFlow::max_element(
cudaTask task, I first, I last, unsigned* idx, O op
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.max_element(first, last, idx, op);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/transform.hpp | #pragma once
#include "../cudaflow.hpp"
/**
@file taskflow/cuda/algorithm/transform.hpp
@brief CUDA parallel-transform algorithms include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// transform
// ----------------------------------------------------------------------------
namespace detail {
/** @private */
template <typename P, typename I, typename O, typename C>
void cuda_transform_loop(P&& p, I first, unsigned count, O output, C op) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=]__device__(auto tid, auto bid) {
auto tile = cuda_get_tile(bid, E::nv, count);
cuda_strided_iterate<E::nt, E::vt>([=]__device__(auto, auto j) {
auto offset = j + tile.begin;
*(output + offset) = op(*(first+offset));
}, tid, tile.count());
});
}
/** @private */
template <typename P, typename I1, typename I2, typename O, typename C>
void cuda_transform_loop(
P&& p, I1 first1, I2 first2, unsigned count, O output, C op
) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=]__device__(auto tid, auto bid) {
auto tile = cuda_get_tile(bid, E::nv, count);
cuda_strided_iterate<E::nt, E::vt>([=]__device__(auto, auto j) {
auto offset = j + tile.begin;
*(output + offset) = op(*(first1+offset), *(first2+offset));
}, tid, tile.count());
});
}
} // end of namespace detail -------------------------------------------------
// ----------------------------------------------------------------------------
// CUDA standard algorithms: transform
// ----------------------------------------------------------------------------
/**
@brief performs asynchronous parallel transforms over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam O output iterator type
@tparam C unary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param output iterator to the beginning of the output range
@param op unary operator to apply to transform each item
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first != last) {
*output++ = op(*first++);
}
@endcode
*/
template <typename P, typename I, typename O, typename C>
void cuda_transform(P&& p, I first, I last, O output, C op) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
detail::cuda_transform_loop(p, first, count, output, op);
}
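// A minimal sketch of the unary tf::cuda_transform overload, assuming device
// arrays d_in and d_out of N floats (illustrative names).
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cuda_transform(
//     policy, d_in, d_in + N, d_out,
//     [] __device__ (float x) { return 2.0f * x; }
//   );
//   stream.synchronize();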
/**
@brief performs asynchronous parallel transforms over two ranges of items
@tparam P execution policy type
@tparam I1 first input iterator type
@tparam I2 second input iterator type
@tparam O output iterator type
@tparam C binary operator type
@param p execution policy
@param first1 iterator to the beginning of the first range
@param last1 iterator to the end of the first range
@param first2 iterator to the beginning of the second range
@param output iterator to the beginning of the output range
@param op binary operator to apply to transform each pair of items
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
while (first1 != last1) {
*output++ = op(*first1++, *first2++);
}
@endcode
*/
template <typename P, typename I1, typename I2, typename O, typename C>
void cuda_transform(
P&& p, I1 first1, I1 last1, I2 first2, O output, C op
) {
unsigned count = std::distance(first1, last1);
if(count == 0) {
return;
}
detail::cuda_transform_loop(p, first1, first2, count, output, op);
}
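// A minimal sketch of the binary tf::cuda_transform overload, assuming device
// arrays d_x, d_y, and d_z of N floats (illustrative names); it computes the
// element-wise sum d_z[i] = d_x[i] + d_y[i].
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   tf::cuda_transform(
//     policy, d_x, d_x + N, d_y, d_z,
//     [] __device__ (float a, float b) { return a + b; }
//   );
//   stream.synchronize();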
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: transform
template <typename I, typename O, typename C>
cudaTask cudaFlow::transform(I first, I last, O output, C c) {
return capture([=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform(first, last, output, c);
});
}
// Function: transform
template <typename I1, typename I2, typename O, typename C>
cudaTask cudaFlow::transform(I1 first1, I1 last1, I2 first2, O output, C c) {
return capture([=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform(first1, last1, first2, output, c);
});
}
// Function: update transform
template <typename I, typename O, typename C>
void cudaFlow::transform(cudaTask task, I first, I last, O output, C c) {
capture(task, [=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform(first, last, output, c);
});
}
// Function: update transform
template <typename I1, typename I2, typename O, typename C>
void cudaFlow::transform(
cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c
) {
capture(task, [=](cudaFlowCapturer& cap) mutable {
cap.make_optimizer<cudaLinearCapturing>();
cap.transform(first1, last1, first2, output, c);
});
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer
// ----------------------------------------------------------------------------
// Function: transform
template <typename I, typename O, typename C>
cudaTask cudaFlowCapturer::transform(I first, I last, O output, C op) {
return on([=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform(p, first, last, output, op);
});
}
// Function: transform
template <typename I1, typename I2, typename O, typename C>
cudaTask cudaFlowCapturer::transform(
I1 first1, I1 last1, I2 first2, O output, C op
) {
return on([=](cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform(p, first1, last1, first2, output, op);
});
}
// Function: transform
template <typename I, typename O, typename C>
void cudaFlowCapturer::transform(
cudaTask task, I first, I last, O output, C op
) {
on(task, [=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform(p, first, last, output, op);
});
}
// Function: transform
template <typename I1, typename I2, typename O, typename C>
void cudaFlowCapturer::transform(
cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op
) {
on(task, [=] (cudaStream_t stream) mutable {
cudaDefaultExecutionPolicy p(stream);
cuda_transform(p, first1, last1, first2, output, op);
});
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Common/taskflow/cuda/algorithm/merge.hpp | #pragma once
#include "../cudaflow.hpp"
/**
@file taskflow/cuda/algorithm/merge.hpp
@brief CUDA merge algorithm include file
*/
namespace tf::detail {
/**
@private
@brief merge bound type
*/
enum class cudaMergeBoundType {
LOWER,
UPPER
};
/** @private */
template<typename T, unsigned N>
struct cudaMergePair {
cudaArray<T, N> keys;
cudaArray<unsigned, N> indices;
};
/** @private */
struct cudaMergeRange {
unsigned a_begin, a_end, b_begin, b_end;
__device__ unsigned a_count() const { return a_end - a_begin; }
__device__ unsigned b_count() const { return b_end - b_begin; }
__device__ unsigned total() const { return a_count() + b_count(); }
__device__ cudaRange a_range() const {
return cudaRange { a_begin, a_end };
}
__device__ cudaRange b_range() const {
return cudaRange { b_begin, b_end };
}
__device__ cudaMergeRange to_local() const {
return cudaMergeRange { 0, a_count(), a_count(), total() };
}
// Partition from mp to the end.
__device__ cudaMergeRange partition(unsigned mp0, unsigned diag) const {
return cudaMergeRange { a_begin + mp0, a_end, b_begin + diag - mp0, b_end };
}
// Partition from mp0 to mp1.
__device__ cudaMergeRange partition(unsigned mp0, unsigned diag0,
unsigned mp1, unsigned diag1) const {
return cudaMergeRange {
a_begin + mp0,
a_begin + mp1,
b_begin + diag0 - mp0,
b_begin + diag1 - mp1
};
}
__device__ bool a_valid() const {
return a_begin < a_end;
}
__device__ bool b_valid() const {
return b_begin < b_end;
}
};
/** @private */
template<
cudaMergeBoundType bounds = cudaMergeBoundType::LOWER,
typename a_keys_it, typename b_keys_it, typename comp_t
>
__device__ auto cuda_merge_path(
a_keys_it a_keys, unsigned a_count,
b_keys_it b_keys, unsigned b_count,
unsigned diag, comp_t comp
) {
unsigned beg = (diag > b_count) ? diag - b_count : 0;
unsigned end = diag < a_count ? diag : a_count;
while(beg < end) {
auto mid = (beg + end) / 2;
auto a_key = a_keys[mid];
auto b_key = b_keys[diag - 1 - mid];
bool pred = (cudaMergeBoundType::UPPER == bounds) ?
comp(a_key, b_key) :
!comp(b_key, a_key);
if(pred) beg = mid + 1;
else end = mid;
}
return beg;
}
/** @private */
template<cudaMergeBoundType bounds, typename keys_it, typename comp_t>
__device__ auto cuda_merge_path(
keys_it keys, cudaMergeRange range, unsigned diag, comp_t comp
) {
return cuda_merge_path<bounds>(
keys + range.a_begin, range.a_count(),
keys + range.b_begin, range.b_count(),
diag, comp);
}
/** @private */
template<cudaMergeBoundType bounds, bool range_check, typename T, typename comp_t>
__device__ bool cuda_merge_predicate(
T a_key, T b_key, cudaMergeRange range, comp_t comp
) {
bool p;
if(range_check && !range.a_valid()) {
p = false;
}
else if(range_check && !range.b_valid()) {
p = true;
}
else {
p = (cudaMergeBoundType::UPPER == bounds) ? comp(a_key, b_key) :
!comp(b_key, a_key);
}
return p;
}
/** @private */
inline __device__ auto cuda_compute_merge_range(
unsigned a_count, unsigned b_count,
unsigned partition, unsigned spacing,
unsigned mp0, unsigned mp1
) {
auto diag0 = spacing * partition;
auto diag1 = min(a_count + b_count, diag0 + spacing);
return cudaMergeRange { mp0, mp1, diag0 - mp0, diag1 - mp1 };
}
/**
@private
Specialization that emits just one LD instruction. It can only be reliably used
with raw pointer types. Fixed not to use pointer arithmetic so that
we don't get undefined behaviors with unaligned types.
*/
template<unsigned nt, unsigned vt, typename T>
__device__ auto cuda_load_two_streams_reg(
const T* a, unsigned a_count, const T* b, unsigned b_count, unsigned tid
) {
b -= a_count;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt>([&](auto i, auto index) {
const T* p = (index >= a_count) ? b : a;
x[i] = p[index];
}, tid, a_count + b_count);
return x;
}
/** @private */
template<unsigned nt, unsigned vt, typename T, typename a_it, typename b_it>
__device__
std::enable_if_t<
!(std::is_pointer<a_it>::value && std::is_pointer<b_it>::value),
cudaArray<T, vt>
> load_two_streams_reg(a_it a, unsigned a_count, b_it b, unsigned b_count, unsigned tid) {
b -= a_count;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt>([&](auto i, auto index) {
x[i] = (index < a_count) ? a[index] : b[index];
}, tid, a_count + b_count);
return x;
}
/** @private */
template<unsigned nt, unsigned vt, typename A, typename B, typename T, unsigned S>
__device__ void cuda_load_two_streams_shared(A a, unsigned a_count,
B b, unsigned b_count, unsigned tid, T (&shared)[S], bool sync = true
) {
// Load into register then make an unconditional strided store into memory.
auto x = cuda_load_two_streams_reg<nt, vt, T>(a, a_count, b, b_count, tid);
cuda_reg_to_shared_strided<nt>(x, tid, shared, sync);
}
/** @private */
template<unsigned nt, unsigned vt, typename T>
__device__ auto cuda_gather_two_streams_strided(const T* a,
unsigned a_count, const T* b, unsigned b_count, cudaArray<unsigned, vt> indices,
unsigned tid) {
ptrdiff_t b_offset = b - a - a_count;
auto count = a_count + b_count;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt>([&](auto i, auto j) {
ptrdiff_t gather = indices[i];
if(gather >= a_count) gather += b_offset;
x[i] = a[gather];
}, tid, count);
return x;
}
/** @private */
template<unsigned nt, unsigned vt, typename T, typename a_it, typename b_it>
__device__
std::enable_if_t<
!(std::is_pointer<a_it>::value && std::is_pointer<b_it>::value),
cudaArray<T, vt>
> cuda_gather_two_streams_strided(a_it a,
unsigned a_count, b_it b, unsigned b_count, cudaArray<unsigned, vt> indices, unsigned tid) {
b -= a_count;
cudaArray<T, vt> x;
cuda_strided_iterate<nt, vt>([&](auto i, auto j) {
x[i] = (indices[i] < a_count) ? a[indices[i]] : b[indices[i]];
}, tid, a_count + b_count);
return x;
}
/** @private */
template<unsigned nt, unsigned vt, typename a_it, typename b_it, typename c_it>
__device__ void cuda_transfer_two_streams_strided(
a_it a, unsigned a_count, b_it b, unsigned b_count,
cudaArray<unsigned, vt> indices, unsigned tid, c_it c
) {
using T = typename std::iterator_traits<a_it>::value_type;
auto x = cuda_gather_two_streams_strided<nt, vt, T>(
a, a_count, b, b_count, indices, tid
);
cuda_reg_to_mem_strided<nt>(x, tid, a_count + b_count, c);
}
/**
@private
This function must be able to dereference keys[a_begin] and keys[b_begin],
no matter the indices for each. The caller should allocate at least
nt * vt + 1 elements for keys_shared.
*/
template<cudaMergeBoundType bounds, unsigned vt, typename T, typename comp_t>
__device__ auto cuda_serial_merge(
const T* keys_shared, cudaMergeRange range, comp_t comp, bool sync = true
) {
auto a_key = keys_shared[range.a_begin];
auto b_key = keys_shared[range.b_begin];
cudaMergePair<T, vt> merge_pair;
cuda_iterate<vt>([&](auto i) {
bool p = cuda_merge_predicate<bounds, true>(a_key, b_key, range, comp);
auto index = p ? range.a_begin : range.b_begin;
merge_pair.keys[i] = p ? a_key : b_key;
merge_pair.indices[i] = index;
T c_key = keys_shared[++index];
if(p) a_key = c_key, range.a_begin = index;
else b_key = c_key, range.b_begin = index;
});
if(sync) __syncthreads();
return merge_pair;
}
/**
@private
Load arrays a and b from global memory and merge into registers.
*/
template<cudaMergeBoundType bounds,
unsigned nt, unsigned vt,
typename a_it, typename b_it, typename T, typename comp_t, unsigned S
>
__device__ auto block_merge_from_mem(
a_it a, b_it b, cudaMergeRange range_mem, unsigned tid, comp_t comp, T (&keys_shared)[S]
) {
static_assert(S >= nt * vt + 1,
"block_merge_from_mem requires temporary storage of at "
"least nt * vt + 1 items");
// Load the data into shared memory.
cuda_load_two_streams_shared<nt, vt>(
a + range_mem.a_begin, range_mem.a_count(),
b + range_mem.b_begin, range_mem.b_count(),
tid, keys_shared, true
);
// Run a merge path to find the start of the serial merge for each thread.
auto range_local = range_mem.to_local();
auto diag = vt * tid;
auto mp = cuda_merge_path<bounds>(keys_shared, range_local, diag, comp);
// Compute the ranges of the sources in shared memory. The end iterators
// of the range are inaccurate, but still facilitate exact merging, because
// only vt elements will be merged.
auto merged = cuda_serial_merge<bounds, vt>(
keys_shared, range_local.partition(mp, diag), comp
);
return merged;
};
/** @private */
template<cudaMergeBoundType bounds,
typename P, typename a_keys_it, typename b_keys_it, typename comp_t
>
void cuda_merge_path_partitions(
P&& p,
a_keys_it a, unsigned a_count,
b_keys_it b, unsigned b_count,
unsigned spacing,
comp_t comp,
unsigned* buf
) {
//int num_partitions = (int)div_up(a_count + b_count, spacing) + 1;
unsigned num_partitions = (a_count + b_count + spacing - 1) / spacing + 1;
const unsigned nt = 128;
const unsigned vt = 1;
const unsigned nv = nt * vt;
unsigned B = (num_partitions + nv - 1) / nv; // nt = 128, vt = 1
cuda_kernel<<<B, nt, 0, p.stream()>>>([=]__device__(auto tid, auto bid) {
auto range = cuda_get_tile(bid, nt * vt, num_partitions);
cuda_strided_iterate<nt, vt>([=](auto, auto j) {
auto index = range.begin + j;
auto diag = min(spacing * index, a_count + b_count);
buf[index] = cuda_merge_path<bounds>(a, a_count, b, b_count, diag, comp);
}, tid, range.count());
});
}
//template<typename segments_it>
//auto load_balance_partitions(int64_t dest_count, segments_it segments,
// int num_segments, int spacing, context_t& context) ->
// mem_t<typename std::iterator_traits<segments_it>::value_type> {
//
// typedef typename std::iterator_traits<segments_it>::value_type int_t;
// return merge_path_partitions<bounds_upper>(counting_iterator_t<int_t>(0),
// dest_count, segments, num_segments, spacing, less_t<int_t>(), context);
//}
//template<bounds_t bounds, typename keys_it>
//mem_t<int> binary_search_partitions(keys_it keys, int count, int num_items,
// int spacing, context_t& context) {
//
// int num_partitions = div_up(count, spacing) + 1;
// mem_t<int> mem(num_partitions, context);
// int* p = mem.data();
// transform([=]MGPU_DEVICE(int index) {
// int key = min(spacing * index, count);
// p[index] = binary_search<bounds>(keys, num_items, key, less_t<int>());
// }, num_partitions, context);
// return mem;
//}
/** @private */
template<
typename P,
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename comp_t
>
void cuda_merge_loop(
P&& p,
a_keys_it a_keys, a_vals_it a_vals, unsigned a_count,
b_keys_it b_keys, b_vals_it b_vals, unsigned b_count,
c_keys_it c_keys, c_vals_it c_vals,
comp_t comp,
void* ptr
) {
using E = std::decay_t<P>;
using T = typename std::iterator_traits<a_keys_it>::value_type;
using V = typename std::iterator_traits<a_vals_it>::value_type;
auto buf = static_cast<unsigned*>(ptr);
auto has_values = !std::is_same<V, cudaEmpty>::value;
cuda_merge_path_partitions<cudaMergeBoundType::LOWER>(
p, a_keys, a_count, b_keys, b_count, E::nv, comp, buf
);
unsigned B = (a_count + b_count + E::nv - 1)/ E::nv;
// we use small kernel
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
__shared__ union {
T keys[E::nv + 1];
unsigned indices[E::nv];
} shared;
// Load the range for this CTA and merge the values into register.
auto mp0 = buf[bid + 0];
auto mp1 = buf[bid + 1];
auto range = cuda_compute_merge_range(a_count, b_count, bid, E::nv, mp0, mp1);
auto merge = block_merge_from_mem<cudaMergeBoundType::LOWER, E::nt, E::vt>(
a_keys, b_keys, range, tid, comp, shared.keys
);
auto dest_offset = E::nv * bid;
cuda_reg_to_mem_thread<E::nt>(
merge.keys, tid, range.total(), c_keys + dest_offset, shared.keys
);
if(has_values) {
// Transpose the indices from thread order to strided order.
auto indices = cuda_reg_thread_to_strided<E::nt>(
merge.indices, tid, shared.indices
);
// Gather the input values and merge into the output values.
cuda_transfer_two_streams_strided<E::nt>(
a_vals + range.a_begin, range.a_count(),
b_vals + range.b_begin, range.b_count(), indices, tid,
c_vals + dest_offset
);
}
});
}
} // end of namespace tf::detail ---------------------------------------------
namespace tf {
// ----------------------------------------------------------------------------
// standalone merge algorithms
// ----------------------------------------------------------------------------
/**
@brief queries the buffer size in bytes needed to call merge kernels
@tparam P execution policy type
@param a_count number of elements in the first input array
@param b_count number of elements in the second input array
The function is used to allocate a buffer for calling
tf::cuda_merge.
*/
template <typename P>
unsigned cuda_merge_buffer_size(unsigned a_count, unsigned b_count) {
using E = std::decay_t<P>;
unsigned sz = (a_count + b_count + E::nv - 1) / E::nv + 1;
return sz*sizeof(unsigned);
}
// ----------------------------------------------------------------------------
// key-value merge
// ----------------------------------------------------------------------------
//template<
// typename P,
// typename a_keys_it, typename a_vals_it,
// typename b_keys_it, typename b_vals_it,
// typename c_keys_it, typename c_vals_it,
// typename C
//>
//void cuda_merge(
// P&& p,
// a_keys_it a_keys_first, a_vals_it a_vals_first, a_keys_it a_keys_last,
// b_keys_it b_keys_first, b_vals_it b_vals_first, b_keys_it b_keys_last,
// c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
//) {
//
// unsigned a_count = std::distance(a_keys_first, a_keys_last);
// unsigned b_count = std::distance(b_keys_first, b_keys_last);
//
// if(a_count + b_count == 0) {
// return;
// }
//
// // allocate temporary buffer
// cudaDeviceVector<std::byte> temp(cuda_merge_buffer_size<P>(a_count, b_count));
//
// detail::cuda_merge_loop(
// p,
// a_keys_first, a_vals_first, a_count,
// b_keys_first, b_vals_first, b_count,
// c_keys_first, c_vals_first, comp,
// temp.data()
// );
//
// // synchronize the execution
// p.synchronize();
//}
/**
@brief performs asynchronous key-value merge over a range of keys and values
@tparam P execution policy type
@tparam a_keys_it first key iterator type
@tparam a_vals_it first value iterator type
@tparam b_keys_it second key iterator type
@tparam b_vals_it second value iterator type
@tparam c_keys_it output key iterator type
@tparam c_vals_it output value iterator type
@tparam C comparator type
@param p execution policy
@param a_keys_first iterator to the beginning of the first key range
@param a_keys_last iterator to the end of the first key range
@param a_vals_first iterator to the beginning of the first value range
@param b_keys_first iterator to the beginning of the second key range
@param b_keys_last iterator to the end of the second key range
@param b_vals_first iterator to the beginning of the second value range
@param c_keys_first iterator to the beginning of the output key range
@param c_vals_first iterator to the beginning of the output value range
@param comp comparator
@param buf pointer to the temporary buffer
Performs a key-value merge that copies elements from
<tt>[a_keys_first, a_keys_last)</tt> and <tt>[b_keys_first, b_keys_last)</tt>
into a single range, <tt>[c_keys_first, c_keys_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending key order.
At the same time, the merge copies elements from the two associated ranges
<tt>[a_vals_first, a_vals_first + (a_keys_last - a_keys_first))</tt> and
<tt>[b_vals_first, b_vals_first + (b_keys_last - b_keys_first))</tt> into a single range,
<tt>[c_vals_first, c_vals_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first))</tt>
such that the resulting range is in ascending order
implied by each input element's associated key.
For example, assume:
+ @c a_keys = {8, 1};
+ @c a_vals = {1, 2};
+ @c b_keys = {3, 7};
+ @c b_vals = {3, 4};
After the merge, we have:
+ @c c_keys = {1, 3, 7, 8}
+ @c c_vals = {2, 3, 4, 1}
*/
template<
typename P,
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
void cuda_merge_by_key(
P&& p,
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp,
void* buf
) {
unsigned a_count = std::distance(a_keys_first, a_keys_last);
unsigned b_count = std::distance(b_keys_first, b_keys_last);
if(a_count + b_count == 0) {
return;
}
detail::cuda_merge_loop(p,
a_keys_first, a_vals_first, a_count,
b_keys_first, b_vals_first, b_count,
c_keys_first, c_vals_first, comp,
buf
);
}
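// Example (illustrative sketch, not part of the original header): a typical
// call site queries the scratch size, allocates a device buffer, and then
// launches the asynchronous merge on a stream-based execution policy. The
// iterators a_keys/a_vals/b_keys/b_vals/c_keys/c_vals and the sizes N/M below
// are assumptions made for the example only.
//
//   tf::cudaStream stream;
//   tf::cudaDefaultExecutionPolicy policy(stream);
//   auto bytes = tf::cuda_merge_buffer_size<tf::cudaDefaultExecutionPolicy>(N, M);
//   tf::cudaDeviceVector<std::byte> temp(bytes);
//   tf::cuda_merge_by_key(
//     policy,
//     a_keys, a_keys + N, a_vals,
//     b_keys, b_keys + M, b_vals,
//     c_keys, c_vals,
//     [] __device__ (int x, int y) { return x < y; },
//     temp.data()
//   );
//   stream.synchronize();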
// ----------------------------------------------------------------------------
// key-only merge
// ----------------------------------------------------------------------------
//template<typename P,
// typename a_keys_it, typename b_keys_it, typename c_keys_it, typename C
//>
//void cuda_merge(
// P&& p,
// a_keys_it a_keys_first, a_keys_it a_keys_last,
// b_keys_it b_keys_first, b_keys_it b_keys_last,
// c_keys_it c_keys_first,
// C comp
//) {
// cuda_merge(
// p,
// a_keys_first, (const cudaEmpty*)nullptr, a_keys_last,
// b_keys_first, (const cudaEmpty*)nullptr, b_keys_last,
// c_keys_first, (cudaEmpty*)nullptr, comp
// );
//}
/**
@brief performs asynchronous key-only merge over a range of keys
@tparam P execution policy type
@tparam a_keys_it first key iterator type
@tparam b_keys_it second key iterator type
@tparam c_keys_it output key iterator type
@tparam C comparator type
@param p execution policy
@param a_keys_first iterator to the beginning of the first key range
@param a_keys_last iterator to the end of the first key range
@param b_keys_first iterator to the beginning of the second key range
@param b_keys_last iterator to the end of the second key range
@param c_keys_first iterator to the beginning of the output key range
@param comp comparator
@param buf pointer to the temporary buffer
This function is equivalent to tf::cuda_merge_by_key without values.
*/
template<typename P,
typename a_keys_it, typename b_keys_it, typename c_keys_it, typename C
>
void cuda_merge(
P&& p,
a_keys_it a_keys_first, a_keys_it a_keys_last,
b_keys_it b_keys_first, b_keys_it b_keys_last,
c_keys_it c_keys_first,
C comp,
void* buf
) {
cuda_merge_by_key(
p,
a_keys_first, a_keys_last, (const cudaEmpty*)nullptr,
b_keys_first, b_keys_last, (const cudaEmpty*)nullptr,
c_keys_first, (cudaEmpty*)nullptr, comp,
buf
);
}
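// Example (illustrative sketch): the key-only overload reuses the same
// temporary buffer and execution policy as the key-value sketch above; only
// the value iterators are dropped.
//
//   tf::cuda_merge(
//     policy,
//     a_keys, a_keys + N, b_keys, b_keys + M, c_keys,
//     [] __device__ (int x, int y) { return x < y; },
//     temp.data()
//   );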
// ----------------------------------------------------------------------------
// cudaFlow merge algorithms
// ----------------------------------------------------------------------------
// Function: merge
template<typename A, typename B, typename C, typename Comp>
cudaTask cudaFlow::merge(
A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.merge(a_first, a_last, b_first, b_last, c_first, comp);
});
}
// Function: merge
template<typename A, typename B, typename C, typename Comp>
void cudaFlow::merge(
cudaTask task, A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.merge(a_first, a_last, b_first, b_last, c_first, comp);
});
}
// Function: merge_by_key
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
cudaTask cudaFlow::merge_by_key(
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
) {
return capture([=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.merge_by_key(
a_keys_first, a_keys_last, a_vals_first,
b_keys_first, b_keys_last, b_vals_first,
c_keys_first, c_vals_first,
comp
);
});
}
// Function: merge_by_key
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
void cudaFlow::merge_by_key(
cudaTask task,
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
) {
capture(task, [=](cudaFlowCapturer& cap){
cap.make_optimizer<cudaLinearCapturing>();
cap.merge_by_key(
a_keys_first, a_keys_last, a_vals_first,
b_keys_first, b_keys_last, b_vals_first,
c_keys_first, c_vals_first,
comp
);
});
}
// ----------------------------------------------------------------------------
// cudaFlowCapturer merge algorithms
// ----------------------------------------------------------------------------
// Function: merge
template<typename A, typename B, typename C, typename Comp>
cudaTask cudaFlowCapturer::merge(
A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
) {
// TODO
auto bufsz = cuda_merge_buffer_size<cudaDefaultExecutionPolicy>(
std::distance(a_first, a_last), std::distance(b_first, b_last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_merge(cudaDefaultExecutionPolicy{stream},
a_first, a_last, b_first, b_last, c_first, comp, buf.get().data()
);
});
}
// Procedure: merge (update)
template<typename A, typename B, typename C, typename Comp>
void cudaFlowCapturer::merge(
cudaTask task, A a_first, A a_last, B b_first, B b_last, C c_first, Comp comp
) {
// TODO
auto bufsz = cuda_merge_buffer_size<cudaDefaultExecutionPolicy>(
std::distance(a_first, a_last), std::distance(b_first, b_last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_merge(cudaDefaultExecutionPolicy{stream},
a_first, a_last, b_first, b_last, c_first, comp, buf.get().data()
);
});
}
// Function: merge_by_key
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
cudaTask cudaFlowCapturer::merge_by_key(
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
) {
auto bufsz = cuda_merge_buffer_size<cudaDefaultExecutionPolicy>(
std::distance(a_keys_first, a_keys_last),
std::distance(b_keys_first, b_keys_last)
);
return on([=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_merge_by_key(cudaDefaultExecutionPolicy{stream},
a_keys_first, a_keys_last, a_vals_first,
b_keys_first, b_keys_last, b_vals_first,
c_keys_first, c_vals_first,
comp,
buf.get().data()
);
});
}
// Function: merge_by_key
template<
typename a_keys_it, typename a_vals_it,
typename b_keys_it, typename b_vals_it,
typename c_keys_it, typename c_vals_it,
typename C
>
void cudaFlowCapturer::merge_by_key(
cudaTask task,
a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first,
b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first,
c_keys_it c_keys_first, c_vals_it c_vals_first, C comp
) {
auto bufsz = cuda_merge_buffer_size<cudaDefaultExecutionPolicy>(
std::distance(a_keys_first, a_keys_last),
std::distance(b_keys_first, b_keys_last)
);
on(task, [=, buf=MoC{cudaDeviceVector<std::byte>(bufsz)}]
(cudaStream_t stream) mutable {
cuda_merge_by_key(cudaDefaultExecutionPolicy{stream},
a_keys_first, a_keys_last, a_vals_first,
b_keys_first, b_keys_last, b_vals_first,
c_keys_first, c_vals_first,
comp,
buf.get().data()
);
});
}
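// Example (illustrative sketch, not part of the original header): within a
// taskflow program the buffer management above is handled automatically, so a
// merge is usually created as a cudaFlow task. The containers a/b/c and the
// sizes N/M are assumptions for the example.
//
//   tf::Taskflow taskflow;
//   tf::Executor executor;
//   taskflow.emplace([=](tf::cudaFlow& cf){
//     cf.merge(a, a + N, b, b + M, c,
//              [] __device__ (int x, int y) { return x < y; });
//   });
//   executor.run(taskflow).wait();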
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Samples/3_CUDA_Features/jacobiCudaGraphs/jacobi.h | //=========================================================
// Modifications Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: BSD-3-Clause
//=========================================================
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef JACOBI_H
#define JACOBI_H
#define N_ROWS 512
#endif | h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Samples/3_CUDA_Features/jacobiCudaGraphs/jacobi.dp.cpp | //=========================================================
// Modifications Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: BSD-3-Clause
//=========================================================
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <helper_cuda.h>
#include <vector>
#include "jacobi.h"
#include <taskflow/sycl/syclflow.hpp>
// Number of rows of square-matrix A processed by each CTA (ROWS_PER_CTA below).
// This can be at most 32 and only a power of 2 (i.e., 2/4/8/16/32).
#define ROWS_PER_CTA 16
#define SUB_GRP_SIZE 32
#if !defined(DPCT_COMPATIBILITY_TEMP) || DPCT_COMPATIBILITY_TEMP >= 600
#else
__device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN !=
// NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
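// The kernel below implements one Jacobi sweep
//   x_new[i] = x[i] + (b[i] - sum_j A[i][j] * x[j]) / A[i][i]
// with each work-group owning ROWS_PER_CTA consecutive rows: the per-row dot
// products are reduced across a sub-group and subtracted from the cached b
// values, and the absolute corrections |dx| are accumulated into *sum as the
// convergence metric checked by the host.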
static void JacobiMethod(const float *A, const double *b,
const float conv_threshold, double *x,
double *x_new, double *sum,
const sycl::nd_item<3> &item_ct1,
double *x_shared, double *b_shared) {
// Handle to thread block group
auto cta = item_ct1.get_group();
for (int i = item_ct1.get_local_id(2); i < N_ROWS;
i += item_ct1.get_local_range(2)) {
x_shared[i] = x[i];
}
if (item_ct1.get_local_id(2) < ROWS_PER_CTA) {
int k = item_ct1.get_local_id(2);
for (int i = k + (item_ct1.get_group(2) * ROWS_PER_CTA);
(k < ROWS_PER_CTA) && (i < N_ROWS);
k += ROWS_PER_CTA, i += ROWS_PER_CTA) {
b_shared[i % (ROWS_PER_CTA + 1)] = b[i];
}
}
item_ct1.barrier();
sycl::sub_group tile32 = item_ct1.get_sub_group();
for (int k = 0, i = item_ct1.get_group(2) * ROWS_PER_CTA;
(k < ROWS_PER_CTA) && (i < N_ROWS); k++, i++) {
double rowThreadSum = 0.0;
for (int j = item_ct1.get_local_id(2); j < N_ROWS;
j += item_ct1.get_local_range(2)) {
rowThreadSum += (A[i * N_ROWS + j] * x_shared[j]);
}
for (int offset = tile32.get_local_linear_range() / 2;
offset > 0; offset /= 2) {
rowThreadSum += sycl::shift_group_left(tile32,
rowThreadSum, offset);
}
if (tile32.get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
&b_shared[i % (ROWS_PER_CTA + 1)], -rowThreadSum);
}
}
item_ct1.barrier();
if (item_ct1.get_local_id(2) < ROWS_PER_CTA) {
dpct::experimental::logical_group tile8 = dpct::experimental::logical_group(
item_ct1, item_ct1.get_group(), ROWS_PER_CTA);
double temp_sum = 0.0;
int k = item_ct1.get_local_id(2);
for (int i = k + (item_ct1.get_group(2) * ROWS_PER_CTA);
(k < ROWS_PER_CTA) && (i < N_ROWS);
k += ROWS_PER_CTA, i += ROWS_PER_CTA) {
double dx = b_shared[i % (ROWS_PER_CTA + 1)];
dx /= A[i * N_ROWS + i];
x_new[i] = (x_shared[i] + dx);
temp_sum += sycl::fabs(dx);
}
for (int offset = tile8.get_local_linear_range() / 2; offset > 0;
offset /= 2) {
temp_sum += dpct::shift_sub_group_left(tile32, temp_sum,
offset, 8);
}
if (tile8.get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
sum, temp_sum);
}
}
}
// Thread block size for finalError kernel should be multiple of 32
static void finalError(double *x, double *g_sum,
const sycl::nd_item<3> &item_ct1, uint8_t *dpct_local) {
// Handle to thread block group
auto warpSum = (double *)dpct_local;
double sum = 0.0;
int globalThreadId = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
item_ct1.get_local_id(2);
for (int i = globalThreadId; i < N_ROWS;
i += item_ct1.get_local_range(2) * item_ct1.get_group_range(2)) {
double d = x[i] - 1.0;
sum += sycl::fabs(d);
}
sycl::sub_group tile32 = item_ct1.get_sub_group();
for (int offset = tile32.get_local_linear_range() / 2;
offset > 0; offset /= 2) {
sum += sycl::shift_group_left(tile32, sum, offset);
}
if (tile32.get_local_linear_id() == 0) {
warpSum[item_ct1.get_local_id(2) /
tile32.get_local_range().get(0)] = sum;
}
item_ct1.barrier();
double blockSum = 0.0;
if (item_ct1.get_local_id(2) <
(item_ct1.get_local_range(2) /
tile32.get_local_range().get(0))) {
blockSum = warpSum[item_ct1.get_local_id(2)];
}
if (item_ct1.get_local_id(2) < 32) {
for (int offset = tile32.get_local_linear_range() / 2;
offset > 0; offset /= 2) {
blockSum +=
sycl::shift_group_left(tile32, blockSum, offset);
}
if (tile32.get_local_linear_id() == 0) {
dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
g_sum, blockSum);
}
}
}
double JacobiMethodGpuCudaGraphExecKernelSetParams(
const float *A, const double *b, const float conv_threshold,
const int max_iter, double *x, double *x_new, sycl::queue q) {
// CTA size
sycl::range<3> nthreads(1, 1, 256);
// grid size
sycl::range<3> nblocks(1, 1, (N_ROWS / ROWS_PER_CTA) + 2);
tf::Taskflow tflow;
tf::Executor exe;
double sum = 0.0;
double *d_sum = NULL;
double *params[] = {x, x_new};
checkCudaErrors(DPCT_CHECK_ERROR(
d_sum = sycl::malloc_device<double>(1, q)));
int k = 0;
tf::Task syclDeviceTasks =
tflow
.emplace_on(
[&](tf::syclFlow &sf) {
tf::syclTask dsum_memset =
sf.memset(d_sum, 0, sizeof(double)).name("dsum_memset");
tf::syclTask jM_kernel =
sf.on([=](sycl::handler &cgh) {
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(N_ROWS), cgh);
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(ROWS_PER_CTA + 1), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [
[intel::reqd_sub_group_size(SUB_GRP_SIZE)]] {
JacobiMethod(A, b, conv_threshold, params[k % 2],
params[(k + 1) % 2], d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
}).name("jacobi_kernel");
tf::syclTask sum_d2h =
sf.memcpy(&sum, d_sum, sizeof(double)).name("sum_d2h");
q.wait();
jM_kernel.succeed(dsum_memset).precede(sum_d2h);
},
q)
.name("syclTasks");
for (k = 0; k < max_iter; k++) {
exe.run(tflow).wait();
if (sum <= conv_threshold) {
q.memset(d_sum, 0, sizeof(double));
nblocks[2] = (N_ROWS / nthreads[2]) + 1;
size_t sharedMemSize =
((nthreads[2] / SUB_GRP_SIZE) + 1) * sizeof(double);
if ((k & 1) == 0) {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads), [=
](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
finalError(x_new, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
} else {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads), [=
](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
finalError(x, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
}
q.memcpy(&sum, d_sum, sizeof(double)).wait();
printf("Device iterations : %d\n", k + 1);
printf("Device error : %.3e\n", sum);
break;
}
}
sycl::free(d_sum, q);
return sum;
}
double JacobiMethodGpu(const float *A, const double *b,
const float conv_threshold, const int max_iter,
double *x, double *x_new, sycl::queue q) {
// CTA size
sycl::range<3> nthreads(1, 1, 256);
// grid size
sycl::range<3> nblocks(1, 1, (N_ROWS / ROWS_PER_CTA) + 2);
double sum = 0.0;
double *d_sum;
checkCudaErrors(DPCT_CHECK_ERROR(
d_sum = sycl::malloc_device<double>(1, q)));
int k = 0;
for (k = 0; k < max_iter; k++) {
checkCudaErrors(DPCT_CHECK_ERROR(q.memset(d_sum, 0, sizeof(double))));
if ((k & 1) == 0) {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(N_ROWS), cgh);
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(ROWS_PER_CTA + 1), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(SUB_GRP_SIZE)]] {
JacobiMethod(A, b, conv_threshold, x, x_new, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
} else {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<double, 1> x_shared_acc_ct1(
sycl::range<1>(N_ROWS), cgh);
sycl::local_accessor<double, 1> b_shared_acc_ct1(
sycl::range<1>(ROWS_PER_CTA + 1), cgh);
cgh.parallel_for(
sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(SUB_GRP_SIZE)]] {
JacobiMethod(A, b, conv_threshold, x_new, x, d_sum, item_ct1,
x_shared_acc_ct1.get_pointer(),
b_shared_acc_ct1.get_pointer());
});
});
}
checkCudaErrors(
DPCT_CHECK_ERROR(q.memcpy(&sum, d_sum, sizeof(double))));
checkCudaErrors(DPCT_CHECK_ERROR(q.wait()));
if (sum <= conv_threshold) {
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_sum, 0, sizeof(double))));
nblocks[2] = (N_ROWS / nthreads[2]) + 1;
size_t sharedMemSize = ((nthreads[2] / 32) + 1) * sizeof(double);
if ((k & 1) == 0) {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(SUB_GRP_SIZE)]] {
finalError(x_new, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
} else {
q.submit([&](sycl::handler &cgh) {
sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
sycl::range<1>(sharedMemSize), cgh);
cgh.parallel_for(sycl::nd_range<3>(nblocks * nthreads, nthreads),
[=](sycl::nd_item<3> item_ct1)
[[intel::reqd_sub_group_size(SUB_GRP_SIZE)]] {
finalError(x, d_sum, item_ct1,
dpct_local_acc_ct1.get_pointer());
});
});
}
checkCudaErrors(
DPCT_CHECK_ERROR(q.memcpy(&sum, d_sum, sizeof(double))));
checkCudaErrors(DPCT_CHECK_ERROR(q.wait()));
printf("Device iterations : %d\n", k + 1);
printf("Device error : %.3e\n", sum);
break;
}
}
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_sum, q)));
return sum;
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/Samples/3_CUDA_Features/jacobiCudaGraphs/main.cpp.dp.cpp | //=========================================================
// Modifications Copyright © 2022 Intel Corporation
//
// SPDX-License-Identifier: BSD-3-Clause
//=========================================================
/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// This sample demonstrates Instantiated CUDA Graph Update
// with Jacobi Iterative Method in 3 different methods:
// 1 - JacobiMethodGpuCudaGraphExecKernelSetParams() - CUDA Graph with
// cudaGraphExecKernelNodeSetParams() 2 - JacobiMethodGpuCudaGraphExecUpdate() -
// CUDA Graph with cudaGraphExecUpdate() 3 - JacobiMethodGpu() - Non CUDA Graph
// method
// Jacobi method on a linear system A*x = b,
// where A is diagonally dominant and the exact solution consists
// of all ones.
// The dimension N_ROWS is included in jacobi.h
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "jacobi.h"
// Run the Jacobi method for A*x = b on GPU with CUDA Graph -
// cudaGraphExecKernelNodeSetParams().
extern double JacobiMethodGpuCudaGraphExecKernelSetParams(
const float *A, const double *b, const float conv_threshold,
const int max_iter, double *x, double *x_new, sycl::queue q);
// Run the Jacobi method for A*x = b on GPU without CUDA Graph.
extern double JacobiMethodGpu(const float *A, const double *b,
const float conv_threshold, const int max_iter,
double *x, double *x_new, sycl::queue q);
// creates N_ROWS x N_ROWS matrix A with N_ROWS+1 on the diagonal and 1
// elsewhere. The elements of the right-hand side b all equal 2*N_ROWS, hence the
// exact solution x to A*x = b is a vector of ones.
void createLinearSystem(float *A, double *b);
// Run the Jacobi method for A*x = b on CPU.
void JacobiMethodCPU(float *A, double *b, float conv_threshold, int max_iter,
int *numit, double *x);
int main(int argc, char **argv) {
if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
printf("Command line: jacobiCudaGraphs [-option]\n");
printf("Valid options:\n");
printf(
"-gpumethod=<0 or 1> : 0 - [Default] "
"JacobiMethodGpuCudaGraphExecKernelSetParams\n");
printf(" : 1 - JacobiMethodGpu - Non CUDA Graph\n");
printf("-device=device_num : cuda device id");
printf("-help : Output a help message\n");
exit(EXIT_SUCCESS);
}
int gpumethod = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "gpumethod")) {
gpumethod = getCmdLineArgumentInt(argc, (const char **)argv, "gpumethod");
if (gpumethod < 0 || gpumethod > 1) {
printf("Error: gpumethod must be 0 or 1 or 2, gpumethod=%d is invalid\n",
gpumethod);
exit(EXIT_SUCCESS);
}
}
sycl::queue q{aspect_selector(sycl::aspect::fp64),
sycl::property::queue::in_order()};
std::cout << "\nRunning on "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
double *b = NULL;
float *A = NULL;
checkCudaErrors(DPCT_CHECK_ERROR(
b = sycl::malloc_host<double>(N_ROWS, q)));
memset(b, 0, N_ROWS * sizeof(double));
checkCudaErrors(
DPCT_CHECK_ERROR(A = sycl::malloc_host<float>(
N_ROWS * N_ROWS, q)));
memset(A, 0, N_ROWS * N_ROWS * sizeof(float));
createLinearSystem(A, b);
double *x = NULL;
// start with array of all zeroes
x = (double *)calloc(N_ROWS, sizeof(double));
float conv_threshold = 1.0e-2;
int max_iter = 4 * N_ROWS * N_ROWS;
int cnt = 0;
// create timer
StopWatchInterface *timerCPU = NULL, *timerGpu = NULL;
sdkCreateTimer(&timerCPU);
sdkStartTimer(&timerCPU);
JacobiMethodCPU(A, b, conv_threshold, max_iter, &cnt, x);
double sum = 0.0;
// Compute error
for (int i = 0; i < N_ROWS; i++) {
double d = x[i] - 1.0;
sum += fabs(d);
}
sdkStopTimer(&timerCPU);
printf("CPU iterations : %d\n", cnt);
printf("CPU error : %.3e\n", sum);
printf("CPU Processing time: %f (ms)\n", sdkGetTimerValue(&timerCPU));
float *d_A;
double *d_b, *d_x, *d_x_new;
checkCudaErrors(DPCT_CHECK_ERROR(
d_b = sycl::malloc_device<double>(N_ROWS, q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_A = (float *)sycl::malloc_device(sizeof(float) * N_ROWS * N_ROWS,
q)));
checkCudaErrors(DPCT_CHECK_ERROR(
d_x = sycl::malloc_device<double>(N_ROWS, q)));
checkCudaErrors(DPCT_CHECK_ERROR(d_x_new = sycl::malloc_device<double>(
N_ROWS, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_x, 0, sizeof(double) * N_ROWS)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memset(d_x_new, 0, sizeof(double) * N_ROWS)));
checkCudaErrors(DPCT_CHECK_ERROR(
q.memcpy(d_A, A, sizeof(float) * N_ROWS * N_ROWS)));
checkCudaErrors(
DPCT_CHECK_ERROR(q.memcpy(d_b, b, sizeof(double) * N_ROWS)));
q.wait();
sdkCreateTimer(&timerGpu);
sdkStartTimer(&timerGpu);
double sumGPU = 0.0;
if (gpumethod == 0) {
sumGPU = JacobiMethodGpuCudaGraphExecKernelSetParams(
d_A, d_b, conv_threshold, max_iter, d_x, d_x_new, q);
} else if (gpumethod == 1) {
sumGPU = JacobiMethodGpu(d_A, d_b, conv_threshold, max_iter, d_x, d_x_new,
q);
}
sdkStopTimer(&timerGpu);
printf("Device Processing time: %f (ms)\n", sdkGetTimerValue(&timerGpu));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_b, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_A, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(d_x, q)));
checkCudaErrors(
DPCT_CHECK_ERROR(sycl::free(d_x_new, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(A, q)));
checkCudaErrors(DPCT_CHECK_ERROR(sycl::free(b, q)));
printf("&&&& jacobiCudaGraphs %s\n",
(fabs(sum - sumGPU) < conv_threshold) ? "PASSED" : "FAILED");
return (fabs(sum - sumGPU) < conv_threshold) ? EXIT_SUCCESS : EXIT_FAILURE;
}
void createLinearSystem(float *A, double *b) {
int i, j;
for (i = 0; i < N_ROWS; i++) {
b[i] = 2.0 * N_ROWS;
for (j = 0; j < N_ROWS; j++) A[i * N_ROWS + j] = 1.0;
A[i * N_ROWS + i] = N_ROWS + 1.0;
}
}
void JacobiMethodCPU(float *A, double *b, float conv_threshold, int max_iter,
int *num_iter, double *x) {
double *x_new;
x_new = (double *)calloc(N_ROWS, sizeof(double));
int k;
for (k = 0; k < max_iter; k++) {
double sum = 0.0;
for (int i = 0; i < N_ROWS; i++) {
double temp_dx = b[i];
for (int j = 0; j < N_ROWS; j++) temp_dx -= A[i * N_ROWS + j] * x[j];
temp_dx /= A[i * N_ROWS + i];
x_new[i] += temp_dx;
sum += fabs(temp_dx);
}
for (int i = 0; i < N_ROWS; i++) x[i] = x_new[i];
if (sum <= conv_threshold) break;
}
*num_iter = k + 1;
free(x_new);
}
| cpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/ccl_utils.hpp | //==---- ccl_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_CCL_UTILS_HPP__
#define __DPCT_CCL_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/ccl.hpp>
#include <unordered_map>
#include <memory>
#include "device.hpp"
namespace dpct {
namespace ccl {
namespace detail {
/// Get stored kvs with specified kvs address.
inline std::shared_ptr<oneapi::ccl::kvs> &
get_kvs(const oneapi::ccl::kvs::address_type &addr) {
struct hash {
std::size_t operator()(const oneapi::ccl::kvs::address_type &in) const {
return std::hash<std::string_view>()(std::string_view(in.data(), in.size()));
}
};
static std::unordered_map<oneapi::ccl::kvs::address_type,
std::shared_ptr<oneapi::ccl::kvs>, hash>
kvs_map;
return kvs_map[addr];
}
/// Help class to init ccl environment.
class ccl_init_helper {
public:
ccl_init_helper() { oneapi::ccl::init(); }
};
} // namespace detail
/// Get concatenated library version as an integer.
static inline int get_version() {
oneapi::ccl::init();
auto ver = oneapi::ccl::get_library_version();
return ver.major * 10000 + ver.minor * 100 + ver.update;
}
/// Create main kvs and return its address.
static inline oneapi::ccl::kvs::address_type create_kvs_address() {
oneapi::ccl::init();
auto ptr = oneapi::ccl::create_main_kvs();
auto addr = ptr->get_address();
detail::get_kvs(addr) = ptr;
return addr;
}
/// Get the stored kvs with \p addr if it exists. Otherwise, create a kvs with \p addr.
static inline std::shared_ptr<oneapi::ccl::kvs>
create_kvs(const oneapi::ccl::kvs::address_type &addr) {
oneapi::ccl::init();
auto &ptr = detail::get_kvs(addr);
if (!ptr)
ptr = oneapi::ccl::create_kvs(addr);
return ptr;
}
/// dpct communicator extension
class communicator_wrapper : public dpct::ccl::detail::ccl_init_helper {
public:
communicator_wrapper(
int size, int rank, oneapi::ccl::kvs::address_type id,
const oneapi::ccl::comm_attr &attr = oneapi::ccl::default_comm_attr)
: _device_comm(oneapi::ccl::create_device(
static_cast<sycl::device &>(dpct::get_current_device()))),
_context_comm(oneapi::ccl::create_context(dpct::get_default_context())),
_comm(oneapi::ccl::create_communicator(
size, rank, _device_comm, _context_comm, dpct::ccl::create_kvs(id),
attr)) {
_queue_init = false;
_ccl_stream_ptr = nullptr;
}
~communicator_wrapper() {
delete _ccl_stream_ptr;
};
/// Return the rank in a oneapi::ccl::communicator
/// \returns The rank corresponding to communicator object
int rank() const {
return _comm.rank();
}
/// Retrieves the number of ranks in oneapi::ccl::communicator
/// \returns The number of ranks
int size() const {
return _comm.size();
}
/// Return underlying native device, which was used in oneapi::ccl::communicator
sycl::device get_device() const {
return _comm.get_device().get_native();
}
/// \brief allreduce is a collective communication operation that performs the global reduction operation
/// on values from all ranks of communicator and distributes the result back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void allreduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::allreduce(sendbuff, recvbuff, count, dtype, rtype,
_comm, stream);
},
queue_ptr);
}
/// \brief reduce is a collective communication operation that performs the
/// global reduction operation on values from all ranks of the communicator
/// and returns the result to the root rank.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result,
/// must have the same dimension as @c send_buf
/// \param count the number of elements of type @c dtype in @c send_buf and @c recv_buf
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param root the rank that gets the result of reduction
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce(const void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
int root, sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce(sendbuff, recvbuff, count, dtype, rtype,
root, _comm, stream);
},
queue_ptr);
}
/// \brief broadcast is a collective communication operation that broadcasts data
/// from one rank of communicator (denoted as root) to all other ranks.
/// Only supports the in-place operation.
/// \param send_buf the buffer with @c count elements of @c dtype that stores
/// local data to be broadcast
/// \param recv_buf [out] the buffer to store the broadcast result
/// \param count the number of elements of type @c dtype in @c buf
/// \param dtype the datatype of elements in @c buf
/// \param root the rank that broadcasts @c buf
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void broadcast(void *sendbuff, void *recvbuff, size_t count,
oneapi::ccl::datatype dtype, int root,
sycl::queue *queue_ptr) {
if (sendbuff != recvbuff) {
throw std::runtime_error(
"oneCCL broadcast only support in-place operation. "
"send_buf and recv_buf must be same.");
return;
}
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::broadcast(recvbuff, count, dtype, root, _comm,
stream);
},
queue_ptr);
}
/// \brief reduce_scatter is a collective communication operation that performs the global reduction operation
/// on values from all ranks of the communicator and scatters the result in blocks back to all ranks.
/// \param send_buf the buffer with @c count elements of @c dtype that stores local data to be reduced
/// \param recv_buf [out] the buffer to store reduced result, must have the same dimension as @c send_buf
/// \param recv_count the number of elements of type @c dtype in receive block
/// \param dtype the datatype of elements in @c send_buf and @c recv_buf
/// \param rtype the type of the reduction operation to be applied
/// \param queue_ptr a sycl::queue ptr associated with the operation
/// \return @ref void
void reduce_scatter(const void *sendbuff, void *recvbuff, size_t recv_count,
oneapi::ccl::datatype dtype, oneapi::ccl::reduction rtype,
sycl::queue *queue_ptr) {
call_func_wrapper(
[=](const oneapi::ccl::stream &stream) {
return oneapi::ccl::reduce_scatter(sendbuff, recvbuff, recv_count,
dtype, rtype, _comm, stream);
},
queue_ptr);
}
private:
oneapi::ccl::device _device_comm;
oneapi::ccl::context _context_comm;
oneapi::ccl::communicator _comm;
sycl::queue _queue;
bool _queue_init;
oneapi::ccl::stream *_ccl_stream_ptr;
template <class Fn>
void call_func_wrapper(Fn func, sycl::queue *qptr) {
if (_queue_init && *qptr != _queue) {
call_func_async(func, qptr);
} else {
if(!_queue_init) {
_queue = *qptr;
_queue_init = true;
_ccl_stream_ptr = new oneapi::ccl::stream(oneapi::ccl::create_stream(_queue));
}
std::invoke(func, *_ccl_stream_ptr);
}
}
class call_func_async {
sycl::queue *_q_ptr;
struct call_async_impl {
oneapi::ccl::stream _ccl_stream_impl;
oneapi::ccl::event _ccl_event_impl;
template <class Fn>
explicit call_async_impl(Fn func, sycl::queue *qptr)
: _ccl_stream_impl(oneapi::ccl::create_stream(*qptr)),
_ccl_event_impl(std::invoke(func, _ccl_stream_impl)) {}
};
call_async_impl *_imp;
public:
template <class Fn>
explicit call_func_async(Fn func, sycl::queue *qptr)
: _q_ptr(qptr),
_imp(new call_async_impl(func, qptr)) {}
~call_func_async() {
_q_ptr->submit([&](sycl::handler &cgh)
{ cgh.host_task([=]
{
_imp->_ccl_event_impl.wait();
delete _imp; }); });
}
};
};
typedef dpct::ccl::communicator_wrapper *comm_ptr;
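// Example (illustrative sketch, not part of this header): one possible way to
// drive the wrapper, assuming `size`, `rank`, an exchanged kvs address `addr`,
// device pointers `send`/`recv`, an element count `count`, and a sycl::queue
// `q` are already set up.
//
//   dpct::ccl::communicator_wrapper comm(size, rank, addr);
//   comm.allreduce(send, recv, count, oneapi::ccl::datatype::float32,
//                  oneapi::ccl::reduction::sum, &q);
//   q.wait();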
} // namespace ccl
} // namespace dpct
#endif // __DPCT_CCL_UTILS_HPP__ | hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/util.hpp | //==---- util.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_UTIL_HPP__
#define __DPCT_UTIL_HPP__
#include <sycl/sycl.hpp>
#include <complex>
#include <type_traits>
#include <cassert>
#include <cstdint>
// TODO: Remove these function definitions once they exist in the DPC++ compiler
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffle(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleDown(__spv::Scope::Flag, T, unsigned) noexcept;
template <typename T>
__SYCL_CONVERGENT__ extern SYCL_EXTERNAL __SYCL_EXPORT __attribute__((noduplicate))
T __spirv_GroupNonUniformShuffleUp(__spv::Scope::Flag, T, unsigned) noexcept;
#endif
namespace dpct {
namespace detail {
template <typename tag, typename T> class generic_error_type {
public:
generic_error_type() = default;
generic_error_type(T value) : value{value} {}
operator T() const { return value; }
private:
T value;
};
} // namespace detail
using err0 = detail::generic_error_type<struct err0_tag, int>;
using err1 = detail::generic_error_type<struct err1_tag, int>;
template <int... Ints> struct integer_sequence {};
template <int Size, int... Ints>
struct make_index_sequence
: public make_index_sequence<Size - 1, Size - 1, Ints...> {};
template <int... Ints>
struct make_index_sequence<0, Ints...> : public integer_sequence<Ints...> {};
template <typename T> struct DataType { using T2 = T; };
template <typename T> struct DataType<sycl::vec<T, 2>> {
using T2 = std::complex<T>;
};
inline void matrix_mem_copy(void *to_ptr, const void *from_ptr, int to_ld,
int from_ld, int rows, int cols, int elem_size,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
if (to_ptr == from_ptr && to_ld == from_ld) {
return;
}
if (to_ld == from_ld) {
size_t copy_size = elem_size * ((cols - 1) * (size_t)to_ld + rows);
if (async)
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction);
else
detail::dpct_memcpy(queue, (void *)to_ptr, (void *)from_ptr,
copy_size, direction).wait();
} else {
if (async)
detail::dpct_memcpy(queue, to_ptr, from_ptr, elem_size * to_ld,
elem_size * from_ld, elem_size * rows, cols,
direction);
else
sycl::event::wait(detail::dpct_memcpy(
queue, to_ptr, from_ptr, elem_size * to_ld, elem_size * from_ld,
elem_size * rows, cols, direction));
}
}
/// Copy matrix data. The default leading dimension is column.
/// \param [out] to_ptr A pointer points to the destination location.
/// \param [in] from_ptr A pointer points to the source location.
/// \param [in] to_ld The leading dimension of the destination matrix.
/// \param [in] from_ld The leading dimension of the source matrix.
/// \param [in] rows The number of rows of the source matrix.
/// \param [in] cols The number of columns of the source matrix.
/// \param [in] direction The direction of the data copy.
/// \param [in] queue The queue where the routine should be executed.
/// \param [in] async If this argument is true, the return of the function
/// does NOT guarantee the copy is completed.
template <typename T>
inline void matrix_mem_copy(T *to_ptr, const T *from_ptr, int to_ld,
int from_ld, int rows, int cols,
memcpy_direction direction = automatic,
sycl::queue &queue = dpct::get_default_queue(),
bool async = false) {
using Ty = typename DataType<T>::T2;
matrix_mem_copy((void *)to_ptr, (void *)from_ptr, to_ld, from_ld, rows, cols,
sizeof(Ty), direction, queue, async);
}
/// Cast the high or low 32 bits of a double to an integer.
/// \param [in] d The double value.
/// \param [in] use_high32 Cast the high 32 bits of the double if true;
/// otherwise cast the low 32 bits.
inline int cast_double_to_int(double d, bool use_high32 = true) {
sycl::vec<double, 1> v0{d};
auto v1 = v0.as<sycl::int2>();
if (use_high32)
return v1[1];
return v1[0];
}
/// Combine two integers, the first as the high 32 bits and the second
/// as the low 32 bits, into a double.
/// \param [in] high32 The integer as the high 32 bits
/// \param [in] low32 The integer as the low 32 bits
inline double cast_ints_to_double(int high32, int low32) {
sycl::int2 v0{low32, high32};
auto v1 = v0.as<sycl::vec<double, 1>>();
return v1;
}
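// Example (illustrative): the two casts above round-trip a double through its
// 32-bit halves, e.g.
//   int hi = cast_double_to_int(3.5);        // high 32 bits
//   int lo = cast_double_to_int(3.5, false); // low 32 bits
//   double d = cast_ints_to_double(hi, lo);  // d == 3.5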
/// Reverse the bit order of an unsigned integer
/// \param [in] a Input unsigned integer value
/// \returns Value of a with the bit order reversed
template <typename T> inline T reverse_bits(T a) {
static_assert(std::is_unsigned<T>::value && std::is_integral<T>::value,
"unsigned integer required");
if (!a)
return 0;
T mask = 0;
size_t count = 4 * sizeof(T);
mask = ~mask >> count;
while (count) {
a = ((a & mask) << count) | ((a & ~mask) >> count);
count = count >> 1;
mask = mask ^ (mask << count);
}
return a;
}
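// Example (illustrative): reverse_bits<std::uint8_t>(0x06) == 0x60, i.e.
// bit pattern 00000110 becomes 01100000.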
/// \param [in] a The first value contains 4 bytes
/// \param [in] b The second value contains 4 bytes
/// \param [in] s The selector value, only lower 16bit used
/// \returns the permutation result of 4 bytes selected in the way
/// specified by \p s from \p a and \p b
inline unsigned int byte_level_permute(unsigned int a, unsigned int b,
unsigned int s) {
unsigned int ret;
ret =
((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff) << 8) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff) << 16) |
(((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff) << 24);
return ret;
}
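// Example (illustrative): each selector nibble indexes a byte 0-7 of the
// concatenation (b:a), so
//   byte_level_permute(0x33221100, 0x77665544, 0x3210) == 0x33221100  // all from a
//   byte_level_permute(0x33221100, 0x77665544, 0x7654) == 0x77665544  // all from b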
/// Find the position of the least significant set bit in an integer.
/// ffs(0) returns 0.
///
/// \param [in] a Input integer value
/// \returns The position
template <typename T> inline int ffs(T a) {
static_assert(std::is_integral<T>::value, "integer required");
return (sycl::ctz(a) + 1) % (sizeof(T) * 8 + 1);
}
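// Example (illustrative): ffs(0) == 0, ffs(1) == 1, ffs(8) == 4; positions are
// reported 1-based from the least significant bit.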
/// select_from_sub_group allows work-items to obtain a copy of a value held by
/// any other work-item in the sub_group. The input sub_group will be divided
/// into several logical sub_groups with id range [0, \p logical_sub_group_size
/// - 1]. Each work-item in logical sub_group gets value from another work-item
/// whose id is \p remote_local_id. If \p remote_local_id is outside the
/// logical sub_group id range, \p remote_local_id will modulo with \p
/// logical_sub_group_size. The \p logical_sub_group_size must be a power of 2
/// and not exceed input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
return sycl::select_from_group(
g, x, start_index + remote_local_id % logical_sub_group_size);
}
/// shift_sub_group_left move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the left. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is caller's id adds \p delta. If calculated id is outside the logical
/// sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
T result = sycl::shift_group_left(g, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
}
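// Example (illustrative): with logical_sub_group_size == 8 and delta == 2,
// work-item 5 receives the value held by work-item 7, while work-items 6 and 7
// keep their own values because their source would fall outside the logical
// sub-group.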
/// shift_sub_group_right move values held by the work-items in a sub_group
/// directly to another work-item in the sub_group, by shifting values a fixed
/// number of work-items to the right. The input sub_group will be divided into
/// several logical_sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical_sub_group gets value from another work-item whose
/// id is caller's id subtracts \p delta. If calculated id is outside the
/// logical sub_group id range, the work-item will get value from itself. The \p
/// logical_sub_group_size must be a power of 2 and not exceed input sub_group
/// size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
T result = sycl::shift_group_right(g, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
}
/// permute_sub_group_by_xor permutes values by exchanging values held by pairs
/// of work-items identified by computing the bitwise exclusive OR of the
/// work-item id and some fixed mask. The input sub_group will be divided into
/// several logical sub_groups with id range [0, \p logical_sub_group_size - 1].
/// Each work-item in logical sub_group gets value from another work-item whose
/// id is bitwise exclusive OR of the caller's id and \p mask. If calculated id
/// is outside the logical sub_group id range, the work-item will get value from
/// itself. The \p logical_sub_group_size must be a power of 2 and not exceed
/// input sub_group size.
/// \tparam T Input value type
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
return sycl::select_from_group(g, x,
target_offset < logical_sub_group_size
? start_index + target_offset
: id);
}
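// Example (illustrative): with mask == 1, adjacent work-items exchange values
// (0<->1, 2<->3, ...); with mask == logical_sub_group_size / 2, the two halves
// of each logical sub-group swap, the butterfly pattern commonly used in
// reductions.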
namespace experimental {
/// Masked version of select_from_sub_group, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// the call with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] remote_local_id Input source work item id
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T select_from_sub_group(unsigned int member_mask,
sycl::sub_group g, T x, int remote_local_id,
int logical_sub_group_size = 32) {
unsigned int start_index =
g.get_local_linear_id() / logical_sub_group_size * logical_sub_group_size;
unsigned logical_remote_id =
start_index + remote_local_id % logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)remote_local_id;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_left, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// the call with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_left(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int end_index =
(id / logical_sub_group_size + 1) * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleDown(__spv::Scope::Subgroup, x, delta);
if ((id + delta) >= end_index) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_left "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of shift_sub_group_right, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// the call with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] delta Input delta
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T shift_sub_group_right(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int delta,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
T result = __spirv_GroupNonUniformShuffleUp(__spv::Scope::Subgroup, x, delta);
if ((id - start_index) < delta) {
result = x;
}
return result;
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of shift_sub_group_right "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)delta;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
/// Masked version of permute_sub_group_by_xor, which executes a masked sub-group
/// operation. The parameter member_mask indicates the work-items participating
/// in the call: the n-th bit is set to 1 if the work-item with id n
/// participates in the call. All work-items named in member_mask must execute
/// the call with the same member_mask, or the result is undefined.
/// \tparam T Input value type
/// \param [in] member_mask Input mask
/// \param [in] g Input sub_group
/// \param [in] x Input value
/// \param [in] mask Input mask
/// \param [in] logical_sub_group_size Input logical sub_group size
/// \returns The result
template <typename T>
T permute_sub_group_by_xor(unsigned int member_mask,
sycl::sub_group g, T x, unsigned int mask,
int logical_sub_group_size = 32) {
unsigned int id = g.get_local_linear_id();
unsigned int start_index =
id / logical_sub_group_size * logical_sub_group_size;
unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
unsigned logical_remote_id = (target_offset < logical_sub_group_size) ? start_index + target_offset : id;
#if defined(__SYCL_DEVICE_ONLY__) && defined(__INTEL_LLVM_COMPILER)
#if defined(__SPIR__)
return __spirv_GroupNonUniformShuffle(__spv::Scope::Subgroup, x, logical_remote_id);
#else
throw sycl::exception(sycl::errc::runtime, "Masked version of permute_sub_group_by_xor "
"only supports SPIR-V backends.");
#endif // __SPIR__
#else
(void)g;
(void)x;
(void)mask;
(void)logical_sub_group_size;
(void)member_mask;
throw sycl::exception(sycl::errc::runtime, "Masked version of select_from_sub_group not "
"supported on host device and none intel compiler.");
#endif // __SYCL_DEVICE_ONLY__ && __INTEL_LLVM_COMPILER
}
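// A minimal usage sketch of the masked sub-group shuffles above, assuming an
// in-order sycl::queue `q`, a SPIR-V device and the Intel LLVM compiler (the
// only combination these masked variants support). All 32 work-items of the
// sub-group participate, so the mask is 0xffffffff.
//
//   q.parallel_for(
//       sycl::nd_range<1>(128, 32),
//       [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(32)]] {
//         auto sg = it.get_sub_group();
//         int v = static_cast<int>(sg.get_local_linear_id());
//         int from_right = dpct::experimental::shift_sub_group_left(0xffffffff, sg, v, 1);
//         int from_left  = dpct::experimental::shift_sub_group_right(0xffffffff, sg, v, 1);
//         int butterfly  = dpct::experimental::permute_sub_group_by_xor(0xffffffff, sg, v, 1);
//         // from_right holds the value of lane id+1 (or v at the segment end),
//         // from_left the value of lane id-1, butterfly the value of lane id^1.
//       });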
} // namespace experimental
/// Computes the multiplication of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cmul(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 * t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the division of two complex numbers.
/// \tparam T Complex element type
/// \param [in] x The first input complex number
/// \param [in] y The second input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> cdiv(sycl::vec<T, 2> x, sycl::vec<T, 2> y) {
std::complex<T> t1(x[0], x[1]), t2(y[0], y[1]);
t1 = t1 / t2;
return sycl::vec<T, 2>(t1.real(), t1.imag());
}
/// Computes the magnitude of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
T cabs(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
return std::abs(t);
}
/// Computes the complex conjugate of a complex number.
/// \tparam T Complex element type
/// \param [in] x The input complex number
/// \returns The result
template <typename T>
sycl::vec<T, 2> conj(sycl::vec<T, 2> x) {
std::complex<T> t(x[0], x[1]);
t = std::conj(t);
return sycl::vec<T, 2>(t.real(), t.imag());
}
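// A minimal usage sketch of the complex helpers above. Complex values are
// packed as sycl::vec<T, 2> with the real part in element 0 and the imaginary
// part in element 1; the literal values are assumptions for the example.
//
//   sycl::float2 a{1.0f, 2.0f};          // 1 + 2i
//   sycl::float2 b{3.0f, -1.0f};         // 3 - i
//   sycl::float2 p = dpct::cmul(a, b);   // (5, 5)     : (1 + 2i) * (3 - i) = 5 + 5i
//   sycl::float2 d = dpct::cdiv(a, b);   // (0.1, 0.7) : (1 + 2i) / (3 - i)
//   float        m = dpct::cabs(a);      // sqrt(5)
//   sycl::float2 c = dpct::conj(a);      // (1, -2)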
inline int get_sycl_language_version() {
#ifdef SYCL_LANGUAGE_VERSION
return SYCL_LANGUAGE_VERSION;
#else
return 202000;
#endif
}
namespace experimental {
/// Synchronize work-items from all work-groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its
/// work-group.
/// \param [in] counter: An atomic object defined in device memory that can
/// be accessed by work-items in all work-groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work-items of all work-groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <int dimensions = 3>
inline void
nd_range_barrier(const sycl::nd_item<dimensions> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
static_assert(dimensions == 3, "dimensions must be 3.");
unsigned int num_groups = item.get_group_range(2) * item.get_group_range(1) *
item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 =
(item.get_group(2) + item.get_group(1) + item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
/// Synchronize work-items from all work-groups within a SYCL kernel.
/// \param [in] item: The nd_item identifying the current work-item and its
/// work-group.
/// \param [in] counter: An atomic object defined in device memory that can
/// be accessed by work-items in all work-groups. The initial value of the
/// counter should be zero.
/// Note: Please make sure that all the work-items of all work-groups within
/// a SYCL kernel can be scheduled actively at the same time on a device.
template <>
inline void
nd_range_barrier(const sycl::nd_item<1> &item,
sycl::atomic_ref<
unsigned int, sycl::memory_order::seq_cst,
sycl::memory_scope::device,
sycl::access::address_space::global_space> &counter) {
unsigned int num_groups = item.get_group_range(0);
item.barrier();
if (item.get_local_linear_id() == 0) {
unsigned int inc = 1;
unsigned int old_arrive = 0;
bool is_group0 = (item.get_group(0) == 0);
if (is_group0) {
inc = 0x80000000 - (num_groups - 1);
}
old_arrive = counter.fetch_add(inc);
// Synchronize all the work groups
while (((old_arrive ^ counter.load()) & 0x80000000) == 0)
;
}
item.barrier();
}
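// A minimal usage sketch of nd_range_barrier, assuming a sycl::queue `q` and a
// zero-initialized counter in USM device memory. All launched work-groups must
// be resident simultaneously, so the grid here is kept deliberately small.
//
//   unsigned int *sync_ctr = sycl::malloc_device<unsigned int>(1, q);
//   q.memset(sync_ctr, 0, sizeof(unsigned int)).wait();
//   q.parallel_for(sycl::nd_range<1>(256, 64), [=](sycl::nd_item<1> item) {
//     sycl::atomic_ref<unsigned int, sycl::memory_order::seq_cst,
//                      sycl::memory_scope::device,
//                      sycl::access::address_space::global_space>
//         counter(*sync_ctr);
//     // ... phase 1: writes that must become visible to every work-group ...
//     dpct::experimental::nd_range_barrier(item, counter);
//     // ... phase 2: may now read results produced by any work-group ...
//   });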
/// The logical-group is a logical collection of some work-items within a
/// work-group.
/// Note: Please make sure that the logical-group size is a power of 2 in the
/// range [1, current_sub_group_size].
class logical_group {
sycl::nd_item<3> _item;
sycl::group<3> _g;
uint32_t _logical_group_size;
uint32_t _group_linear_range_in_parent;
public:
/// Dividing \p parent_group into several logical-groups.
/// \param [in] item Current work-item.
/// \param [in] parent_group The group to be divided.
/// \param [in] size The logical-group size.
logical_group(sycl::nd_item<3> item, sycl::group<3> parent_group,
uint32_t size)
: _item(item), _g(parent_group), _logical_group_size(size) {
_group_linear_range_in_parent =
(_g.get_local_linear_range() - 1) / _logical_group_size + 1;
}
/// Returns the index of the work-item within the logical-group.
uint32_t get_local_linear_id() const {
return _item.get_local_linear_id() % _logical_group_size;
}
/// Returns the index of the logical-group in the parent group.
uint32_t get_group_linear_id() const {
return _item.get_local_linear_id() / _logical_group_size;
}
/// Returns the number of work-items in the logical-group.
uint32_t get_local_linear_range() const {
if (_g.get_local_linear_range() % _logical_group_size == 0) {
return _logical_group_size;
}
uint32_t last_item_group_id =
_g.get_local_linear_range() / _logical_group_size;
uint32_t first_of_last_group = last_item_group_id * _logical_group_size;
if (_item.get_local_linear_id() >= first_of_last_group) {
return _g.get_local_linear_range() - first_of_last_group;
} else {
return _logical_group_size;
}
}
  /// Returns the number of logical-groups in the parent group.
uint32_t get_group_linear_range() const {
return _group_linear_range_in_parent;
}
};
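// A minimal usage sketch of logical_group inside a kernel launched over a
// sycl::nd_range<3>; the queue `q` and the tile size of 16 (a power of 2 no
// larger than the sub-group size, as the note above requires) are assumptions.
//
//   q.parallel_for(sycl::nd_range<3>({1, 1, 256}, {1, 1, 64}),
//                  [=](sycl::nd_item<3> item) {
//                    dpct::experimental::logical_group tile(item, item.get_group(), 16);
//                    uint32_t lane       = tile.get_local_linear_id();    // 0..15
//                    uint32_t tile_id    = tile.get_group_linear_id();    // 0..3
//                    uint32_t tile_count = tile.get_group_linear_range(); // 4
//                    // ... tile-local cooperation ...
//                  });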
// The original source of the function calculate_max_active_wg_per_xecore was
// under the license below:
//
// Copyright Intel Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
/// This function is used for occupancy calculation; it computes the maximum
/// number of active work-groups per Xe-core. Refer to
/// https://github.com/oneapi-src/oneAPI-samples/tree/master/Tools/GPU-Occupancy-Calculator
/// \param [out] num_wg Active work-group number.
/// \param [in] wg_size Work-group size.
/// \param [in] slm_size Shared local memory size.
/// \param [in] sg_size Sub-group size.
/// \param [in] used_barrier Whether a barrier is used.
/// \param [in] used_large_grf Whether the large General Register File is used.
/// \return If no error, returns 0.
/// If \p wg_size exceeds the max work-group size, the max work-group size is
/// used instead of \p wg_size and -1 is returned.
inline int calculate_max_active_wg_per_xecore(int *num_wg, int wg_size,
int slm_size = 0,
int sg_size = 32,
bool used_barrier = false,
bool used_large_grf = false) {
int ret = 0;
const int slm_size_per_xe_core = 64 * 1024;
const int max_barrier_registers = 32;
dpct::device_ext &dev = dpct::get_current_device();
size_t max_wg_size = dev.get_info<sycl::info::device::max_work_group_size>();
if (wg_size > max_wg_size) {
wg_size = max_wg_size;
ret = -1;
}
int num_threads_ss = 56;
int max_num_wg = 56;
if (dev.has(sycl::aspect::ext_intel_gpu_eu_count_per_subslice) &&
dev.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
auto eu_count =
dev.get_info<sycl::info::device::ext_intel_gpu_eu_count_per_subslice>();
auto threads_count =
dev.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
num_threads_ss = eu_count * threads_count;
max_num_wg = eu_count * threads_count;
}
if (used_barrier) {
max_num_wg = max_barrier_registers;
}
// Calculate num_wg_slm
int num_wg_slm = 0;
if (slm_size == 0) {
num_wg_slm = max_num_wg;
} else {
num_wg_slm = std::floor((float)slm_size_per_xe_core / slm_size);
}
// Calculate num_wg_threads
if (used_large_grf)
num_threads_ss = num_threads_ss / 2;
int num_threads = std::ceil((float)wg_size / sg_size);
int num_wg_threads = std::floor((float)num_threads_ss / num_threads);
// Calculate num_wg
*num_wg = std::min(num_wg_slm, num_wg_threads);
*num_wg = std::min(*num_wg, max_num_wg);
return ret;
}
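// A minimal usage sketch of the occupancy helper above; the work-group size,
// SLM usage and barrier flag are assumptions for the example.
//
//   int num_wg = 0;
//   int err = dpct::experimental::calculate_max_active_wg_per_xecore(
//       &num_wg, /*wg_size=*/256, /*slm_size=*/8 * 1024,
//       /*sg_size=*/32, /*used_barrier=*/true);
//   if (err == -1) {
//     // 256 exceeded the device limit; the clamped size was used instead.
//   }
//   // num_wg now holds the occupancy estimate for the current device.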
} // namespace experimental
/// If x <= 2, then return a pointer to the default queue;
/// otherwise, return x reinterpreted as a dpct::queue_ptr.
inline queue_ptr int_as_queue_ptr(uintptr_t x) {
return x <= 2 ?
&get_default_queue()
: reinterpret_cast<queue_ptr>(x);
}
template <int n_nondefault_params, int n_default_params, typename T>
class args_selector;
/// args_selector is a helper class for extracting arguments from an
/// array of pointers to arguments or buffer of arguments to pass to a
/// kernel function.
///
/// \param R(Ts...) The type of the kernel
/// \param n_nondefault_params The number of nondefault parameters of the kernel
/// (excluding parameters such as sycl::nd_item, etc.)
/// \param n_default_params The number of default parameters of the kernel
///
/// Example usage:
/// With the following kernel:
/// void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f=.1) {}
/// and with the declaration:
/// args_selector<2, 1, decltype(foo)> selector(kernelParams, extra);
/// we have:
/// selector.get<0>() returns a reference to sycl::float2*,
/// selector.get<1>() returns a reference to int,
/// selector.get<2>() returns a reference to float
template <int n_nondefault_params, int n_default_params,
typename R, typename... Ts>
class args_selector<n_nondefault_params, n_default_params, R(Ts...)> {
private:
void **kernel_params;
char *args_buffer;
template <int i>
static constexpr int account_for_default_params() {
constexpr int n_total_params = sizeof...(Ts);
if constexpr (i >= n_nondefault_params) {
return n_total_params - n_default_params + (i - n_nondefault_params);
} else {
return i;
}
}
public:
/// Get the type of the ith argument of R(Ts...)
/// \param [in] i Index of parameter to get
/// \returns Type of ith parameter
template <int i>
using arg_type = std::tuple_element_t<account_for_default_params<i>(),
std::tuple<Ts...>>;
private:
template <int i>
static constexpr int get_offset() {
if constexpr (i == 0) {
// we can assume args_buffer is properly aligned to the
// first argument
return 0;
} else {
constexpr int prev_off = get_offset<i-1>();
constexpr int prev_past_end = prev_off + sizeof(arg_type<i-1>);
using T = arg_type<i>;
// is the past-the-end of the i-1st element properly aligned
// with the ith element's alignment?
if constexpr (prev_past_end % alignof(T) == 0) {
return prev_past_end;
}
// otherwise bump prev_past_end to match alignment
else {
return prev_past_end + (alignof(T) - (prev_past_end % alignof(T)));
}
}
}
static char *get_args_buffer(void **extra) {
if (!extra)
return nullptr;
for (; (std::size_t) *extra != 0; ++extra) {
if ((std::size_t) *extra == 1) {
return static_cast<char*>(*(extra+1));
}
}
return nullptr;
}
public:
/// If kernel_params is nonnull, then args_selector will
/// extract arguments from kernel_params. Otherwise, it
/// will extract them from extra.
/// \param [in] kernel_params Array of pointers to arguments,
/// or a null pointer.
/// \param [in] extra Array containing pointer to argument buffer.
args_selector(void **kernel_params, void **extra)
: kernel_params(kernel_params),
args_buffer(get_args_buffer(extra))
{}
/// Get a reference to the ith argument extracted from kernel_params
/// or extra.
/// \param [in] i Index of argument to get
/// \returns Reference to the ith argument
template <int i>
arg_type<i> &get() {
if (kernel_params) {
return *static_cast<arg_type<i>*>(kernel_params[i]);
} else {
return *reinterpret_cast<arg_type<i>*>(args_buffer + get_offset<i>());
}
}
};
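// A minimal usage sketch of args_selector, assuming the kernel foo from the
// comment above, a sycl::queue `q`, and host-side values to forward; only the
// two nondefault parameters are extracted.
//
//   // void foo(sycl::float2 *x, int n, sycl::nd_item<3> item_ct1, float f = .1);
//   sycl::float2 *x_ptr = sycl::malloc_device<sycl::float2>(1024, q);
//   int n = 1024;
//   void *kernel_params[] = {&x_ptr, &n};
//   dpct::args_selector<2, 1, decltype(foo)> selector(kernel_params, nullptr);
//   sycl::float2 *&x_arg = selector.get<0>();  // first kernel argument
//   int          &n_arg  = selector.get<1>();  // second kernel argument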
#ifdef _WIN32
#define DPCT_EXPORT __declspec(dllexport)
#else
#define DPCT_EXPORT
#endif
} // namespace dpct
#endif // __DPCT_UTIL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/image.hpp | //==---- image.hpp --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_IMAGE_HPP__
#define __DPCT_IMAGE_HPP__
#include <sycl/sycl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
enum class image_channel_data_type {
signed_int,
unsigned_int,
fp,
};
class image_channel;
class image_wrapper_base;
namespace detail {
/// Image object type traits, with accessor type and sampled data type defined.
/// The data type of an image accessor must be one of sycl::int4, sycl::uint4,
/// sycl::float4 and sycl::half4. The data type of accessors with 8bits/16bits
/// channel width will be 32 bits. sycl::half is an exception.
template <class T> struct image_trait {
using acc_data_t = sycl::vec<T, 4>;
template <int dimensions>
using accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image>;
template <int dimensions>
using array_accessor_t =
sycl::accessor<acc_data_t, dimensions, sycl::access_mode::read,
sycl::access::target::image_array>;
using data_t = T;
using elem_t = T;
static constexpr image_channel_data_type data_type =
std::is_integral<T>::value
? (std::is_signed<T>::value ? image_channel_data_type::signed_int
: image_channel_data_type::unsigned_int)
: image_channel_data_type::fp;
static constexpr int channel_num = 1;
};
template <>
struct image_trait<std::uint8_t> : public image_trait<std::uint32_t> {
using data_t = std::uint8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::uint16_t>
: public image_trait<std::uint32_t> {
using data_t = std::uint16_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int8_t> : public image_trait<std::int32_t> {
using data_t = std::int8_t;
using elem_t = data_t;
};
template <>
struct image_trait<std::int16_t> : public image_trait<std::int32_t> {
using data_t = std::int16_t;
using elem_t = data_t;
};
template <>
struct image_trait<char>
: public image_trait<typename std::conditional<
std::is_signed<char>::value, signed char, unsigned char>::type> {};
template <class T>
struct image_trait<sycl::vec<T, 1>> : public image_trait<T> {};
template <class T>
struct image_trait<sycl::vec<T, 2>> : public image_trait<T> {
using data_t = sycl::vec<T, 2>;
static constexpr int channel_num = 2;
};
template <class T>
struct image_trait<sycl::vec<T, 3>>
: public image_trait<sycl::vec<T, 4>> {
static constexpr int channel_num = 3;
};
template <class T>
struct image_trait<sycl::vec<T, 4>> : public image_trait<T> {
using data_t = sycl::vec<T, 4>;
static constexpr int channel_num = 4;
};
/// Functor to fetch data from read result of an image accessor.
template <class T> struct fetch_data {
using return_t = typename image_trait<T>::data_t;
using acc_data_t = typename image_trait<T>::acc_data_t;
return_t operator()(acc_data_t &&original_data) {
return (return_t)original_data.r();
}
};
template <class T>
struct fetch_data<sycl::vec<T, 1>> : public fetch_data<T> {};
template <class T> struct fetch_data<sycl::vec<T, 2>> {
using return_t = typename image_trait<sycl::vec<T, 2>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 2>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g());
}
};
template <class T>
struct fetch_data<sycl::vec<T, 3>>
: public fetch_data<sycl::vec<T, 4>> {};
template <class T> struct fetch_data<sycl::vec<T, 4>> {
using return_t = typename image_trait<sycl::vec<T, 4>>::data_t;
using acc_data_t = typename image_trait<sycl::vec<T, 4>>::acc_data_t;
return_t operator()(acc_data_t &&origin_data) {
return return_t(origin_data.r(), origin_data.g(), origin_data.b(),
origin_data.a());
}
};
/// Create an image wrapper according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims);
/// Create an image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims);
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims);
} // namespace detail
/// Image channel info, including channel number, order, data width and type.
class image_channel {
image_channel_data_type _type = image_channel_data_type::signed_int;
/// Number of channels.
unsigned _channel_num = 0;
/// Total size of all channels in bytes.
unsigned _total_size = 0;
/// Size of each channel in bytes.
unsigned _channel_size = 0;
public:
/// Create image channel info according to template argument \p T.
template <class T> static image_channel create() {
image_channel channel;
channel.set_channel_size(detail::image_trait<T>::channel_num,
sizeof(typename detail::image_trait<T>::elem_t) *
8);
channel.set_channel_data_type(detail::image_trait<T>::data_type);
return channel;
}
image_channel() = default;
image_channel_data_type get_channel_data_type() { return _type; }
void set_channel_data_type(image_channel_data_type type) { _type = type; }
unsigned get_total_size() { return _total_size; }
unsigned get_channel_num() { return _channel_num; }
void set_channel_num(unsigned channel_num) {
_channel_num = channel_num;
_total_size = _channel_size * _channel_num;
}
/// image_channel constructor.
/// \param r Channel r width in bits.
/// \param g Channel g width in bits. Should be same with \p r, or zero.
/// \param b Channel b width in bits. Should be same with \p g, or zero.
/// \param a Channel a width in bits. Should be same with \p b, or zero.
  /// \param data_type Image channel data type: signed_int, unsigned_int or fp.
image_channel(int r, int g, int b, int a, image_channel_data_type data_type) {
_type = data_type;
if (a) {
assert(r == a && "SYCL doesn't support different channel size");
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(4, a);
} else if (b) {
assert(r == b && "SYCL doesn't support different channel size");
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(3, b);
} else if (g) {
assert(r == g && "SYCL doesn't support different channel size");
set_channel_size(2, g);
} else {
set_channel_size(1, r);
}
}
sycl::image_channel_type get_channel_type() const {
if (_channel_size == 4) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int32;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int32;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp32;
} else if (_channel_size == 2) {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int16;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int16;
else if (_type == image_channel_data_type::fp)
return sycl::image_channel_type::fp16;
} else {
if (_type == image_channel_data_type::signed_int)
return sycl::image_channel_type::signed_int8;
else if (_type == image_channel_data_type::unsigned_int)
return sycl::image_channel_type::unsigned_int8;
}
assert(false && "unexpected channel data kind and channel size");
return sycl::image_channel_type::signed_int32;
}
void set_channel_type(sycl::image_channel_type type) {
switch (type) {
case sycl::image_channel_type::unsigned_int8:
_type = image_channel_data_type::unsigned_int;
_channel_size = 1;
break;
case sycl::image_channel_type::unsigned_int16:
_type = image_channel_data_type::unsigned_int;
_channel_size = 2;
break;
case sycl::image_channel_type::unsigned_int32:
_type = image_channel_data_type::unsigned_int;
_channel_size = 4;
break;
case sycl::image_channel_type::signed_int8:
_type = image_channel_data_type::signed_int;
_channel_size = 1;
break;
case sycl::image_channel_type::signed_int16:
_type = image_channel_data_type::signed_int;
_channel_size = 2;
break;
case sycl::image_channel_type::signed_int32:
_type = image_channel_data_type::signed_int;
_channel_size = 4;
break;
case sycl::image_channel_type::fp16:
_type = image_channel_data_type::fp;
_channel_size = 2;
break;
case sycl::image_channel_type::fp32:
_type = image_channel_data_type::fp;
_channel_size = 4;
break;
default:
break;
}
_total_size = _channel_size * _channel_num;
}
sycl::image_channel_order get_channel_order() const {
switch (_channel_num) {
case 1:
return sycl::image_channel_order::r;
case 2:
return sycl::image_channel_order::rg;
case 3:
return sycl::image_channel_order::rgb;
case 4:
return sycl::image_channel_order::rgba;
default:
return sycl::image_channel_order::r;
}
}
/// Get the size for each channel in bits.
unsigned get_channel_size() const { return _channel_size * 8; }
/// Set channel size.
/// \param in_channel_num Channels number to set.
/// \param channel_size Size for each channel in bits.
void set_channel_size(unsigned in_channel_num,
unsigned channel_size) {
if (in_channel_num < _channel_num)
return;
_channel_num = in_channel_num;
_channel_size = channel_size / 8;
_total_size = _channel_size * _channel_num;
}
};
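// A minimal usage sketch of image_channel: two equivalent ways to describe a
// 4-channel 32-bit floating-point image format.
//
//   dpct::image_channel ch1 = dpct::image_channel::create<sycl::float4>();
//   dpct::image_channel ch2(32, 32, 32, 32, dpct::image_channel_data_type::fp);
//   // ch1.get_channel_order() == sycl::image_channel_order::rgba
//   // ch1.get_channel_type()  == sycl::image_channel_type::fp32
//   // ch1.get_total_size()    == 16 bytes per pixel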
/// 2D or 3D matrix data for image.
class image_matrix {
image_channel _channel;
int _range[3] = {1, 1, 1};
int _dims = 0;
void *_host_data = nullptr;
/// Set range of each dimension.
template <int dimensions> void set_range(sycl::range<dimensions> range) {
for (int i = 0; i < dimensions; ++i)
_range[i] = range[i];
_dims = dimensions;
}
template <int... DimIdx>
sycl::range<sizeof...(DimIdx)> get_range(integer_sequence<DimIdx...>) {
return sycl::range<sizeof...(DimIdx)>(_range[DimIdx]...);
}
public:
/// Constructor with channel info and dimension size info.
template <int dimensions>
image_matrix(image_channel channel, sycl::range<dimensions> range)
: _channel(channel) {
set_range(range);
_host_data = std::malloc(range.size() * _channel.get_total_size());
}
image_matrix(sycl::image_channel_type channel_type, unsigned channel_num,
size_t x, size_t y) {
_channel.set_channel_type(channel_type);
_channel.set_channel_num(channel_num);
_dims = 1;
_range[0] = x;
if (y) {
_dims = 2;
_range[1] = y;
}
_host_data = std::malloc(_range[0] * _range[1] * _channel.get_total_size());
}
/// Construct a new image class with the matrix data.
template <int dimensions> sycl::image<dimensions> *create_image() {
return create_image<dimensions>(_channel);
}
/// Construct a new image class with the matrix data.
template <int dimensions>
sycl::image<dimensions> *create_image(image_channel channel) {
return new sycl::image<dimensions>(
_host_data, channel.get_channel_order(), channel.get_channel_type(),
get_range(make_index_sequence<dimensions>()),
sycl::property::image::use_host_ptr());
}
/// Get channel info.
inline image_channel get_channel() { return _channel; }
/// Get range of the image.
sycl::range<3> get_range() {
return sycl::range<3>(_range[0], _range[1], _range[2]);
}
/// Get matrix dims.
inline int get_dims() { return _dims; }
/// Convert to pitched data.
pitched_data to_pitched_data() {
return pitched_data(_host_data, _range[0], _range[0], _range[1]);
}
~image_matrix() {
if (_host_data)
std::free(_host_data);
_host_data = nullptr;
}
};
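// A minimal usage sketch of image_matrix: a 512 x 512 single-channel float
// matrix whose host storage can back a 2D sycl::image. The sizes are
// assumptions for the example.
//
//   dpct::image_channel ch = dpct::image_channel::create<float>();
//   dpct::image_matrix mat(ch, sycl::range<2>(512, 512));
//   sycl::range<3> extent = mat.get_range();       // {512, 512, 1}
//   dpct::pitched_data pd = mat.to_pitched_data(); // wraps the host buffer
//   sycl::image<2> *img = mat.create_image<2>();   // image over the host data
//   delete img;  // the matrix keeps ownership of the underlying host buffer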
using image_matrix_p = image_matrix *;
enum class image_data_type { matrix, linear, pitch, unsupport };
/// Image data info.
class image_data {
public:
image_data() { _type = image_data_type::unsupport; }
image_data(image_matrix_p matrix_data) { set_data(matrix_data); }
image_data(void *data_ptr, size_t x_size, image_channel channel) {
set_data(data_ptr, x_size, channel);
}
image_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
set_data(data_ptr, x_size, y_size, pitch_size, channel);
}
void set_data(image_matrix_p matrix_data) {
_type = image_data_type::matrix;
_data = matrix_data;
_channel = matrix_data->get_channel();
}
void set_data(void *data_ptr, size_t x_size, image_channel channel) {
_type = image_data_type::linear;
_data = data_ptr;
_x = x_size;
_channel = channel;
}
void set_data(void *data_ptr, size_t x_size, size_t y_size, size_t pitch_size,
image_channel channel) {
_type = image_data_type::pitch;
_data = data_ptr;
_x = x_size;
_y = y_size;
_pitch = pitch_size;
_channel = channel;
}
image_data_type get_data_type() const { return _type; }
void set_data_type(image_data_type type) { _type = type; }
void *get_data_ptr() const { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_x() const { return _x; }
void set_x(size_t x) { _x = x; }
size_t get_y() const { return _y; }
void set_y(size_t y) { _y = y; }
size_t get_pitch() const { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
image_channel get_channel() const { return _channel; }
void set_channel(image_channel channel) { _channel = channel; }
image_channel_data_type get_channel_data_type() {
return _channel.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_channel.set_channel_data_type(type);
}
unsigned get_channel_size() { return _channel.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _channel.set_channel_size(channel_num, channel_size);
}
unsigned get_channel_num() { return _channel.get_channel_num(); }
void set_channel_num(unsigned num) {
return _channel.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _channel.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _channel.set_channel_type(type);
}
private:
image_data_type _type;
void *_data = nullptr;
size_t _x, _y, _pitch;
image_channel _channel;
};
/// Image sampling info, including addressing mode, filtering mode and
/// normalization info.
class sampling_info {
sycl::addressing_mode _addressing_mode =
sycl::addressing_mode::clamp_to_edge;
sycl::filtering_mode _filtering_mode = sycl::filtering_mode::nearest;
sycl::coordinate_normalization_mode _coordinate_normalization_mode =
sycl::coordinate_normalization_mode::unnormalized;
public:
sycl::addressing_mode get_addressing_mode() { return _addressing_mode; }
void set(sycl::addressing_mode addressing_mode) { _addressing_mode = addressing_mode; }
sycl::filtering_mode get_filtering_mode() { return _filtering_mode; }
void set(sycl::filtering_mode filtering_mode) { _filtering_mode = filtering_mode; }
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _coordinate_normalization_mode;
}
void set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_coordinate_normalization_mode = coordinate_normalization_mode;
}
bool is_coordinate_normalized() {
return _coordinate_normalization_mode ==
sycl::coordinate_normalization_mode::normalized;
}
void set_coordinate_normalization_mode(int is_normalized) {
_coordinate_normalization_mode =
is_normalized ? sycl::coordinate_normalization_mode::normalized
: sycl::coordinate_normalization_mode::unnormalized;
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
sycl::sampler get_sampler() {
return sycl::sampler(_coordinate_normalization_mode, _addressing_mode,
_filtering_mode);
}
};
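// A minimal usage sketch of sampling_info: nearest-neighbour filtering with
// clamped, unnormalized coordinates.
//
//   dpct::sampling_info info;
//   info.set(sycl::addressing_mode::clamp_to_edge,
//            sycl::filtering_mode::nearest,
//            sycl::coordinate_normalization_mode::unnormalized);
//   sycl::sampler s = info.get_sampler();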
/// Image base class.
class image_wrapper_base {
sampling_info _sampling_info;
image_data _data;
public:
virtual ~image_wrapper_base() = 0;
void attach(image_data data) { set_data(data); }
/// Attach matrix data to this class.
void attach(image_matrix *matrix) {
detach();
image_wrapper_base::set_data(image_data(matrix));
}
/// Attach matrix data to this class.
void attach(image_matrix *matrix, image_channel channel) {
attach(matrix);
image_wrapper_base::set_channel(channel);
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count) {
attach(ptr, count, get_channel());
}
/// Attach linear data to this class.
void attach(const void *ptr, size_t count, image_channel channel) {
detach();
image_wrapper_base::set_data(image_data(const_cast<void *>(ptr), count, channel));
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch) {
attach(data, x, y, pitch, get_channel());
}
/// Attach 2D data to this class.
void attach(const void *data, size_t x, size_t y, size_t pitch,
image_channel channel) {
detach();
image_wrapper_base::set_data(
image_data(const_cast<void *>(data), x, y, pitch, channel));
}
/// Detach data.
virtual void detach() {}
sampling_info get_sampling_info() { return _sampling_info; }
void set_sampling_info(sampling_info info) {
_sampling_info = info;
}
const image_data &get_data() { return _data; }
void set_data(image_data data) { _data = data; }
image_channel get_channel() { return _data.get_channel(); }
void set_channel(image_channel channel) { _data.set_channel(channel); }
image_channel_data_type get_channel_data_type() {
return _data.get_channel_data_type();
}
void set_channel_data_type(image_channel_data_type type) {
_data.set_channel_data_type(type);
}
unsigned get_channel_size() { return _data.get_channel_size(); }
void set_channel_size(unsigned channel_num, unsigned channel_size) {
return _data.set_channel_size(channel_num, channel_size);
}
sycl::addressing_mode get_addressing_mode() {
return _sampling_info.get_addressing_mode();
}
void set(sycl::addressing_mode addressing_mode) {
_sampling_info.set(addressing_mode);
}
sycl::filtering_mode get_filtering_mode() {
return _sampling_info.get_filtering_mode();
}
void set(sycl::filtering_mode filtering_mode) {
_sampling_info.set(filtering_mode);
}
sycl::coordinate_normalization_mode get_coordinate_normalization_mode() {
return _sampling_info.get_coordinate_normalization_mode();
}
void
set(sycl::coordinate_normalization_mode coordinate_normalization_mode) {
_sampling_info.set(coordinate_normalization_mode);
}
bool is_coordinate_normalized() {
return _sampling_info.is_coordinate_normalized();
}
void set_coordinate_normalization_mode(int is_normalized) {
_sampling_info.set_coordinate_normalization_mode(is_normalized);
}
void
set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode,
sycl::coordinate_normalization_mode coordinate_normalization_mode) {
set(addressing_mode);
set(filtering_mode);
set(coordinate_normalization_mode);
}
void set(sycl::addressing_mode addressing_mode,
sycl::filtering_mode filtering_mode, int is_normalized) {
set(addressing_mode);
set(filtering_mode);
set_coordinate_normalization_mode(is_normalized);
}
unsigned get_channel_num() { return _data.get_channel_num(); }
void set_channel_num(unsigned num) {
return _data.set_channel_num(num);
}
sycl::image_channel_type get_channel_type() {
return _data.get_channel_type();
}
void set_channel_type(sycl::image_channel_type type) {
return _data.set_channel_type(type);
}
sycl::sampler get_sampler() { return _sampling_info.get_sampler(); }
};
inline image_wrapper_base::~image_wrapper_base() {}
using image_wrapper_base_p = image_wrapper_base *;
template <class T, int dimensions, bool IsImageArray> class image_accessor_ext;
/// Image class, wrapper of sycl::image.
template <class T, int dimensions, bool IsImageArray = false> class image_wrapper : public image_wrapper_base {
sycl::image<dimensions> *_image = nullptr;
#ifndef DPCT_USM_LEVEL_NONE
std::vector<char> _host_buffer;
#endif
void create_image(sycl::queue q) {
auto &data = get_data();
if (data.get_data_type() == image_data_type::matrix) {
_image = static_cast<image_matrix_p>(data.get_data_ptr())
->create_image<dimensions>(data.get_channel());
return;
}
auto ptr = data.get_data_ptr();
auto channel = data.get_channel();
if (detail::get_pointer_attribute(q, ptr) == detail::pointer_access_attribute::device_only) {
#ifdef DPCT_USM_LEVEL_NONE
ptr = get_buffer(ptr)
.template get_access<sycl::access_mode::read_write>()
.get_pointer();
#else
auto sz = data.get_x();
if (data.get_data_type() == image_data_type::pitch)
sz *= channel.get_total_size() * data.get_y();
_host_buffer.resize(sz);
q.memcpy(_host_buffer.data(), ptr, sz).wait();
ptr = _host_buffer.data();
#endif
}
if constexpr (dimensions == 1) {
assert(data.get_data_type() == image_data_type::linear);
_image = new sycl::image<1>(
ptr, channel.get_channel_order(), channel.get_channel_type(),
sycl::range<1>(data.get_x() / channel.get_total_size()));
} else if constexpr (dimensions == 2) {
assert(data.get_data_type() == image_data_type::pitch);
_image = new sycl::image<2>(ptr, channel.get_channel_order(),
channel.get_channel_type(),
sycl::range<2>(data.get_x(), data.get_y()),
sycl::range<1>(data.get_pitch()));
} else {
throw std::runtime_error("3D image only support matrix data");
}
return;
}
public:
using acc_data_t = typename detail::image_trait<T>::acc_data_t;
using accessor_t =
typename image_accessor_ext<T, IsImageArray ? (dimensions - 1) : dimensions,
IsImageArray>::accessor_t;
image_wrapper() { set_channel(image_channel::create<T>()); }
~image_wrapper() { detach(); }
/// Get image accessor.
accessor_t get_access(sycl::handler &cgh, sycl::queue &q = get_default_queue()) {
if (!_image)
create_image(q);
return accessor_t(*_image, cgh);
}
/// Detach data.
void detach() override {
if (_image)
delete _image;
_image = nullptr;
}
};
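// A minimal usage sketch of image_wrapper, assuming a sycl::queue `q` and a
// device allocation `dev_ptr` holding a `height` x `width` float image with a
// row pitch of `pitch` bytes; these names are placeholders, not part of the API.
//
//   dpct::image_wrapper<float, 2> tex;
//   tex.attach(dev_ptr, width, height, pitch);
//   tex.set(sycl::addressing_mode::clamp_to_edge,
//           sycl::filtering_mode::nearest, /*is_normalized=*/0);
//   q.submit([&](sycl::handler &cgh) {
//     auto acc  = tex.get_access(cgh, q);
//     auto smpl = tex.get_sampler();
//     cgh.parallel_for(sycl::range<2>(height, width), [=](sycl::id<2> id) {
//       dpct::image_accessor_ext<float, 2> img(smpl, acc);
//       float v = img.read(static_cast<int>(id[1]), static_cast<int>(id[0]));
//       // ... use v ...
//     });
//   });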
/// Wrap sampler and image accessor together.
template <class T, int dimensions, bool IsImageArray = false>
class image_accessor_ext {
public:
using accessor_t =
typename detail::image_trait<T>::template accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 3>
typename std::enable_if<Available, data_t>::type read(float x, float y,
float z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1, class Coord2,
bool Available = dimensions == 3 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value
&&std::is_integral<Coord2>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y,
Coord2 z) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int4(x, y, z, 0), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(float x, float y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <class Coord0, class Coord1,
bool Available = dimensions == 2 &&
std::is_integral<Coord0>::value
&&std::is_integral<Coord1>::value>
typename std::enable_if<Available, data_t>::type read(Coord0 x, Coord1 y) {
return detail::fetch_data<T>()(
_img_acc.read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(float x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
/// Read data from accessor.
template <class CoordT,
bool Available = dimensions == 1 && std::is_integral<CoordT>::value>
typename std::enable_if<Available, data_t>::type read(CoordT x) {
return detail::fetch_data<T>()(_img_acc.read(x, _sampler));
}
};
template <class T, int dimensions> class image_accessor_ext<T, dimensions, true> {
public:
using accessor_t =
typename detail::image_trait<T>::template array_accessor_t<dimensions>;
using data_t = typename detail::image_trait<T>::data_t;
sycl::sampler _sampler;
accessor_t _img_acc;
public:
image_accessor_ext(sycl::sampler sampler, accessor_t acc)
: _sampler(sampler), _img_acc(acc) {}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, float x,
float y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::float2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 2>
typename std::enable_if<Available, data_t>::type read(int index, int x, int y) {
return detail::fetch_data<T>()(
_img_acc[index].read(sycl::int2(x, y), _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, float x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
/// Read data from accessor.
template <bool Available = dimensions == 1>
typename std::enable_if<Available, data_t>::type read(int index, int x) {
return detail::fetch_data<T>()(
_img_acc[index].read(x, _sampler));
}
};
/// Create an image wrapper according to image data and sampling info.
/// \param data Image data used to create the image wrapper.
/// \param info Image sampling info used to create the image wrapper.
/// \returns Pointer to the base class of the created image wrapper object.
static inline image_wrapper_base *create_image_wrapper(image_data data,
sampling_info info) {
image_channel channel;
int dims = 1;
if (data.get_data_type() == image_data_type::matrix) {
auto matrix = (image_matrix_p)data.get_data_ptr();
channel = matrix->get_channel();
dims = matrix->get_dims();
} else {
if (data.get_data_type() == image_data_type::pitch) {
dims = 2;
}
channel = data.get_channel();
}
if (auto ret = detail::create_image_wrapper(channel, dims)) {
ret->set_sampling_info(info);
ret->set_data(data);
return ret;
}
return nullptr;
}
namespace detail {
/// Create an image wrapper according to the given type \p T and \p dims.
template <class T> static image_wrapper_base *create_image_wrapper(int dims) {
switch (dims) {
case 1:
return new image_wrapper<T, 1>();
case 2:
return new image_wrapper<T, 2>();
case 3:
return new image_wrapper<T, 3>();
default:
return nullptr;
}
}
/// Create an image wrapper with the given data type \p T, channel number and dims.
template <class T>
static image_wrapper_base *create_image_wrapper(unsigned channel_num, int dims) {
switch (channel_num) {
case 1:
return create_image_wrapper<T>(dims);
case 2:
return create_image_wrapper<sycl::vec<T, 2>>(dims);
case 3:
return create_image_wrapper<sycl::vec<T, 3>>(dims);
case 4:
return create_image_wrapper<sycl::vec<T, 4>>(dims);
default:
return nullptr;
}
}
/// Create image with channel info and specified dimensions.
static image_wrapper_base *create_image_wrapper(image_channel channel, int dims) {
switch (channel.get_channel_type()) {
case sycl::image_channel_type::fp16:
return create_image_wrapper<sycl::half>(channel.get_channel_num(), dims);
case sycl::image_channel_type::fp32:
return create_image_wrapper<float>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int8:
return create_image_wrapper<std::int8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int16:
return create_image_wrapper<std::int16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::signed_int32:
return create_image_wrapper<std::int32_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int8:
return create_image_wrapper<std::uint8_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int16:
return create_image_wrapper<std::uint16_t>(channel.get_channel_num(), dims);
case sycl::image_channel_type::unsigned_int32:
return create_image_wrapper<std::uint32_t>(channel.get_channel_num(), dims);
default:
return nullptr;
}
}
} // namespace detail
} // namespace dpct
#endif // !__DPCT_IMAGE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/kernel.hpp | //==---- kernel.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_KERNEL_HPP__
#define __DPCT_KERNEL_HPP__
#include <sycl/sycl.hpp>
#ifdef _WIN32
#include <unordered_set>
#include <windows.h>
#else
#include <dlfcn.h>
#endif
#if defined(__has_include) && __has_include(<filesystem>)
#include <filesystem>
#elif defined(__has_include) && __has_include(<experimental/filesystem>)
#include <experimental/filesystem>
#else
#error "SYCLomatic runtime requires C++ filesystem support"
#endif
#include <random>
#include "image.hpp"
#include <fstream>
namespace dpct {
typedef void (*kernel_functor)(sycl::queue &, const sycl::nd_range<3> &,
unsigned int, void **, void **);
struct kernel_function_info {
int max_work_group_size = 0;
};
static inline void get_kernel_function_info(kernel_function_info *kernel_info,
const void *function) {
kernel_info->max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
}
static inline kernel_function_info
get_kernel_function_info(const void *function) {
kernel_function_info kernel_info;
kernel_info.max_work_group_size =
dpct::dev_mgr::instance()
.current_device()
.get_info<sycl::info::device::max_work_group_size>();
return kernel_info;
}
namespace detail {
#if defined(__has_include) && __has_include(<filesystem>)
namespace fs = std::filesystem;
#else
namespace fs = std::experimental::filesystem;
#endif
/// Write data to temporary file and return absolute path to temporary file.
/// Temporary file is created in a temporary directory both of which have random
/// names with only the user having access permissions. Only one temporary file
/// will be created in the temporary directory.
static inline fs::path write_data_to_file(char const *const data, size_t size) {
std::error_code ec;
if (sizeof(size_t) >= sizeof(std::streamsize) &&
size > (std::numeric_limits<std::streamsize>::max)())
throw std::runtime_error("data file too large");
// random number generator
std::random_device dev;
std::mt19937 prng(dev());
std::uniform_int_distribution<uint64_t> rand(0);
// find temporary directory
auto tmp_dir = fs::temp_directory_path(ec);
if (ec)
throw std::runtime_error("could not find temporary directory");
// create private directory
std::stringstream directory;
fs::path directory_path;
constexpr int max_attempts = 5;
int i;
for (i = 0; i < max_attempts; i++) {
directory << std::hex << rand(prng);
directory_path = tmp_dir / directory.str();
if (fs::create_directory(directory_path)) {
break;
}
}
if (i == max_attempts)
throw std::runtime_error("could not create directory");
// only allow owner permissions to private directory
fs::permissions(directory_path, fs::perms::owner_all, ec);
if (ec)
throw std::runtime_error("could not set directory permissions");
// random filename in private directory
std::stringstream filename;
filename << std::hex << rand(prng);
#ifdef _WIN32
auto filepath = directory_path / (filename.str() + ".dll");
#else
auto filepath = directory_path / filename.str();
#endif
// write data to temporary file
auto outfile = std::ofstream(filepath, std::ios::out | std::ios::binary);
if (outfile) {
// only allow program to write file
fs::permissions(filepath, fs::perms::owner_write, ec);
if (ec)
throw std::runtime_error("could not set permissions");
outfile.write(data, size);
if (!outfile.good())
throw std::runtime_error("could not write data");
outfile.close();
// only allow program to read/execute file
fs::permissions(filepath, fs::perms::owner_read | fs::perms::owner_exec,
ec);
if (ec)
throw std::runtime_error("could not set permissions");
} else
throw std::runtime_error("could not write data");
// check temporary file contents
auto infile = std::ifstream(filepath, std::ios::in | std::ios::binary);
if (infile) {
bool mismatch = false;
size_t cnt = 0;
while (1) {
char c;
infile.get(c);
if (infile.eof())
break;
if (c != data[cnt++])
mismatch = true;
}
if (cnt != size || mismatch)
throw std::runtime_error("file contents not written correctly");
} else
throw std::runtime_error("could not validate file");
if (!filepath.is_absolute())
throw std::runtime_error("temporary filepath is not absolute");
return filepath;
}
static inline uint16_t extract16(unsigned char const *const ptr) {
uint16_t ret = 0;
ret |= static_cast<uint16_t>(ptr[0]) << 0;
ret |= static_cast<uint16_t>(ptr[1]) << 8;
return (ret);
}
static inline uint32_t extract32(unsigned char const *const ptr) {
uint32_t ret = 0;
ret |= static_cast<uint32_t>(ptr[0]) << 0;
ret |= static_cast<uint32_t>(ptr[1]) << 8;
ret |= static_cast<uint32_t>(ptr[2]) << 16;
ret |= static_cast<uint32_t>(ptr[3]) << 24;
return (ret);
}
static inline uint64_t extract64(unsigned char const *const ptr) {
uint64_t ret = 0;
ret |= static_cast<uint64_t>(ptr[0]) << 0;
ret |= static_cast<uint64_t>(ptr[1]) << 8;
ret |= static_cast<uint64_t>(ptr[2]) << 16;
ret |= static_cast<uint64_t>(ptr[3]) << 24;
ret |= static_cast<uint64_t>(ptr[4]) << 32;
ret |= static_cast<uint64_t>(ptr[5]) << 40;
ret |= static_cast<uint64_t>(ptr[6]) << 48;
ret |= static_cast<uint64_t>(ptr[7]) << 56;
return (ret);
}
static inline uint64_t get_lib_size(char const *const blob) {
#ifdef _WIN32
///////////////////////////////////////////////////////////////////////
// Analyze DOS stub
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
if (ublob[0] != 0x4d || ublob[1] != 0x5a) {
throw std::runtime_error("Blob is not a Windows DLL.");
}
uint32_t pe_header_offset = extract32(ublob + 0x3c);
///////////////////////////////////////////////////////////////////////
  // Analyze PE-header
unsigned char const *const pe_header = ublob + pe_header_offset;
// signature
uint32_t pe_signature = extract32(pe_header + 0);
if (pe_signature != 0x00004550) {
throw std::runtime_error("PE-header signature is not 0x00004550");
}
// machine
uint16_t machine = extract16(pe_header + 4);
if (machine != 0x8664) {
throw std::runtime_error("Only DLLs for x64 supported");
}
// number of sections
uint16_t number_of_sections = extract16(pe_header + 6);
// sizeof optional header
uint16_t sizeof_optional_header = extract16(pe_header + 20);
// magic
uint16_t magic = extract16(pe_header + 24);
if (magic != 0x10b && magic != 0x20b) {
throw std::runtime_error("MAGIC is not 0x010b or 0x020b");
}
///////////////////////////////////////////////////////////////////////
// Analyze tail of optional header
constexpr int coff_header_size = 24;
unsigned char const *const tail_of_optional_header =
pe_header + coff_header_size + sizeof_optional_header;
if (extract64(tail_of_optional_header - 8) != 0) {
throw std::runtime_error("Optional header not zero-padded");
}
///////////////////////////////////////////////////////////////////////
// Analyze last section header
constexpr int section_header_size = 40;
unsigned char const *const last_section_header =
tail_of_optional_header + section_header_size * (number_of_sections - 1);
uint32_t sizeof_raw_data = extract32(last_section_header + 16);
uint32_t pointer_to_raw_data = extract32(last_section_header + 20);
return sizeof_raw_data + pointer_to_raw_data;
#else
if (blob[0] != 0x7F || blob[1] != 'E' || blob[2] != 'L' || blob[3] != 'F')
throw std::runtime_error("Blob is not in ELF format");
if (blob[4] != 0x02)
throw std::runtime_error("Only 64-bit headers are supported");
if (blob[5] != 0x01)
throw std::runtime_error("Only little-endian headers are supported");
unsigned char const *const ublob =
reinterpret_cast<unsigned char const *const>(blob);
uint64_t e_shoff = extract64(ublob + 0x28);
uint16_t e_shentsize = extract16(ublob + 0x3A);
uint16_t e_shnum = extract16(ublob + 0x3C);
return e_shoff + (e_shentsize * e_shnum);
#endif
}
#ifdef _WIN32
class path_lib_record {
public:
void operator=(const path_lib_record &) = delete;
~path_lib_record() {
for (auto entry : lib_to_path) {
FreeLibrary(static_cast<HMODULE>(entry.first));
fs::permissions(entry.second, fs::perms::owner_all);
fs::remove_all(entry.second.remove_filename());
}
}
static void record_lib_path(fs::path path, void *library) {
lib_to_path[library] = path;
}
static void remove_lib(void *library) {
auto path = lib_to_path[library];
std::error_code ec;
FreeLibrary(static_cast<HMODULE>(library));
fs::permissions(path, fs::perms::owner_all);
if (fs::remove_all(path.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
lib_to_path.erase(library);
}
private:
static inline std::unordered_map<void *, fs::path> lib_to_path;
};
#endif
} // namespace detail
class kernel_library {
public:
kernel_library() : ptr{nullptr} {}
kernel_library(void *ptr) : ptr{ptr} {}
operator void *() const { return ptr; }
private:
void *ptr;
#ifdef _WIN32
static inline detail::path_lib_record single_instance_to_trigger_destructor;
#endif
};
namespace detail {
static inline kernel_library load_dl_from_data(char const *const data,
size_t size) {
fs::path filename = write_data_to_file(data, size);
#ifdef _WIN32
void *so = LoadLibraryW(filename.wstring().c_str());
#else
void *so = dlopen(filename.c_str(), RTLD_LAZY);
#endif
if (so == nullptr)
throw std::runtime_error("Failed to load kernel library");
#ifdef _WIN32
detail::path_lib_record::record_lib_path(filename, so);
#else
std::error_code ec;
  // On non-Windows platforms the temporary file can be removed right away;
  // a Windows DLL cannot be deleted while in use, so it is tracked above and
  // removed later by unload_kernel_library.
if (fs::remove_all(filename.remove_filename(), ec) != 2 || ec)
// one directory and one temporary file should have been deleted
throw std::runtime_error("Directory delete failed");
#endif
return so;
}
} // namespace detail
/// Load kernel library and return a handle to use the library.
/// \param [in] name The name of the library.
static inline kernel_library load_kernel_library(const std::string &name) {
std::ifstream ifs;
ifs.open(name, std::ios::in | std::ios::binary);
std::stringstream buffer;
buffer << ifs.rdbuf();
const std::string buffer_string = buffer.str();
return detail::load_dl_from_data(buffer_string.c_str(), buffer_string.size());
}
/// Load a kernel library whose image is already in memory and return a handle to
/// use the library.
/// \param [in] image A pointer to the image in memory.
static inline kernel_library load_kernel_library_mem(char const *const image) {
const size_t size = detail::get_lib_size(image);
return detail::load_dl_from_data(image, size);
}
/// Unload kernel library.
/// \param [in,out] library Handle to the library to be closed.
static inline void unload_kernel_library(const kernel_library &library) {
#ifdef _WIN32
detail::path_lib_record::remove_lib(library);
#else
dlclose(library);
#endif
}
class kernel_function {
public:
kernel_function() : ptr{nullptr} {}
kernel_function(dpct::kernel_functor ptr) : ptr{ptr} {}
operator void *() const { return ((void *)ptr); }
void operator()(sycl::queue &q, const sycl::nd_range<3> &range,
unsigned int a, void **args, void **extra) {
ptr(q, range, a, args, extra);
}
private:
dpct::kernel_functor ptr;
};
/// Find kernel function in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the kernel function.
static inline dpct::kernel_function
get_kernel_function(kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
GetProcAddress(static_cast<HMODULE>(static_cast<void *>(library)),
(name + std::string("_wrapper")).c_str()));
#else
dpct::kernel_functor fn = reinterpret_cast<dpct::kernel_functor>(
dlsym(library, (name + std::string("_wrapper")).c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get function");
return fn;
}
/// Invoke a kernel function.
/// \param [in] function kernel function.
/// \param [in] queue SYCL queue used to execute kernel
/// \param [in] groupRange SYCL group range
/// \param [in] localRange SYCL local range
/// \param [in] localMemSize The size of local memory required by the kernel
/// function.
/// \param [in] kernelParams Array of pointers to kernel arguments.
/// \param [in] extra Extra arguments.
static inline void invoke_kernel_function(dpct::kernel_function &function,
sycl::queue &queue,
sycl::range<3> groupRange,
sycl::range<3> localRange,
unsigned int localMemSize,
void **kernelParams, void **extra) {
function(queue, sycl::nd_range<3>(groupRange * localRange, localRange),
localMemSize, kernelParams, extra);
}
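// A minimal usage sketch of the kernel-library API, assuming a sycl::queue `q`,
// kernel arguments d_a, d_b, d_c, n, a library path "my_kernels.so" and a
// kernel named "vector_add"; all of these names are placeholders.
//
//   dpct::kernel_library lib = dpct::load_kernel_library("my_kernels.so");
//   dpct::kernel_function fn = dpct::get_kernel_function(lib, "vector_add");
//   void *args[] = {&d_a, &d_b, &d_c, &n};
//   dpct::invoke_kernel_function(fn, q,
//                                /*groupRange=*/sycl::range<3>(1, 1, 16),
//                                /*localRange=*/sycl::range<3>(1, 1, 64),
//                                /*localMemSize=*/0, args, /*extra=*/nullptr);
//   q.wait();
//   dpct::unload_kernel_library(lib);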
/// Find image wrapper in a kernel library and return its address.
/// \param [in] library Handle to the kernel library.
/// \param [in] name Name of the target image wrapper.
static inline dpct::image_wrapper_base_p
get_image_wrapper(dpct::kernel_library &library, const std::string &name) {
#ifdef _WIN32
dpct::image_wrapper_base_p fn =
reinterpret_cast<dpct::image_wrapper_base_p>(GetProcAddress(
static_cast<HMODULE>(static_cast<void *>(library)), name.c_str()));
#else
dpct::image_wrapper_base_p fn = reinterpret_cast<dpct::image_wrapper_base_p>(
dlsym(library, name.c_str()));
#endif
if (fn == nullptr)
throw std::runtime_error("Failed to get image");
return fn;
}
} // namespace dpct
#endif // __DPCT_KERNEL_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpct.hpp | //==---- dpct.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_HPP__
#define __DPCT_HPP__
#include <sycl/sycl.hpp>
#include <iostream>
#include <limits.h>
#include <math.h>
template <class... Args> class dpct_kernel_name;
template <int Arg> class dpct_kernel_scalar;
#include "atomic.hpp"
#include "device.hpp"
#include "image.hpp"
#include "kernel.hpp"
#include "math.hpp"
#include "memory.hpp"
#include "util.hpp"
#if defined(_MSC_VER)
#define __dpct_align__(n) __declspec(align(n))
#define __dpct_inline__ __forceinline
#else
#define __dpct_align__(n) __attribute__((aligned(n)))
#define __dpct_inline__ __inline__ __attribute__((always_inline))
#endif
#if defined(_MSC_VER)
#define __dpct_noinline__ __declspec(noinline)
#else
#define __dpct_noinline__ __attribute__((noinline))
#endif
#define DPCT_COMPATIBILITY_TEMP (600)
namespace dpct{
enum error_code { success = 0, default_error = 999 };
}
#define DPCT_CHECK_ERROR(expr) \
[&]() { \
try { \
expr; \
return dpct::success; \
} catch (std::exception const &e) { \
std::cerr << e.what() << std::endl; \
return dpct::default_error; \
} \
}()
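// A minimal usage sketch of DPCT_CHECK_ERROR, assuming host-side variables
// `q`, `dst`, `src` and `bytes`; the macro turns a thrown exception into
// dpct::default_error instead of propagating it.
//
//   auto status = DPCT_CHECK_ERROR(q.memcpy(dst, src, bytes).wait());
//   if (status != dpct::success) {
//     // handle the failure
//   }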
#define DPCT_PI_F (3.14159274101257f)
#define DPCT_PI (3.141592653589793115998)
#endif // __DPCT_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dnnl_utils.hpp | //==---- dnnl_utils.hpp ---------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DNNL_UTILS_HPP__
#define __DPCT_DNNL_UTILS_HPP__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/numeric>
#include <oneapi/mkl.hpp>
#include <oneapi/mkl/rng/device.hpp>
#include <sycl/sycl.hpp>
#include <oneapi/dnnl/dnnl.hpp>
#include <oneapi/dnnl/dnnl_sycl.hpp>
#include <unordered_map>
#include <algorithm>
#include <list>
#include "memory.hpp"
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace dnnl {
/// Get concatenated library version as an integer.
static inline size_t get_version() {
const ::dnnl::version_t *ver = ::dnnl::version();
return ver->major * 1000 + ver->minor * 100 + ver->patch;
}
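// For example, get_version() above reports oneDNN 3.3.4 as
// 3 * 1000 + 3 * 100 + 4 = 3304 under this concatenation scheme.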
class engine_ext;
typedef oneapi::mkl::rng::philox4x32x10 rng_engine_t;
/// An enum class representing memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class memory_format_tag { nchw, nhwc, nchw_blocked };
/// An enum class representing RNN data memory layout. Used by
/// memory_desc_ext to create a memory with pre-defined layout.
enum class rnn_memory_format_tag { tnc, ntc };
/// A class holding the description of an N-dimensions memory.
class memory_desc_ext {
::dnnl::memory::desc _desc;
public:
/// Convert dpct::library_data_t to dnnl::memory::data_type.
static ::dnnl::memory::data_type to_dnnl_data_type(dpct::library_data_t dt);
/// Convert dnnl::memory::data_type to dpct::library_data_t.
static dpct::library_data_t
to_dpct_library_data_t(::dnnl::memory::data_type dt, unsigned block_size);
/// Convert dpct::dnnl::memory_format_tag to dnnl::memory::format_tag.
static ::dnnl::memory::format_tag to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag);
memory_desc_ext() = default;
memory_desc_ext(::dnnl::memory::desc &desc) : _desc(desc) {}
memory_desc_ext(::dnnl::memory::desc &&desc) : _desc(std::move(desc)) {}
/// Setting a 4D memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
void set(memory_format_tag tag, dpct::library_data_t dt, int n, int c, int h,
int w);
/// Setting a 3D RNN data memory with given parameters.
/// \param [in] tag RNN data format tag.
/// \param [in] dt Data type.
/// \param [in] t Sequence length.
/// \param [in] n Batch size.
/// \param [in] c Size of the input channel (feature) dimension.
void set(rnn_memory_format_tag tag, dpct::library_data_t dt, int t, int n, int c);
/// Setting a 4D memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] n Number of images.
/// \param [in] c Number of channels.
/// \param [in] h Height of images.
/// \param [in] w Width of images.
/// \param [in] n_stride Stride between two continuous images.
/// \param [in] c_stride Stride between two continuous channels.
/// \param [in] h_stride Stride between two continuous rows.
/// \param [in] w_stride Stride between two continuous columns.
void set(dpct::library_data_t dt, int n, int c, int h, int w, int n_stride,
int c_stride, int h_stride, int w_stride);
/// Setting a ND memory with given parameters.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
/// \param [in] strides Array of dimension ndims that contain the stride of
/// each memory dimension.
void set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]);
/// Setting a ND memory with given parameters.
/// \param [in] tag Format tag.
/// \param [in] dt Data type.
/// \param [in] ndims Dimension of the memory.
/// \param [in] dims Array of dimension ndims that contain the size of each
/// memory dimension.
void set(memory_format_tag tag, dpct::library_data_t dt, int ndims,
const int dims[]);
/// Getting a ::dnnl::memory::desc from a memory_desc_ext.
/// \returns The ::dnnl::memory::desc.
const ::dnnl::memory::desc &get_desc() const { return _desc; }
/// Setting holding desc with given dnnl memory descriptor.
void set_desc(::dnnl::memory::desc desc) { _desc = desc; }
/// Getting a size of a memory_desc_ext in bytes.
/// \returns The size.
size_t get_size() const { return _desc.get_size(); }
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
/// \param [out] n_stride Stride between two continuous images.
/// \param [out] c_stride Stride between two continuous channels.
/// \param [out] h_stride Stride between two continuous rows.
/// \param [out] w_stride Stride between two continuous columns.
void get(dpct::library_data_t *dt, int *n, int *c, int *h, int *w,
int *n_stride, int *c_stride, int *h_stride, int *w_stride) const;
/// Getting parameters from a 4D memory.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] n Number of images.
/// \param [out] c Number of channels.
/// \param [out] h Height of images.
/// \param [out] w Width of images.
void get(dpct::library_data_t *dt, memory_format_tag *tag, int *n, int *c,
int *h, int *w) const;
/// Getting parameters from a 3D RNN data memory.
/// \param [out] dt Data type.
/// \param [out] tag RNN data format tag.
/// \param [out] t Sequence length.
/// \param [out] n Batch size.
/// \param [out] c Size of the input channel (feature) dimension.
void get(dpct::library_data_t *dt, rnn_memory_format_tag *tag, int *t, int *n,
int *c) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
/// \param [out] strides Array of dimension requested_ndims that contain the
/// stride of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt, int *ndims,
int dims[], int strides[]) const;
/// Getting parameters from a ND memory.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given memory descriptor.
/// \param [out] dt Data type.
/// \param [out] tag Format tag.
/// \param [out] ndims Dimension of the memory.
/// \param [out] dims Array of dimension requested_ndims that contain the
/// size of each memory dimension.
void get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims, int dims[]) const;
/// Getting dims from a ND memory.
/// \return The dims.
std::vector<int64_t> get_dims() const { return _desc.get_dims(); }
/// Getting strides from a ND memory.
/// \return The strides.
std::vector<int64_t> get_strides() const {
return _desc.get_strides();
}
/// Getting element num from a ND memory.
/// \return The element number.
size_t get_element_num() const {
auto dims = _desc.get_dims();
if (dims.empty()) {
return 0;
}
size_t result = 1;
for (auto &dim : dims) {
result *= dim;
}
return result;
}
operator bool() const {
return bool(_desc);
}
memory_desc_ext &operator=(std::nullptr_t) {
_desc.reset(nullptr);
return *this;
}
};
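// A minimal usage sketch (not part of the original header): it builds a 4D
// NCHW descriptor and queries its size. The shape values and the
// dpct::library_data_t::real_float enumerator are illustrative assumptions.
inline void memory_desc_ext_example() {
  memory_desc_ext desc;
  // 1 image, 3 channels, 224 x 224 spatial dimensions, 32-bit float data.
  desc.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
           /*n=*/1, /*c=*/3, /*h=*/224, /*w=*/224);
  size_t element_count = desc.get_element_num(); // 1 * 3 * 224 * 224
  size_t byte_count = desc.get_size();           // total allocation in bytes
  (void)element_count;
  (void)byte_count;
}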
/// A class holding description for an activation operation.
class activation_desc {
::dnnl::algorithm _alg;
float _alpha;
float _beta;
public:
/// Setting an activation descriptor with given parameters.
/// \param [in] alg Activation algorithm.
/// \param [in] alpha Value of alpha parameter.
void set(::dnnl::algorithm alg, float alpha) {
_alg = alg;
if(alg == ::dnnl::algorithm::eltwise_clip) {
_alpha = 0;
_beta = alpha;
} else {
_alpha = alpha;
}
}
/// Getting parameters from an activation descriptor.
/// \param [out] alg Activation algorithm.
/// \param [out] alpha Value of alpha parameter.
void get(::dnnl::algorithm *alg, float *alpha) const {
*alg = _alg;
if(_alg == ::dnnl::algorithm::eltwise_clip) {
*alpha = _beta;
} else {
*alpha = _alpha;
}
}
/// Setting the alpha parameter of an activation descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of an activation descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the algorithm parameter of an activation descriptor.
/// \param [in] alg Activation algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Getting the alpha parameter from an activation descriptor.
/// \returns Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from an activation descriptor.
/// \returns Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the algorithm parameter from an activation descriptor.
/// \returns Activation algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
};
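// A minimal usage sketch (not part of the original header): for
// ::dnnl::algorithm::eltwise_clip the descriptor stores the supplied value as
// the upper bound (beta) with a lower bound (alpha) of zero, so get() hands
// the same value back to the caller.
inline void activation_desc_example() {
  activation_desc desc;
  desc.set(::dnnl::algorithm::eltwise_clip, /*alpha=*/6.0f); // clip to [0, 6]
  ::dnnl::algorithm alg;
  float alpha;
  desc.get(&alg, &alpha); // alg == eltwise_clip, alpha == 6.0f
  (void)alg;
  (void)alpha;
}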
/// A class holding description for a local response normalization operation.
class lrn_desc {
unsigned int _local_size;
float _alpha;
float _beta;
float _k;
public:
/// Setting a local response normalization descriptor with given parameters.
/// \param [in] local_size Value of local_size parameter.
/// \param [in] alpha Value of alpha parameter.
/// \param [in] beta Value of beta parameter.
/// \param [in] k Value of k parameter.
void set(unsigned int local_size, float alpha, float beta, float k) {
_local_size = local_size;
_alpha = alpha;
_beta = beta;
_k = k;
}
/// Getting parameters from a local response normalization descriptor.
/// \param [out] local_size Value of local_size parameter.
/// \param [out] alpha Value of alpha parameter.
/// \param [out] beta Value of beta parameter.
/// \param [out] k Value of k parameter.
void get(unsigned int *local_size, float *alpha, float *beta,
float *k) const {
*local_size = _local_size;
*alpha = _alpha;
*beta = _beta;
*k = _k;
}
/// Setting the local size parameter of a local response normalization
/// descriptor.
/// \param [in] local_size Value of local_size parameter.
void set_local_size(unsigned int local_size) { _local_size = local_size; }
/// Setting the alpha parameter of a local response normalization descriptor.
/// \param [in] alpha Value of alpha parameter.
void set_alpha(float alpha) { _alpha = alpha; }
/// Setting the beta parameter of a local response normalization descriptor.
/// \param [in] beta Value of beta parameter.
void set_beta(float beta) { _beta = beta; }
/// Setting the k parameter of a local response normalization descriptor.
/// \param [in] k Value of k parameter.
void set_k(float k) { _k = k; }
/// Getting the local size parameter from a local response normalization
/// descriptor.
/// \returns Value of local_size parameter.
unsigned int get_local_size() const { return _local_size; }
/// Getting the alpha parameter from a local response normalization
/// descriptor.
/// \returns Value of alpha parameter.
float get_alpha() const { return _alpha; }
/// Getting the beta parameter from a local response normalization descriptor.
/// \returns Value of beta parameter.
float get_beta() const { return _beta; }
/// Getting the k parameter from a local response normalization descriptor.
/// \returns Value of k parameter.
float get_k() const { return _k; }
};
/// An enum class representing softmax algorithm.
enum class softmax_algorithm { normal, log };
/// An enum class representing softmax mode.
enum class softmax_mode { instance, channel };
/// A class holding description for a pooling operation.
class pooling_desc {
::dnnl::algorithm _alg;
std::vector<int64_t> _stride;
std::vector<int64_t> _kernel;
std::vector<int64_t> _padding;
public:
/// Setting a 2D pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] kernel_h Value of height of kernel.
/// \param [in] kernel_w Value of width of kernel.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
void set(::dnnl::algorithm alg, int kernel_h, int kernel_w, int padding_h,
int padding_w, int stride_h, int stride_w) {
_alg = alg;
_stride = {stride_h, stride_w};
_kernel = {kernel_h, kernel_w};
_padding = {padding_h, padding_w};
}
/// Setting a ND pooling descriptor with given parameters.
/// \param [in] alg Pooling algorithm.
/// \param [in] ndims Dimension of the pooling operation.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [in] padding Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set(::dnnl::algorithm alg, int ndims, int kernel[], int padding[],
int stride[]) {
_alg = alg;
_stride = std::vector<int64_t>(stride, stride + ndims);
_kernel = std::vector<int64_t>(kernel, kernel + ndims);
_padding = std::vector<int64_t>(padding, padding + ndims);
}
/// Getting parameters from a 2D pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] kernel_h Value of height of kernel.
/// \param [out] kernel_w Value of width of kernel.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
void get(::dnnl::algorithm *alg, int *kernel_h, int *kernel_w, int *padding_h,
int *padding_w, int *stride_h, int *stride_w) const {
*alg = _alg;
*kernel_h = _kernel[0];
*kernel_w = _kernel[1];
*padding_h = _padding[0];
*padding_w = _padding[1];
*stride_h = _stride[0];
*stride_w = _stride[1];
}
/// Getting parameters from a ND pooling descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given pooling descriptor.
/// \param [out] alg Pooling algorithm.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
/// \param [out] padding Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] stride Array of dimension ndims containing the stride size of
/// each dimension.
void get(int requested_ndims, ::dnnl::algorithm *alg, int *ndims,
int kernel[], int padding[], int stride[]) const {
*alg = _alg;
*ndims = _stride.size();
for (int i = 0; i < requested_ndims; i++) {
kernel[i] = _kernel[i];
padding[i] = _padding[i];
stride[i] = _stride[i];
}
}
/// Setting the algorithm parameter of a pooling descriptor.
/// \param [in] alg Pooling algorithm.
void set_algorithm(::dnnl::algorithm alg) { _alg = alg; }
/// Setting the stride parameter of a pooling descriptor.
/// \param [in] stride Array of dimension ndims containing the stride size of
/// each dimension.
void set_stride(const std::vector<int64_t> &stride) { _stride = stride; }
/// Setting the kernel parameter of a pooling descriptor.
/// \param [in] kernel Array of dimension ndims containing the kernel size of
/// each dimension.
void set_kernel(const std::vector<int64_t> &kernel) { _kernel = kernel; }
/// Setting the padding parameter of a pooling descriptor.
/// \param [in] padding Array of dimension ndims containing the padding size
/// of each dimension.
void set_padding(const std::vector<int64_t> &padding) { _padding = padding; }
/// Getting the algorithm parameter from a pooling descriptor.
/// \returns Pooling algorithm.
::dnnl::algorithm get_algorithm() const { return _alg; }
/// Getting the stride parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _stride; }
/// Getting the kernel parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the kernel size of each
/// dimension.
const std::vector<int64_t> &get_kernel() const { return _kernel; }
/// Getting the padding parameter from a pooling descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _padding; }
/// Getting the output dimensions of a memory after 2D pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
*out_n = dims[0];
*out_c = dims[1];
*out_h = 1 + (dims[2] + 2 * _padding[0] - _kernel[0]) / _stride[0];
*out_w = 1 + (dims[3] + 2 * _padding[1] - _kernel[1]) / _stride[1];
}
/// Getting the output dimensions of a memory after ND pooling has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] =
1 + (dims[i] + 2 * _padding[i - 2] - _kernel[i - 2]) / _stride[i - 2];
}
}
};
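// A minimal usage sketch (not part of the original header): a 2x2 max-pooling
// window with stride 2 and no padding halves each spatial dimension, matching
// the formula used by get_forward_output_dim above. The input shape and the
// dpct::library_data_t::real_float enumerator are illustrative assumptions.
inline void pooling_desc_example() {
  pooling_desc desc;
  desc.set(::dnnl::algorithm::pooling_max, /*kernel_h=*/2, /*kernel_w=*/2,
           /*padding_h=*/0, /*padding_w=*/0, /*stride_h=*/2, /*stride_w=*/2);
  memory_desc_ext src;
  src.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
          /*n=*/1, /*c=*/3, /*h=*/224, /*w=*/224);
  int out_n, out_c, out_h, out_w;
  desc.get_forward_output_dim(src, &out_n, &out_c, &out_h, &out_w);
  // out_n == 1, out_c == 3, out_h == 1 + (224 - 2) / 2 == 112, out_w == 112.
  (void)out_n; (void)out_c; (void)out_h; (void)out_w;
}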
/// An enum class representing reduction operations.
enum class reduction_op {
max,
min,
sum,
mul,
mean,
amax,
mul_no_zeros,
norm1,
norm2
};
/// An enum class representing batch normalization mode.
enum class batch_normalization_mode { per_activation, spatial };
/// An enum class representing batch normalization operations.
enum class batch_normalization_ops { none, activation, add_activation };
/// An enum class representing binary operations.
enum class binary_op { add, sub, mul, div, min, max, sqrt, neg };
/// A struct representing convolution algorithm information.
struct convolution_algorithm_info {
::dnnl::algorithm algo = ::dnnl::algorithm::convolution_auto;
int status = 0;
};
/// A class holding description for a convolution operation.
class convolution_desc {
std::vector<int64_t> _strides;
std::vector<int64_t> _dilates;
std::vector<int64_t> _paddings;
int _group_count = 1;
::dnnl::fpmath_mode _math_mode = ::dnnl::fpmath_mode::strict;
public:
/// Setting a group count to be used in the convolution.
/// \param [in] group_count Value of group count.
void set_group_count(int group_count) { _group_count = group_count; }
/// Getting a group count specified in the given convolution descriptor.
/// \returns Value of group count.
int get_group_count() { return _group_count; }
/// Setting floating point math mode to be used in the convolution.
/// \param [in] math_mode Value of math_mode.
void set_math_mode(::dnnl::fpmath_mode math_mode) { _math_mode = math_mode; }
/// Getting floating point math mode specified in the given convolution descriptor.
/// \returns Value of math mode.
::dnnl::fpmath_mode get_math_mode() { return _math_mode; }
/// Setting a 2D convolution descriptor with given parameters.
/// \param [in] padding_h Value of height of padding.
/// \param [in] padding_w Value of width of padding.
/// \param [in] stride_h Value of height of stride.
/// \param [in] stride_w Value of width of stride.
/// \param [in] dilate_h Value of height of dilate.
/// \param [in] dilate_w Value of width of dilate.
void set(int padding_h, int padding_w, int stride_h, int stride_w,
int dilate_h, int dilate_w) {
_strides = {stride_h, stride_w};
_dilates = {dilate_h - 1, dilate_w - 1};
_paddings = {padding_h, padding_w};
}
/// Setting a ND convolution descriptor with given parameters.
/// \param [in] ndims Dimension of the convolution operation.
/// \param [in] paddings Array of dimension ndims containing the padding size of
/// each dimension.
/// \param [in] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [in] dilates Array of dimension ndims containing the dilate size of
/// each dimension.
void set(int ndims, int paddings[], int strides[], int dilates[]) {
_strides = std::vector<int64_t>(strides, strides + ndims);
_paddings = std::vector<int64_t>(paddings, paddings + ndims);
_dilates = std::vector<int64_t>(dilates, dilates + ndims);
for (auto &dilate : _dilates) {
dilate--;
}
}
/// Getting parameters from a 2D convolution descriptor.
/// \param [out] padding_h Value of height of padding.
/// \param [out] padding_w Value of width of padding.
/// \param [out] stride_h Value of height of stride.
/// \param [out] stride_w Value of width of stride.
/// \param [out] dilate_h Value of height of dilate.
/// \param [out] dilate_w Value of width of dilate.
void get(int *padding_h, int *padding_w, int *stride_h, int *stride_w,
int *dilate_h, int *dilate_w) const {
*dilate_h = _dilates[0];
*dilate_w = _dilates[1];
*padding_h = _paddings[0];
*padding_w = _paddings[1];
*stride_h = _strides[0];
*stride_w = _strides[1];
}
/// Getting parameters from a ND convolution descriptor.
/// \param [in] requested_ndims Requested number of dimensions to get from a
/// given convolution descriptor.
/// \param [out] ndims Dimension of the pooling operation.
/// \param [out] paddings Array of dimension ndims containing the padding size
/// of each dimension.
/// \param [out] strides Array of dimension ndims containing the stride size of
/// each dimension.
/// \param [out] dilates Array of dimension ndims containing the dilate size of
/// each dimension.
void get(int requested_ndims, int *ndims, int paddings[], int strides[],
int dilates[]) const {
*ndims = _strides.size();
for (int i = 0; i < requested_ndims; i++) {
dilates[i] = _dilates[i];
paddings[i] = _paddings[i];
strides[i] = _strides[i];
}
}
/// Getting the stride parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the stride size of each
/// dimension.
const std::vector<int64_t> &get_stride() const { return _strides; }
/// Getting the dilate parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the dilate size of each
/// dimension.
const std::vector<int64_t> &get_dilate() const { return _dilates; }
/// Getting the padding parameter from a convolution descriptor.
/// \returns Array of dimension ndims containing the padding size of each
/// dimension.
const std::vector<int64_t> &get_padding() const { return _paddings; }
/// Getting the output dimensions of a memory after 2D convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] out_n Number of images.
/// \param [out] out_c Number of channels.
/// \param [out] out_h Height of images.
/// \param [out] out_w Width of images.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int *out_n,
int *out_c, int *out_h, int *out_w) const {
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
*out_n = dims[0];
*out_c = weight_dims[0];
*out_h = 1 + (dims[2] + 2 * _paddings[0] -
(1 + (_dilates[0] * (weight_dims[2] - 1)))) /
_strides[0];
*out_w = 1 + (dims[3] + 2 * _paddings[1] -
(1 + (_dilates[1] * (weight_dims[3] - 1)))) /
_strides[1];
}
/// Getting the output dimensions of a memory after ND convolution has been
/// applied.
/// \param [in] desc Input memory descriptor.
/// \param [in] weight_desc Input weight memory descriptor.
/// \param [out] ndims Dimension of the memory.
/// \param [out] out_dims Array of dimension requested_ndims that contain
/// the size of each memory dimension.
void get_forward_output_dim(const memory_desc_ext &desc,
const memory_desc_ext &weight_desc, int ndims,
int out_dims[]) const {
assert(ndims >= 4 && "ndims is at least 4.");
auto dims = desc.get_dims();
auto weight_dims = weight_desc.get_dims();
out_dims[0] = dims[0];
out_dims[1] = weight_dims[1];
for (int i = 2; i < ndims; i++) {
out_dims[i] = 1 + (dims[i] + 2 * _paddings[i - 2] -
(1 + (_dilates[i - 2] * (weight_dims[i] - 1)))) /
_strides[i - 2];
}
}
convolution_desc &operator=(std::nullptr_t) {
return *this = convolution_desc();
}
operator bool() const {
return !(_strides.size() == 0
&& _dilates.size() == 0
&& _paddings.size() == 0);
}
};
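// A minimal usage sketch (not part of the original header): it fills a 2D
// convolution descriptor and reads the stored geometry back. Note that set()
// stores dilations in the oneDNN convention (requested value minus one),
// which is what get_dilate() returns. All values are illustrative
// assumptions.
inline void convolution_desc_example() {
  convolution_desc desc;
  desc.set(/*padding_h=*/1, /*padding_w=*/1, /*stride_h=*/1, /*stride_w=*/1,
           /*dilate_h=*/1, /*dilate_w=*/1);
  desc.set_group_count(1);
  desc.set_math_mode(::dnnl::fpmath_mode::strict);
  const std::vector<int64_t> &strides = desc.get_stride();   // {1, 1}
  const std::vector<int64_t> &paddings = desc.get_padding(); // {1, 1}
  const std::vector<int64_t> &dilates = desc.get_dilate();   // {0, 0}
  (void)strides; (void)paddings; (void)dilates;
}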
/// An enum class representing rnn mode.
enum class rnn_mode { vanilla_relu, vanilla_tanh, lstm, gru };
/// An enum class representing rnn bias mode.
enum class rnn_bias_mode { none, single };
/// An enum class representing rnn direction.
enum class rnn_direction {unidirectional, bidirectional};
/// A class holding description for a RNN operation.
class rnn_desc {
rnn_mode _mode;
rnn_bias_mode _bias_mode;
rnn_direction _direction;
dpct::library_data_t _dt;
int _input_size;
int _hidden_size;
int _projection_size;
int _layer_size;
public:
void set(rnn_mode mode, rnn_bias_mode bias_mode, rnn_direction direction,
dpct::library_data_t dt, int input_size, int hidden_size,
int projection_size, int layer_size) {
_mode = mode;
_bias_mode = bias_mode;
_direction = direction;
_input_size = input_size;
_hidden_size = hidden_size;
_projection_size = projection_size;
_layer_size = layer_size;
_dt = dt;
}
void get(rnn_mode *mode, rnn_bias_mode *bias_mode, rnn_direction *direction,
dpct::library_data_t *dt, int *input_size, int *hidden_size,
int *projection_size, int *layer_size) const {
*mode = _mode;
*bias_mode = _bias_mode;
*direction = _direction;
*input_size = _input_size;
*hidden_size = _hidden_size;
*projection_size = _projection_size;
*layer_size = _layer_size;
*dt = _dt;
}
};
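// A minimal usage sketch (not part of the original header): it describes a
// two-layer unidirectional LSTM. The sizes and the
// dpct::library_data_t::real_float enumerator are illustrative assumptions.
inline void rnn_desc_example() {
  rnn_desc desc;
  desc.set(rnn_mode::lstm, rnn_bias_mode::single,
           rnn_direction::unidirectional, dpct::library_data_t::real_float,
           /*input_size=*/128, /*hidden_size=*/256, /*projection_size=*/0,
           /*layer_size=*/2);
}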
/// A class holding description for a Dropout operation.
class dropout_desc {
struct dropout_desc_imp {
float _p = 0.5f;
unsigned long long _seed = 1;
void *_state = nullptr;
std::vector<std::uint8_t> _host_state;
rng_engine_t _rng_engine;
dropout_desc_imp() : _rng_engine(dpct::get_default_queue(), 1) {}
};
std::shared_ptr<dropout_desc_imp> _imp;
void generate(sycl::queue *q, std::int64_t required_state_size,
std::int64_t num, void *buffer) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::event e_gen = oneapi::mkl::rng::generate(
oneapi::mkl::rng::bernoulli<std::int32_t>(1.f - _imp->_p),
_imp->_rng_engine, num, (std::int32_t *)buffer);
sycl::event e_save = q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e_gen);
cgh.host_task([=] {
oneapi::mkl::rng::save_state(_imp->_rng_engine,
_imp->_host_state.data());
});
});
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size,
e_save);
#endif
}
public:
operator bool() const {
return bool(_imp);
}
dropout_desc &operator=(std::nullptr_t) {
_imp.reset();
return *this;
}
/// Initializing a dropout descriptor.
void init(){
_imp = std::make_shared<dropout_desc_imp>();
}
/// Setting a dropout descriptor with given parameters.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store the random generator state.
/// \param [in] seed Seed used to initialize the generator state.
void set(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
/// Getting parameters from a dropout descriptor.
/// \param [out] p Probability of value set to zero.
/// \param [out] states Memory that stores the random generator state.
/// \param [out] seed Seed used to initialize the generator state.
void get(float *p, void **states, unsigned long long *seed) const noexcept {
*seed = _imp->_seed;
*states = _imp->_state;
*p = _imp->_p;
}
/// Getting the probability of value set to zero.
/// \returns Probability.
float get_probability() const noexcept { return _imp->_p; }
/// Restoring a dropout descriptor from a stored state.
/// \param [in] engine Engine of the dropout operation.
/// \param [in] p Probability of value set to zero.
/// \param [in] state Memory that stores the random generator state.
/// \param [in] state_size Required size to store the random generator state.
/// \param [in] seed Seed used to initialize the generator state.
void restore(engine_ext &engine, float p, void *state, size_t state_size,
unsigned long long seed);
friend class engine_ext;
};
namespace detail {
typedef std::string primitive_cache_key_type;
typedef std::list<primitive_cache_key_type> usage_list_type;
typedef struct {
::dnnl::primitive *primitive;
usage_list_type::iterator usage_it;
std::function<void(::dnnl::primitive *)> destructor;
sycl::event e;
} primitive_cache_value_type;
typedef std::unordered_map<primitive_cache_key_type, primitive_cache_value_type>
cache_map_type;
// The primitive cache uses LRU replacement policy, and the default cache
// capacity is 1024.
class primitive_cache {
int _capacity = 1024;
usage_list_type usage;
cache_map_type cache_map;
void touch(cache_map_type::iterator it, sycl::event e = {},
bool update_event = false) {
if (it->second.usage_it != usage.begin()) {
const primitive_cache_key_type &key = it->first;
usage.erase(it->second.usage_it);
usage.push_front(key);
it->second.usage_it = usage.begin();
}
if (update_event) {
it->second.e = e;
}
}
void async_destruct_primitive(const primitive_cache_value_type &value) {
dpct::get_current_device().default_queue().submit([&](sycl::handler &cgh) {
cgh.depends_on(value.e);
cgh.host_task([=] { value.destructor(value.primitive); });
});
}
public:
::dnnl::primitive *get(const primitive_cache_key_type &key) {
auto it = cache_map.find(key);
if (it == cache_map.end()) {
return nullptr;
}
touch(it);
return it->second.primitive;
}
void put(const primitive_cache_key_type &key, ::dnnl::primitive *value,
std::function<void(::dnnl::primitive *)> destructor, sycl::event e) {
auto it = cache_map.find(key);
if (it != cache_map.end()) {
touch(it, e, true);
} else {
if (cache_map.size() == _capacity) {
auto last_primitive = cache_map.find(usage.back());
async_destruct_primitive(last_primitive->second);
cache_map.erase(usage.back());
usage.pop_back();
}
usage.push_front(key);
cache_map[key] = {value, usage.begin(), destructor, e};
}
}
~primitive_cache() {
for (auto &v : cache_map) {
async_destruct_primitive(v.second);
}
}
};
} // namespace detail
/// A class holding the oneDNN engine.
class engine_ext {
struct output_argument_info {
float _alpha;
float _beta;
int _name;
memory_desc_ext _desc;
void *_data;
output_argument_info(float alpha, float beta, int name,
memory_desc_ext desc, void *data)
: _alpha(alpha), _beta(beta), _name(name), _desc(desc), _data(data) {}
output_argument_info(float alpha, float beta, memory_desc_ext desc,
void *data)
: _alpha(alpha), _beta(beta), _name(0), _desc(desc), _data(data) {}
};
::dnnl::engine _eng;
::dnnl::stream _s;
sycl::queue *_q = nullptr;
std::map<void *, ::dnnl::memory> workspace_map;
std::int64_t _random_engine_state_size = -1;
detail::primitive_cache _primitive_cache;
::dnnl::memory &get_workspace(void *key) { return workspace_map[key]; }
void insert_workspace(void *key, ::dnnl::memory workspace) {
workspace_map[key] = workspace;
}
const ::dnnl::stream &get_stream() const { return _s; }
const ::dnnl::engine &get_engine() const { return _eng; }
void *allocate(const memory_desc_ext &desc, int count = 1) const;
::dnnl::memory::desc
compress_spatial_dimensions_to_channel(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode);
sycl::event batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var);
sycl::event batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var);
::dnnl::memory::desc
transfer_memory_desc_to_channel_major_format(const ::dnnl::memory::desc &desc);
::dnnl::memory::desc
bn_reorder_memory_to_channel_major_format(bool is_input, ::dnnl::memory::desc &desc,
void *src, void **cache,
std::vector<void *> &caches);
::dnnl::memory::desc
transfer_memory_desc_to_format_tag_any(const ::dnnl::memory::desc &desc){
return ::dnnl::memory::desc(desc.get_dims(), desc.get_data_type(),
::dnnl::memory::format_tag::any);
}
void allocate_and_reorder_memory_to_optimal(::dnnl::memory::desc &from_desc,
void *&from,
::dnnl::memory::desc &to_desc,
void *&to,
std::vector<void *> &caches) {
if (from_desc != to_desc) {
to = allocate(to_desc);
caches.push_back(to);
async_reorder(1.f, from_desc, from, 0.f, to_desc, to);
}
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive(args_type &&...args);
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
create_primitive_with_pd(const typename primitive_type::primitive_desc &pd);
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
create_primitive_desc(args_type &&...args);
template <typename primitive_desc_type>
std::string generate_cache_key(const primitive_desc_type &pd);
void serialize_dims(std::stringstream &ss, const std::vector<int64_t> &dims) {
ss.write((char *)dims.data(), dims.size() * sizeof(int64_t));
};
void serialize_mem_desc(std::stringstream &ss,
const ::dnnl::memory::desc &desc) {
if (desc.is_zero()) {
return;
}
auto format_kind = desc.get_format_kind();
ss << desc.get_ndims() << (std::uint8_t)desc.get_data_type()
<< (std::uint8_t)format_kind;
serialize_dims(ss, desc.get_dims());
serialize_dims(ss, desc.get_strides());
if (format_kind == ::dnnl::memory::format_kind::blocked) {
ss << desc.get_inner_nblks();
serialize_dims(ss, desc.get_inner_blks());
serialize_dims(ss, desc.get_inner_idxs());
}
};
sycl::event execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size,
int gate_num, int projection_size, std::vector<void *> &data,
std::vector<int> &offset, int iter_num, size_t *weight_size = nullptr,
size_t *workspace_size = nullptr, size_t *scratchpad_size = nullptr);
sycl::event rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query);
sycl::event execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num);
void async_free(sycl::queue *q, sycl::event e,
std::unordered_map<int, ::dnnl::memory> *args,
std::vector<void *> device_ptrs = {}) {
q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
if (args) {
delete args;
}
for (auto ptr : device_ptrs) {
if (ptr) {
sycl::free(ptr, *_q);
}
}
});
});
};
bool
scale_parameter_preprocess(const std::vector<output_argument_info> &args);
template <typename primitive_type>
sycl::event
execute_primitive(const std::pair<detail::primitive_cache_key_type,
primitive_type *> &primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &extra_args = {},
const std::vector<void *> &device_ptrs = {});
template <typename T>
sycl::event fill_with_type(sycl::queue *q, void *src, const void *value,
size_t size_with_byte) {
return q->fill<T>(static_cast<T *>(src), *static_cast<const T *>(value),
size_with_byte / sizeof(T));
}
template <typename T> struct no_zero_op {
T operator()(T e) {
if (!e) {
return 1;
}
return e;
}
};
template <typename T>
void transform_no_zero_with_type(sycl::queue *q, void *src, void *dst,
size_t num) {
std::transform(oneapi::dpl::execution::make_device_policy(*q),
static_cast<T *>(src), static_cast<T *>(src) + num,
static_cast<T *>(dst), no_zero_op<T>());
}
void transform_no_zero(const memory_desc_ext &desc, void *src, void *dst);
::dnnl::memory::desc get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc);
void get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num);
public:
engine_ext() {}
operator bool() const {
return bool(_eng) && bool(_s) && bool(_q);
}
engine_ext &operator=(std::nullptr_t) {
_eng.reset(nullptr);
_s.reset(nullptr);
_q = nullptr;
return *this;
}
/// Creating oneDNN engine.
void create_engine() {
_eng = ::dnnl::sycl_interop::make_engine(
dpct::get_current_device(), dpct::get_current_device().get_context());
_s = ::dnnl::sycl_interop::make_stream(
_eng, dpct::get_current_device().default_queue());
_q = &dpct::get_current_device().default_queue();
}
/// Setting the user's SYCL queue for an oneDNN engine.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) {
if (!q) {
throw std::runtime_error("set_queue: pointer must not be nullptr.");
}
if (!_eng) {
throw std::runtime_error("set_queue: current engine is invalid.");
}
if (q->get_context() != ::dnnl::sycl_interop::get_context(_eng)) {
throw std::runtime_error(
"set_queue: queue is mismatch with current engine context.");
}
_q = q;
_s = ::dnnl::sycl_interop::make_stream(_eng, *q);
}
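  // A minimal usage sketch (not part of the original header), shown as a
  // comment because it sits inside the class body:
  //
  //   dpct::dnnl::engine_ext handle;
  //   handle.create_engine();              // bind to the current device/queue
  //   sycl::queue *q = handle.get_queue();
  //   // Optionally rebind to a user queue that shares the engine's context:
  //   // handle.set_queue(&user_queue);
  //
  // set_queue() rejects a null pointer, an uninitialized engine, and a queue
  // whose context differs from the engine's context.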
/// Retrieving the user's SYCL queue set in the oneDNN engine.
/// \returns Pointer to the SYCL queue.
sycl::queue *get_queue() const { return _q; }
/// Setting all elements of a memory to a given value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
void fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
void scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void
activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
void pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
void pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
void softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
void softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
void lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified local response normalization
/// function.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
void lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Setting all elements of a memory to a given value asynchronously.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] valuePtr Pointer to a single value.
/// \returns An event representing the fill operations.
sycl::event async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr);
/// Copying the scaled data from a memory to another memory with a different
/// description asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reorder operations.
sycl::event async_reorder(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Scaling all the elements of a memory by a given factor asynchronously.
/// \param [in] alpha Value to scaling factors.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] src Pointer to source data.
/// \returns An event representing the scale operations.
sycl::event async_scale(float alpha, const memory_desc_ext &src_desc, void *src);
/// Adding the scaled values of a memory to another memory asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the sum operations.
sycl::event async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst);
/// Perform specified binary operation asynchronously.
/// \param [in] op Specified binary operation.
/// \param [in] alpha_0 Value to scaling factors used to scale the src_0
/// value.
/// \param [in] src_desc_0 Source 0 memory descriptor.
/// \param [in] src_0 Pointer to source 0 data.
/// \param [in] alpha_1 Value to scaling factors used to scale the src_1
/// value.
/// \param [in] src_desc_1 Source 1 memory descriptor.
/// \param [in] src_1 Pointer to source 1 data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the binary operations.
sycl::event async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Perform specified reduction operation asynchronously.
/// \param [in] op Specified reduction operation.
/// \param [in] alpha Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the reduction operations.
sycl::event async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing a specified activation function value asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the activation forward operations.
sycl::event async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst);
/// Computing the gradient of a specified activation function asynchronously.
/// \param [in] desc Activation descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the activation backward operations.
sycl::event
async_activation_backward(activation_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing a specified pooling function value asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward propagation.
/// \returns An event representing the pooling forward operations.
sycl::event async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
/// Computing the gradient of a specified pooling function asynchronously.
/// \param [in] desc Pooling descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential destination memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential
/// source data.
/// \param [in] workspace Pointer to workspace used for backward
/// propagation.
/// \returns An event representing the pooling backward operations.
sycl::event async_pooling_backward(pooling_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace = nullptr);
/// Computing a specified softmax function value asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the softmax forward operations.
sycl::event async_softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the gradient of a specified softmax function asynchronously.
/// \param [in] alg Softmax algorithm.
/// \param [in] mode Softmax mode.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the softmax backward operations.
sycl::event async_softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src);
/// Computing a specified local response normalization function value
/// asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the lrn forward operations.
sycl::event async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace = nullptr);
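// Example (a minimal sketch, not part of the API): assumes `engine`, an
// initialized lrn_desc `ldesc`, and prepared descriptors/pointers.
//   sycl::event e = engine.async_lrn_forward(
//       ldesc, /*alpha=*/1.f, src_desc, src, /*beta=*/0.f, dst_desc, dst);
//   e.wait();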
/// Computing the gradient of a specified local response normalization
/// function asynchronously.
/// \param [in] desc Local response normalization descriptor.
/// \param [in] alpha Value to scaling factors used to scale the computed value.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the lrn backward operations.
sycl::event async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &diff_src_desc,
void *diff_src, ::dnnl::memory *workspace = nullptr);
/// Derives a memory descriptor for the batch normalization scale, bias, mean,
/// variance from the source memory descriptor and batch normalization mode.
/// \param [out] desc Derived memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Derives separate memory descriptors for the batch normalization scale, bias
/// and for the mean, variance from the source memory descriptor and batch
/// normalization mode.
/// \param [out] scale_bias_desc Derived scale and bias memory descriptor.
/// \param [out] mean_var_desc Derived mean and var memory descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] mode Batch normalization mode.
static void derive_batch_normalization_memory_desc(memory_desc_ext &scale_bias_desc,
memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc,
batch_normalization_mode mode);
/// Get the size of the workspace needed by batch normalization. The data stored
/// in the workspace must be preserved between the forward and backward passes.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Size of workspace.
size_t get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc);
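// Example (a minimal sketch, not part of the API): query the workspace size
// before a training forward/backward pair; `batch_normalization_ops::none`
// is an illustrative assumption for the plain batch normalization mode.
//   size_t ws_size = engine.get_batch_normalization_workspace_size(
//       batch_normalization_ops::none, src_desc);
//   void *workspace = sycl::malloc_device(ws_size, *engine.get_queue());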
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var);
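// Example (a minimal sketch, not part of the API): assumes all descriptors
// and device pointers are already prepared; spatial mode normalizes per
// channel.
//   sycl::event e = engine.async_batch_normalization_forward_inference(
//       batch_normalization_mode::spatial, /*epsilon=*/1e-5f,
//       /*alpha=*/1.f, src_desc, src, /*beta=*/0.f, dst_desc, dst,
//       scale_bias_mean_var_desc, scale, bias, mean, var);
//   e.wait();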
/// Computing a specified batch normalization inference stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] mean Pointer to mean data.
/// \param [in] var Pointer to variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_mean_var_desc Scale, bias, mean, variance memory
/// descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified batch normalization training stage function value
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] factor Factor value used in running mean and variance
/// computation.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] scale_bias_desc Scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [out] running_mean Pointer to running mean data.
/// \param [out] running_var Pointer to running variance data.
/// \param [out] saved_mean Pointer to optional cache to save mean data.
/// \param [out] saved_var Pointer to optional cache to save variance data.
/// \param [in] workspace_size Size of workspace.
/// \param [out] workspace Pointer to workspace generated from forward
/// propagation.
/// \returns An event representing the batch normalization forward operations.
sycl::event async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_mean_var_desc Differential scale, bias, mean,
/// variance memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing the gradient of a specified batch normalization function
/// asynchronously.
/// \param [in] mode Batch normalization mode.
/// \param [in] ops Batch normalization operation mode. This mode can be set to
/// perform only batch normalization, or batch normalization followed by
/// activation, or batch normalization followed by element-wise addition and
/// activation.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] epsilon Epsilon value used in computation.
/// \param [in] alpha_data Value to scaling factors used to scale the computed
/// data value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta_data Value to scaling factors used to scale the prior value
/// in the data memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] diff_summand_desc Differential summand memory descriptor.
/// \param [out] diff_summand Pointer to differential summand data.
/// \param [in] alpha_param Value to scaling factors used to scale the computed
/// parameter value.
/// \param [in] diff_scale_bias_desc Differential scale, bias memory descriptor.
/// \param [in] scale Pointer to scale data.
/// \param [in] bias Pointer to bias data.
/// \param [in] beta_param Value to scaling factors used to scale the prior value
/// in the parameter memory.
/// \param [out] diff_scale Pointer to differential scale data.
/// \param [out] diff_bias Pointer to differential bias data.
/// \param [in] mean_var_desc Mean, variance memory descriptor.
/// \param [in] saved_mean Pointer to optional cache saved mean data in forward.
/// \param [in] saved_var Pointer to optional cache saved variance data in forward.
/// \param [in] workspace_size Size of workspace.
/// \param [in] workspace Pointer to workspace used for backward propagation.
/// \returns An event representing the batch normalization backward operations.
sycl::event async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace);
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the destination memory.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst);
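// Example (a minimal sketch, not part of the API): assumes `engine`, an
// initialized convolution_desc `cdesc`, and prepared descriptors/pointers;
// the algorithm enumerator is the standard oneDNN direct convolution.
//   sycl::event e = engine.async_convolution_forward(
//       cdesc, ::dnnl::algorithm::convolution_direct,
//       /*alpha=*/1.f, src_desc, src, weight_desc, weight,
//       /*beta=*/0.f, dst_desc, dst);
//   e.wait();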
/// Computing a specified convolution function value asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] adesc Activation operation descriptor.
/// \param [in] alpha_0 Value to scaling factors used to scale the data
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] alpha_1 Value to scaling factors used to scale the summand
/// value.
/// \param [in] summand_desc Summand memory descriptor.
/// \param [in] summand Pointer to summand data.
/// \param [in] bias_desc Bias memory descriptor.
/// \param [in] bias Pointer to bias data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \returns An event representing the convolution forward operations.
sycl::event async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst);
/// Computing the data gradient of a specified convolution function asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] weight_desc Weight memory descriptor.
/// \param [in] weight Pointer to weight data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential source memory.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \returns An event representing the convolution backward data operations.
sycl::event async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src);
/// Computing the weight gradient of a specified convolution function
/// asynchronously.
/// \param [in] desc Convolution descriptor.
/// \param [in] alg Convolution algorithm.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential weight memory.
/// \param [in] diff_weight_desc Differential weight memory descriptor.
/// \param [out] diff_weight Pointer to differential weight data.
/// \returns An event representing the convolution backward weight operations.
sycl::event async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight);
/// Computing the bias gradient of a specified convolution function
/// asynchronously.
/// \param [in] alpha Value to scaling factors used to scale the computed
/// value.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] beta Value to scaling factors used to scale the prior value
/// in the differential bias memory.
/// \param [in] diff_bias_desc Differential bias memory descriptor.
/// \param [out] diff_bias Pointer to differential bias data.
/// \returns An event representing the convolution backward bias operations.
sycl::event async_convolution_backward_bias(float alpha,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_bias_desc,
void *diff_bias);
/// Getting the required weight space size for the specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [out] weight_space_size Size of required weight space.
void rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size);
/// Getting the required scratchpad size and workspace size for the specified rnn operation.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [out] scratchpad_size Size of required scratchpad.
/// \param [out] workspace_size Size of required workspace.
void rnn_get_scratchpad_workspace_size(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc,
size_t *scratchpad_size, size_t *workspace_size);
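// Example (a minimal sketch, not part of the API): the two size queries
// typically made before async_rnn_forward; assumes `engine`, an initialized
// rnn_desc `rdesc`, and `src_desc`.
//   size_t weight_space_size = 0, scratchpad_size = 0, workspace_size = 0;
//   engine.rnn_get_weight_space_size(rdesc, &weight_space_size);
//   engine.rnn_get_scratchpad_workspace_size(
//       rdesc, ::dnnl::prop_kind::forward_training, src_desc,
//       &scratchpad_size, &workspace_size);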
/// Computing a specified rnn function value asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] kind Propagation kind.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [out] dst_iter Pointer to output recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [out] dst_iter_c Pointer to output recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn forward operations.
sycl::event async_rnn_forward(const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &iter_desc, void *src_iter,
void *dst_iter,
const memory_desc_ext &iter_c_desc,
void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight,
size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Computing the data and weight gradient of a specified rnn function
/// asynchronously.
/// \param [in] desc RNN descriptor.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [in] dst Pointer to destination data.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] iter_desc Recurrent hidden state data memory descriptor.
/// \param [in] src_iter Pointer to input recurrent hidden state data.
/// \param [in] diff_dst_iter Pointer to differential output recurrent hidden state data.
/// \param [out] diff_src_iter Pointer to differential input recurrent hidden state data.
/// \param [in] iter_c_desc Recurrent cell state data memory descriptor.
/// \param [in] src_iter_c Pointer to input recurrent cell state data.
/// \param [in] diff_dst_iter_c Pointer to differential output recurrent cell state data.
/// \param [out] diff_src_iter_c Pointer to differential input recurrent cell state data.
/// \param [in] weight_size Size of weight memory.
/// \param [in] weight Pointer to weight data.
/// \param [out] diff_weight Pointer to differential weight data.
/// \param [in] scratchpad_size Size of scratchpad memory.
/// \param [in] scratchpad Pointer to scratchpad data.
/// \param [in] workspace_size Size of workspace memory.
/// \param [in] workspace Pointer to workspace data.
/// \returns An event representing the status of rnn backward operations.
sycl::event async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src,
void *diff_src, const memory_desc_ext &iter_desc, void *src_iter,
void *diff_dst_iter, void *diff_src_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace);
/// Getting the required state size for the specified dropout operation.
/// \returns Required size of state.
size_t get_dropout_state_size();
/// Getting the required workspace size for a dropout operation.
/// \param [in] src_desc Source memory descriptor.
/// \returns Required size of workspace.
static size_t get_dropout_workspace_size(const memory_desc_ext &src_desc);
/// Computing a specified dropout function value asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] src_desc Source memory descriptor.
/// \param [in] src Pointer to source data.
/// \param [in] dst_desc Destination memory descriptor.
/// \param [out] dst Pointer to destination data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout forward operations.
sycl::event async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &dst_desc, void *dst,
void *workspace, size_t workspace_size);
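// Example (a minimal sketch, not part of the API): assumes `engine`, an
// initialized dropout_desc `ddesc`, and prepared descriptors/pointers; the
// same workspace must later be passed to async_dropout_backward.
//   size_t ws_size = engine_ext::get_dropout_workspace_size(src_desc);
//   void *workspace = sycl::malloc_device(ws_size, *engine.get_queue());
//   sycl::event e = engine.async_dropout_forward(
//       ddesc, src_desc, src, dst_desc, dst, workspace, ws_size);
//   e.wait();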
/// Computing the gradient of a specified dropout function asynchronously.
/// \param [in] desc Dropout descriptor.
/// \param [in] diff_dst_desc Differential destination memory descriptor.
/// \param [in] diff_dst Pointer to differential destination data.
/// \param [in] diff_src_desc Differential source memory descriptor.
/// \param [out] diff_src Pointer to differential source data.
/// \param [in] workspace Pointer to workspace data.
/// \param [in] workspace_size Size of workspace memory.
/// \returns An event representing the dropout backward operations.
sycl::event async_dropout_backward(dropout_desc &desc,
const memory_desc_ext &diff_dst_desc,
void *diff_dst,
const memory_desc_ext &diff_src_desc,
void *diff_src, void *workspace,
size_t workspace_size);
};
inline
void dropout_desc::restore(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("restore: state_size less than required state size.");
}
sycl::queue *q = engine.get_queue();
_imp->_p = p;
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
q->memcpy(_imp->_host_state.data(), _imp->_state, required_state_size).wait();
_imp->_rng_engine =
oneapi::mkl::rng::load_state<rng_engine_t>(
*q, _imp->_host_state.data());
}
#endif
}
inline
void dropout_desc::set(engine_ext &engine, float p, void *state,
size_t state_size, unsigned long long seed) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
_imp->_p = p;
if (state) {
std::int64_t required_state_size = engine.get_dropout_state_size();
if (state_size < required_state_size) {
throw std::runtime_error("set: no sufficient memory to save states.");
}
sycl::queue *q = engine.get_queue();
_imp->_seed = seed;
_imp->_state = state;
_imp->_host_state = std::vector<std::uint8_t>(required_state_size);
_imp->_rng_engine = rng_engine_t(*q, seed);
oneapi::mkl::rng::save_state(_imp->_rng_engine, _imp->_host_state.data());
q->memcpy(_imp->_state, _imp->_host_state.data(), required_state_size).wait();
}
#endif
}
inline
::dnnl::memory::data_type
memory_desc_ext::to_dnnl_data_type(dpct::library_data_t dt) {
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dpct::library_data_t::real_half:
return dnnl_dt::f16;
case dpct::library_data_t::real_bfloat16:
return dnnl_dt::bf16;
case dpct::library_data_t::real_float:
return dnnl_dt::f32;
case dpct::library_data_t::real_int32:
return dnnl_dt::s32;
case dpct::library_data_t::real_int8:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8:
return dnnl_dt::u8;
case dpct::library_data_t::real_int8_4:
return dnnl_dt::s8;
case dpct::library_data_t::real_int8_32:
return dnnl_dt::s8;
case dpct::library_data_t::real_uint8_4:
return dnnl_dt::u8;
default:
throw std::runtime_error("to_dnnl_data_type: unsupported data type.");
}
}
inline
dpct::library_data_t
memory_desc_ext::to_dpct_library_data_t(::dnnl::memory::data_type dt,
unsigned block_size) {
using dpct_dt = dpct::library_data_t;
using dnnl_dt = ::dnnl::memory::data_type;
switch (dt) {
case dnnl_dt::f16:
return dpct_dt::real_half;
case dnnl_dt::bf16:
return dpct_dt::real_bfloat16;
case dnnl_dt::f32:
return dpct_dt::real_float;
case dnnl_dt::s32:
return dpct_dt::real_int32;
case dnnl_dt::s8:
if (block_size == 4) {
return dpct_dt::real_int8_4;
} else if (block_size == 32) {
return dpct_dt::real_int8_32;
} else {
return dpct_dt::real_int8;
}
case dnnl_dt::u8:
if (block_size == 4) {
return dpct_dt::real_uint8_4;
} else {
return dpct_dt::real_uint8;
}
default:
throw std::runtime_error("to_dpct_library_data_t: unsupported data type "
"dnnl::memory::data_type::undef.");
}
}
inline
::dnnl::memory::format_tag
memory_desc_ext::to_dnnl_format_tag(dpct::library_data_t dt,
memory_format_tag tag) {
using dpct_dt = dpct::library_data_t;
using dpct_tag = memory_format_tag;
using dnnl_tag = ::dnnl::memory::format_tag;
switch (tag) {
case dpct_tag::nchw:
return dnnl_tag::nchw;
case dpct_tag::nhwc:
return dnnl_tag::nhwc;
default:
if (dt == dpct_dt::real_int8_32) {
return dnnl_tag::nChw32c;
} else {
return dnnl_tag::nChw4c;
}
}
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt, int n,
int c, int h, int w) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
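// Example (a minimal sketch, not part of the API): describe a float NCHW
// tensor of shape 1x3x224x224; assumes a default-constructed memory_desc_ext.
//   memory_desc_ext md;
//   md.set(memory_format_tag::nchw, dpct::library_data_t::real_float,
//          /*n=*/1, /*c=*/3, /*h=*/224, /*w=*/224);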
inline
void memory_desc_ext::set(dpct::library_data_t dt, int n, int c, int h, int w,
int n_stride, int c_stride, int h_stride,
int w_stride) {
_desc = ::dnnl::memory::desc({n, c, h, w}, to_dnnl_data_type(dt),
{n_stride, c_stride, h_stride, w_stride});
}
inline
void memory_desc_ext::set(dpct::library_data_t dt, int ndims, const int dims[],
const int strides[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
{strides, strides + ndims});
}
inline
void memory_desc_ext::set(memory_format_tag tag, dpct::library_data_t dt,
int ndims, const int dims[]) {
_desc = ::dnnl::memory::desc({dims, dims + ndims}, to_dnnl_data_type(dt),
to_dnnl_format_tag(dt, tag));
}
inline
void memory_desc_ext::set(rnn_memory_format_tag tag, dpct::library_data_t dt,
int t, int n, int c) {
if (tag == rnn_memory_format_tag::tnc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::tnc);
} else if(tag == rnn_memory_format_tag::ntc) {
_desc = ::dnnl::memory::desc({t, n, c}, to_dnnl_data_type(dt),
::dnnl::memory::format_tag::ntc);
} else {
throw std::runtime_error("set: unsupported memory format tag.");
}
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, int *n, int *c, int *h,
int *w, int *n_stride, int *c_stride, int *h_stride,
int *w_stride) const {
unsigned block_size = 1;
auto dims = _desc.get_dims();
auto inner_blks = _desc.get_inner_blks();
auto strides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
*n_stride = strides[0] / block_size;
*c_stride = strides[1] / block_size;
*h_stride = strides[2] / block_size;
*w_stride = strides[3] / block_size;
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, memory_format_tag *tag,
int *n, int *c, int *h, int *w) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
auto inner_blks = _desc.get_inner_blks();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (strides[1] == 1 && dims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*n = dims[0];
*c = dims[1];
*h = dims[2];
*w = dims[3];
}
inline
void memory_desc_ext::get(dpct::library_data_t *dt, rnn_memory_format_tag *tag,
int *t, int *n, int *c) const {
auto dims = _desc.get_dims();
auto strides = _desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = rnn_memory_format_tag::tnc;
} else {
*tag = rnn_memory_format_tag::ntc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), 1);
*t = dims[0];
*n = dims[1];
*c = dims[2];
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
int *ndims, int dims[], int strides[]) const {
unsigned block_size = 1;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
strides[index] =
astrides[index] / block_size;
}
}
inline
void memory_desc_ext::get(int requested_ndims, dpct::library_data_t *dt,
memory_format_tag *tag, int *ndims,
int dims[]) const {
unsigned block_size = 1;
*tag = memory_format_tag::nchw;
auto inner_blks = _desc.get_inner_blks();
auto adims = _desc.get_dims();
auto astrides = _desc.get_strides();
if (!inner_blks.empty()) {
block_size = inner_blks[0];
*tag = memory_format_tag::nchw_blocked;
}
if (astrides[1] == 1 &&
adims[1] != 1) {
*tag = memory_format_tag::nhwc;
}
*dt = to_dpct_library_data_t(_desc.get_data_type(), block_size);
*ndims = _desc.get_ndims();
for (int index = 0; index < requested_ndims; index++) {
dims[index] = adims[index];
}
}
inline
void engine_ext::get_rnn_configuration(const ::dnnl::memory::desc &desc,
rnn_direction direction, rnn_mode mode,
dpct::library_data_t dt, int hidden_size,
::dnnl::memory::data_type *dnnl_dt,
::dnnl::memory::format_tag *tag,
int *projection_size, int *output_size,
int *seq_length, int *batch_size,
int *direction_num, int *gate_num) {
if (!desc.is_zero()) {
auto dims = desc.get_dims();
auto strides = desc.get_strides();
if (strides[0] >= strides[1]) {
*tag = ::dnnl::memory::format_tag::tnc;
*seq_length = dims[0];
*batch_size = dims[1];
} else {
*tag = ::dnnl::memory::format_tag::ntc;
*seq_length = dims[1];
*batch_size = dims[0];
}
}
if (direction == rnn_direction::bidirectional) {
*direction_num = 2;
} else {
*direction_num = 1;
}
if (mode == rnn_mode::lstm) {
*gate_num = 4;
} else if (mode == rnn_mode::gru) {
*gate_num = 3;
} else {
*gate_num = 1;
}
if (*projection_size != hidden_size) {
*output_size = *projection_size;
} else {
*projection_size = 0;
*output_size = hidden_size;
}
*dnnl_dt = memory_desc_ext::to_dnnl_data_type(dt);
}
inline
void *engine_ext::allocate(const memory_desc_ext &data_desc, int count) const {
size_t mem_size = data_desc.get_size();
void *mem = sycl::malloc_device(mem_size * count, *_q);
return mem;
}
inline
void engine_ext::transform_no_zero(const memory_desc_ext &desc, void *src, void *dst) {
::dnnl::memory::data_type dt = desc.get_desc().get_data_type();
size_t element_num = desc.get_element_num();
switch (dt) {
case ::dnnl::memory::data_type::f32:
transform_no_zero_with_type<float>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::f16:
transform_no_zero_with_type<sycl::half>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s32:
transform_no_zero_with_type<int32_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::s8:
transform_no_zero_with_type<int8_t>(_q, src, dst, element_num);
break;
case ::dnnl::memory::data_type::u8:
transform_no_zero_with_type<uint8_t>(_q, src, dst, element_num);
break;
default:
throw std::runtime_error("transform_no_zero: unsupported data type.");
}
}
inline
::dnnl::memory::desc
engine_ext::get_group_weight_desc(int group_count,
const memory_desc_ext &weight_desc) {
if (group_count == 1) {
return weight_desc.get_desc();
}
auto help_weight_desc = weight_desc.get_desc();
int ndims = help_weight_desc.get_ndims();
if (!help_weight_desc.get_inner_blks().empty()) {
throw std::runtime_error("get_group_weight_desc: group convolution with "
"blocked weight memory unimplemented.");
}
std::vector<int64_t> new_size;
auto old_size = weight_desc.get_dims();
new_size.push_back(group_count);
new_size.push_back(old_size[0] / group_count);
for (int index = 1; index < old_size.size(); index++) {
new_size.push_back(old_size[index]);
}
std::vector<int64_t> strides = help_weight_desc.get_strides();
::dnnl::memory::format_tag tag;
bool is_nhwc = (strides[1] == 1 && old_size[1] != 1);
if (ndims == 4) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::gohwi;
} else {
tag = ::dnnl::memory::format_tag::goihw;
}
} else if (ndims == 5) {
if (is_nhwc) {
tag = ::dnnl::memory::format_tag::godhwi;
} else {
tag = ::dnnl::memory::format_tag::goidhw;
}
}
help_weight_desc =
::dnnl::memory::desc(new_size, weight_desc.get_desc().get_data_type(), tag);
return help_weight_desc;
}
inline
::dnnl::memory::desc engine_ext::compress_spatial_dimensions_to_channel(
const ::dnnl::memory::desc &desc) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
auto inner_blks = desc.get_inner_blks();
assert(ndims >= 4 && "ndims is at least 4.");
std::vector<int64_t> compressed_dims(ndims);
compressed_dims[0] = dims[0];
compressed_dims[1] = dims[1];
for (int index = 2; index < ndims; index++) {
compressed_dims[1] = compressed_dims[1] * dims[index];
compressed_dims[index] = 1;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw4c);
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(),
::dnnl::memory::format_tag::nChw32c);
}
std::vector<int64_t> strides(ndims, 1);
strides[0] = compressed_dims[1];
return ::dnnl::memory::desc(compressed_dims, desc.get_data_type(), strides);
}
inline
::dnnl::memory::desc
engine_ext::get_bn_scale_bias_mean_var_desc(const ::dnnl::memory::desc &desc,
batch_normalization_mode mode) {
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
assert(ndims >= 4 && "ndims is at least 4.");
int channel_num = 1;
if (mode == batch_normalization_mode::spatial) {
channel_num = dims[1];
} else {
for (int index = 1; index < ndims; index++) {
channel_num = channel_num * dims[index];
}
}
return ::dnnl::memory::desc({channel_num}, desc.get_data_type(),
::dnnl::memory::format_tag::a);
}
inline
::dnnl::memory::desc engine_ext::transfer_memory_desc_to_channel_major_format(
const ::dnnl::memory::desc &desc) {
if (!desc.get_inner_blks().empty()) {
return desc;
}
int ndims = desc.get_ndims();
auto dims = desc.get_dims();
if (ndims == 4) {
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::nchw);
}
return ::dnnl::memory::desc(dims, desc.get_data_type(),
::dnnl::memory::format_tag::ncdhw);
}
/// If alpha = 0 and beta = 1, then the destination (dst = alpha * out +
/// beta * prior_dst) does not change. In that case this function returns true,
/// meaning the operation can exit directly.
inline
bool engine_ext::scale_parameter_preprocess(
const std::vector<output_argument_info> &args) {
bool direct_exit = true;
for (auto &arg : args) {
if (arg._alpha == 0.f) {
if (arg._beta != 1.f) {
async_scale(arg._beta, arg._desc, arg._data);
}
} else {
direct_exit = false;
}
}
return direct_exit;
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &scale_bias_desc, memory_desc_ext &mean_var_desc,
const memory_desc_ext &src_desc, batch_normalization_mode mode) {
derive_batch_normalization_memory_desc(scale_bias_desc, src_desc, mode);
derive_batch_normalization_memory_desc(mean_var_desc, src_desc, mode);
}
inline
void engine_ext::derive_batch_normalization_memory_desc(
memory_desc_ext &desc, const memory_desc_ext &src_desc,
batch_normalization_mode mode) {
int src_ndims = src_desc.get_desc().get_ndims();
auto inner_blks = src_desc.get_desc().get_inner_blks();
  if (src_ndims != 4 && src_ndims != 5) {
throw std::runtime_error("derive_batch_normalization_memory_desc: only 4d "
"and 5d memory descriptor supported.");
}
std::vector<int64_t> dims = src_desc.get_dims();
dims[0] = 1;
if (mode == batch_normalization_mode::spatial) {
dims[2] = 1;
dims[3] = 1;
if (src_ndims == 5) {
dims[4] = 1;
}
}
auto data_type = src_desc.get_desc().get_data_type();
if (data_type == ::dnnl::memory::data_type::f16) {
data_type = ::dnnl::memory::data_type::f32;
}
if (!inner_blks.empty() && inner_blks[0] == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw4c));
} else if (!inner_blks.empty() && inner_blks[0] == 32) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nChw32c));
} else {
if (src_ndims == 4) {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::nchw));
} else {
desc.set_desc(::dnnl::memory::desc(dims, data_type,
::dnnl::memory::format_tag::ncdhw));
}
}
}
template <typename primitive_type>
sycl::event engine_ext::execute_primitive(
const std::pair<detail::primitive_cache_key_type, primitive_type *>
&primitive,
std::unordered_map<int, ::dnnl::memory> *args,
const std::vector<output_argument_info> &output_args,
const std::vector<void *> &device_ptrs) {
std::vector<void *> caches;
int output_arg_num = output_args.size();
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
auto cache = allocate(output_args[i]._desc);
caches.push_back(cache);
args->insert(
{output_args[i]._name,
::dnnl::memory(output_args[i]._desc.get_desc(), _eng, cache)});
} else {
args->insert(
{output_args[i]._name, ::dnnl::memory(output_args[i]._desc.get_desc(),
_eng, output_args[i]._data)});
}
}
auto e = ::dnnl::sycl_interop::execute(
*(static_cast<primitive_type *>(primitive.second)), _s, *args);
_primitive_cache.put(
primitive.first, primitive.second,
[](::dnnl::primitive *p) { delete static_cast<primitive_type *>(p); }, e);
int cache_index = 0;
for (int i = 0; i < output_arg_num; i++) {
if (output_args[i]._beta != 0.f) {
e = async_sum(output_args[i]._alpha, output_args[i]._desc,
caches[cache_index++], output_args[i]._beta, output_args[i]._desc,
output_args[i]._data);
} else {
if (output_args[i]._alpha != 1.f) {
e = async_scale(output_args[i]._alpha, output_args[i]._desc,
output_args[i]._data);
}
}
}
caches.insert(caches.end(), device_ptrs.begin(), device_ptrs.end());
async_free(_q, e, args, caches);
return e;
}
inline
::dnnl::memory::desc engine_ext::bn_reorder_memory_to_channel_major_format(
bool is_input, ::dnnl::memory::desc &desc, void *src, void **cache,
std::vector<void *> &caches) {
::dnnl::memory::desc result;
result = transfer_memory_desc_to_channel_major_format(desc);
if ((result != desc) || !src) {
*cache = allocate(desc);
if (is_input && src) {
async_reorder(1.f, desc, src, 0.f, result, *cache);
}
caches.push_back(*cache);
}
return result;
}
inline
sycl::event engine_ext::batch_normalization_backward_internal(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_desc, void *scale, void *bias,
float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var) {
if (scale_parameter_preprocess(
{{alpha_data, beta_data, diff_src_desc, diff_src},
{alpha_param, beta_param, diff_scale_bias_desc, diff_scale},
{alpha_param, beta_param, diff_scale_bias_desc, diff_bias}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_diff_dst = nullptr,
*reordered_diff_src = nullptr, *reordered_scale = nullptr,
*reordered_bias = nullptr, *reordered_diff_scale = nullptr,
*reordered_diff_bias = nullptr, *reordered_saved_mean = nullptr,
*reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_diff_scale_bias_desc =
diff_scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_diff_src_desc = help_diff_src_desc;
::dnnl::memory::desc actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
help_diff_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_scale_bias_desc, scale, &reordered_scale, caches);
actual_diff_scale_bias_desc = help_diff_scale_bias_desc;
if (bias) {
bn_reorder_memory_to_channel_major_format(true, help_diff_scale_bias_desc, bias,
&reordered_bias, caches);
}
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_scale, &reordered_diff_scale,
caches);
bn_reorder_memory_to_channel_major_format(false, help_diff_scale_bias_desc,
diff_bias, &reordered_diff_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
true, help_mean_var_desc, saved_mean, &reordered_saved_mean, caches);
bn_reorder_memory_to_channel_major_format(true, help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
} else {
if ((help_src_desc != help_diff_dst_desc) ||
(help_src_desc != help_diff_src_desc) ||
(help_diff_dst_desc != help_diff_src_desc)) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_diff_dst_desc = bn_reorder_memory_to_channel_major_format(
true, help_diff_dst_desc, diff_dst, &reordered_diff_dst, caches);
help_diff_src_desc = bn_reorder_memory_to_channel_major_format(
false, help_diff_src_desc, diff_src, &reordered_diff_src, caches);
actual_diff_src_desc = help_diff_src_desc;
}
}
help_diff_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_diff_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
auto forward_primitive =
create_primitive_desc<::dnnl::batch_normalization_forward>(
::dnnl::prop_kind::forward_training, help_src_desc,
help_diff_dst_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift);
auto primitive =
create_primitive<::dnnl::batch_normalization_backward>(
::dnnl::prop_kind::backward, help_diff_src_desc, help_diff_dst_desc,
help_src_desc, epsilon,
::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift, forward_primitive);
void *dst_cache = nullptr;
if (!saved_mean && !saved_var) {
dst_cache = allocate(diff_dst_desc);
if (!reordered_saved_mean) {
reordered_saved_mean = allocate(mean_var_desc);
caches.push_back(reordered_saved_mean);
}
if (!reordered_saved_var) {
reordered_saved_var = allocate(mean_var_desc);
caches.push_back(reordered_saved_var);
}
    if (!bias) {
      reordered_bias = allocate(diff_scale_bias_desc);
      caches.push_back(reordered_bias);
      _q->memset(reordered_bias, 0, diff_scale_bias_desc.get_size());
    }
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, diff_dst_desc,
dst_cache, diff_scale_bias_desc, scale, bias ? bias : reordered_bias,
mean_var_desc, reordered_saved_mean, reordered_saved_var, nullptr,
nullptr);
caches.push_back(dst_cache);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_diff_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var : saved_var)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_src_desc, _eng,
reordered_diff_dst ? reordered_diff_dst : diff_dst)}}};
sycl::event e = execute_primitive(
primitive, execution_args,
{{alpha_data, beta_data, DNNL_ARG_DIFF_SRC, help_diff_src_desc,
reordered_diff_src ? reordered_diff_src : diff_src},
{alpha_param, beta_param, DNNL_ARG_DIFF_SCALE, help_diff_scale_bias_desc,
reordered_diff_scale ? reordered_diff_scale : diff_scale},
{alpha_param, beta_param, DNNL_ARG_DIFF_SHIFT, help_diff_scale_bias_desc,
reordered_diff_bias ? reordered_diff_bias : diff_bias}});
if (actual_diff_src_desc != diff_src_desc.get_desc() && reordered_diff_src) {
e = async_reorder(1.f, actual_diff_src_desc, reordered_diff_src, 0.f,
diff_src_desc, diff_src);
}
if (actual_diff_scale_bias_desc != diff_scale_bias_desc.get_desc() &&
reordered_diff_scale && reordered_diff_bias) {
async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_scale, 0.f,
diff_scale_bias_desc, diff_scale);
e = async_reorder(1.f, actual_diff_scale_bias_desc, reordered_diff_bias, 0.f,
diff_scale_bias_desc, diff_bias);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
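// Shared implementation behind the batch-normalization forward entry points.
// Inputs are reordered to a channel-major layout when per_activation mode (or
// mismatched source/destination layouts) requires it, the oneDNN forward
// primitive is executed with alpha/beta scaling on dst, the running
// mean/variance are updated with the exponential-average `factor` during
// training (the variance update applies an n/(n-1) unbias correction), and
// all scratch buffers are freed in a host task that depends on the returned
// event.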
inline
sycl::event engine_ext::batch_normalization_forward_internal(
bool is_infer, batch_normalization_mode mode, float epsilon, float factor,
float alpha, const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
void *running_mean, void *running_var) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
std::vector<void *> caches;
void *reordered_src = nullptr, *reordered_dst = nullptr,
*reordered_scale = nullptr, *reordered_bias = nullptr,
*reordered_saved_mean = nullptr, *reordered_saved_var = nullptr;
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_scale_bias_desc = scale_bias_desc.get_desc();
::dnnl::memory::desc help_mean_var_desc = mean_var_desc.get_desc();
::dnnl::memory::desc actual_dst_desc = help_dst_desc;
::dnnl::memory::desc actual_mean_var_desc = help_mean_var_desc;
if (mode == batch_normalization_mode::per_activation) {
help_src_desc = bn_reorder_memory_to_channel_major_format(true, help_src_desc, src,
&reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
help_scale_bias_desc = bn_reorder_memory_to_channel_major_format(
true, help_scale_bias_desc, scale, &reordered_scale, caches);
bn_reorder_memory_to_channel_major_format(true, help_scale_bias_desc, bias,
&reordered_bias, caches);
help_mean_var_desc = bn_reorder_memory_to_channel_major_format(
is_infer, help_mean_var_desc, saved_mean,
&reordered_saved_mean, caches);
actual_mean_var_desc = help_mean_var_desc;
bn_reorder_memory_to_channel_major_format(is_infer,
help_mean_var_desc, saved_var,
&reordered_saved_var, caches);
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
} else {
if (help_src_desc != help_dst_desc) {
help_src_desc = bn_reorder_memory_to_channel_major_format(
true, help_src_desc, src, &reordered_src, caches);
help_dst_desc = bn_reorder_memory_to_channel_major_format(
false, help_dst_desc, dst, &reordered_dst, caches);
actual_dst_desc = help_dst_desc;
}
}
help_scale_bias_desc =
get_bn_scale_bias_mean_var_desc(help_scale_bias_desc, mode);
help_mean_var_desc =
get_bn_scale_bias_mean_var_desc(help_mean_var_desc, mode);
::dnnl::prop_kind kind;
::dnnl::normalization_flags flag = ::dnnl::normalization_flags::use_scale |
::dnnl::normalization_flags::use_shift;
if (is_infer) {
kind = ::dnnl::prop_kind::forward_inference;
flag = ::dnnl::normalization_flags::use_global_stats | flag;
} else {
kind = ::dnnl::prop_kind::forward_training;
}
auto primitive =
create_primitive<::dnnl::batch_normalization_forward>(
kind, help_src_desc, help_dst_desc, epsilon, flag);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(help_src_desc, _eng,
reordered_src ? reordered_src : src)}},
{DNNL_ARG_SCALE,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_scale ? reordered_scale : scale)}},
{DNNL_ARG_SHIFT,
{::dnnl::memory(help_scale_bias_desc, _eng,
reordered_bias ? reordered_bias : bias)}},
{DNNL_ARG_MEAN,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_mean ? reordered_saved_mean
: saved_mean)}},
{DNNL_ARG_VARIANCE,
{::dnnl::memory(help_mean_var_desc, _eng,
reordered_saved_var ? reordered_saved_var
: saved_var)}}};
sycl::event e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, help_dst_desc,
reordered_dst ? reordered_dst : dst}});
if (!is_infer && running_var) {
auto src_ndim = src_desc.get_desc().get_ndims();
auto src_dims = src_desc.get_dims();
int element_num = src_dims[0];
if (mode == batch_normalization_mode::spatial) {
for (int index = 2; index < src_ndim; index++) {
element_num *= src_dims[index];
}
}
float unbias_factor = element_num / (element_num - 1.f);
async_scale(1.f - factor, mean_var_desc, running_var);
e = async_sum(factor * unbias_factor, mean_var_desc,
reordered_saved_var ? reordered_saved_var : saved_var,
1.f, mean_var_desc, running_var);
}
if (!is_infer && running_mean) {
e = async_sum(factor, mean_var_desc,
reordered_saved_mean ? reordered_saved_mean : saved_mean,
(1.f - factor), mean_var_desc, running_mean);
}
if (reordered_dst && (actual_dst_desc != dst_desc.get_desc())) {
e = async_reorder(1.f, actual_dst_desc, reordered_dst, 0.f, dst_desc, dst);
}
if (!is_infer && reordered_saved_mean && reordered_saved_var && saved_mean &&
saved_var && (actual_mean_var_desc != mean_var_desc.get_desc())) {
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_mean, 0.f,
mean_var_desc, saved_mean);
e = async_reorder(1.f, actual_mean_var_desc, reordered_saved_var, 0.f,
mean_var_desc, saved_var);
}
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
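// Drives the RNN forward pass. When is_get_execution_args is set, no layer
// caches are allocated and the call only forwards the size-query pointers.
// A bidirectional network with more than one layer is split into a
// bidirectional_sum pass over the first (layer_size - 1) layers followed by a
// bidirectional_concat pass over the last layer; the two intermediate layer
// caches used for that split are freed once the returned event completes.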
inline
sycl::event engine_ext::rnn_forward_internal(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t workspace_size, void *workspace,
size_t scratchpad_size, void *scratchpad, bool is_get_execution_args,
size_t *weight_size_query, size_t *workspace_size_query,
size_t *scratchpad_size_query) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
std::vector<void *> data = {src, dst, src_iter, dst_iter,
src_iter_c, dst_iter_c, weight, workspace,
scratchpad};
std::vector<int> offset(6, 0);
void *input_layer_cache = nullptr, *hidden_layer_cache = nullptr;
sycl::event e;
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
    // Combine the oneDNN bidirectional_sum and bidirectional_concat
    // configurations: when there is more than one layer,
    // execute_rnn_forward_primitive is called twice, first with
    // bidirectional_sum for the leading (layer_size - 1) layers and then
    // with bidirectional_concat for the last layer.
if (layer_size > 1) {
if (!is_get_execution_args) {
input_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
_q->memcpy(input_layer_cache, src, src_desc.get_size());
}
data[0] = input_layer_cache;
data[1] = hidden_layer_cache;
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_sum, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, 1, direction_num, hidden_size, gate_num, projection_size,
data, offset, layer_size - 1, weight_size_query, workspace_size_query,
scratchpad_size_query);
data[0] =
((layer_size - 1) % 2 == 0) ? input_layer_cache : hidden_layer_cache;
data[1] = dst;
}
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::bidirectional_concat, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
2 * output_size, 1, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
} else {
e = execute_rnn_forward_primitive(
mode, kind, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1, weight_size_query,
workspace_size_query, scratchpad_size_query);
}
if (is_get_execution_args) {
return e;
}
if (input_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(input_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
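// Sets up the oneDNN descriptors for one RNN configuration and either
// accumulates the requested weight/workspace/scratchpad sizes (when the
// corresponding query pointers are non-null) or creates the forward primitive
// and executes it iter_num times over the packed weight, bias, and workspace
// buffers.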
inline
sycl::event engine_ext::execute_rnn_forward_primitive(
rnn_mode mode, ::dnnl::prop_kind kind, ::dnnl::rnn_direction direction,
rnn_bias_mode bias_mode, ::dnnl::memory::data_type dt,
::dnnl::memory::format_tag tag, int seq_length, int batch_size, int src_c,
int dst_c, int layer_size, int direction_num, int hidden_size, int gate_num,
int projection_size, std::vector<void *> &data, std::vector<int> &offset,
int iter_num, size_t *weight_size, size_t *workspace_size,
size_t *scratchpad_size) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
std::unordered_map<int, ::dnnl::memory> *execution_args;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
}
if (weight_size) {
*weight_size +=
(weight_layer_desc.get_size() + weight_iter_desc.get_size() +
projection_desc.get_size() + bias_desc.get_size()) *
iter_num;
return e;
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
kind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::gru) {
auto pd = create_primitive_desc<::dnnl::gru_forward>(
kind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::gru_forward>(pd);
key = r.first;
p = r.second;
}
} else if (mode == rnn_mode::lstm) {
auto pd = create_primitive_desc<::dnnl::lstm_forward>(
kind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
if (workspace_size && scratchpad_size) {
*workspace_size += workspace_desc.get_size() * iter_num;
*scratchpad_size = scratchpad_desc.get_size() > *scratchpad_size
? scratchpad_desc.get_size()
: *scratchpad_size;
} else {
auto r = create_primitive_with_pd<::dnnl::lstm_forward>(pd);
key = r.first;
p = r.second;
}
}
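  // Run the layers one by one: data[0]/data[1] are swapped after each
  // iteration so the previous output feeds the next layer, while the
  // per-argument offsets advance through the packed iter/weight/bias/workspace
  // buffers. In size-query mode only the workspace byte counts are
  // accumulated.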
for (int i = 0; i < iter_num; i++) {
void *in_cache = data[0], *out_cache = data[1], *dst_iter_c_cache = nullptr,
*dst_iter_cache = ((uint8_t *)(data[3]) + offset[1]);
if (mode == rnn_mode::lstm) {
dst_iter_c_cache = (uint8_t *)(data[4]) + offset[2];
}
if (!workspace_size) {
execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[0])}},
{DNNL_ARG_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[1])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[8])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data + offset)}});
offset += d.get_size();
};
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[0]);
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[3], offset[1]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[5], offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_LAYER, weight_layer_desc, data[6],
offset[4]);
insert_args(DNNL_ARG_WEIGHTS_ITER, weight_iter_desc, data[6], offset[4]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, projection_desc, data[6],
offset[4]);
}
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[6]) + offset[4], 0, bias_desc.get_size());
}
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[4]);
if (kind == ::dnnl::prop_kind::forward_training) {
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[5]);
}
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
execute_primitive<::dnnl::vanilla_rnn_forward>(
{key, static_cast<::dnnl::vanilla_rnn_forward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
execute_primitive<::dnnl::gru_forward>(
{key, static_cast<::dnnl::gru_forward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
execute_primitive<::dnnl::lstm_forward>(
{key, static_cast<::dnnl::lstm_forward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[0], data[1]);
}
}
if (kind == ::dnnl::prop_kind::forward_training) {
if (workspace_size) {
*workspace_size +=
(src_desc.get_size() + dst_desc.get_size() + iter_desc.get_size());
if (mode == rnn_mode::lstm) {
*workspace_size += iter_c_desc.get_size();
}
} else {
_q->memcpy((uint8_t *)(data[7]) + offset[5], in_cache,
src_desc.get_size());
offset[5] += src_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], out_cache,
dst_desc.get_size());
offset[5] += dst_desc.get_size();
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_cache,
iter_desc.get_size());
offset[5] += iter_desc.get_size();
if (mode == rnn_mode::lstm) {
_q->memcpy((uint8_t *)(data[7]) + offset[5], dst_iter_c_cache,
iter_c_desc.get_size());
offset[5] += iter_c_desc.get_size();
}
}
}
}
return e;
}
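// Backward counterpart of execute_rnn_forward_primitive: the forward
// primitive descriptor is recreated as a hint for the backward descriptor,
// and the backward primitive is executed iter_num times. Arguments are read
// backwards out of the packed buffers, which is why insert_args advances each
// offset before computing the pointer (data - offset).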
inline
sycl::event engine_ext::execute_rnn_backward_primitive(
rnn_mode mode, ::dnnl::rnn_direction direction, rnn_bias_mode bias_mode,
::dnnl::memory::data_type dt, ::dnnl::memory::format_tag tag,
int seq_length, int batch_size, int src_c, int dst_c, int layer_size,
int direction_num, int hidden_size, int gate_num, int projection_size,
std::vector<void *> &data, std::vector<int> &offset, int iter_num) {
sycl::event e;
::dnnl::primitive *p = nullptr;
detail::primitive_cache_key_type key;
::dnnl::prop_kind fkind = ::dnnl::prop_kind::forward_training;
::dnnl::prop_kind bkind = ::dnnl::prop_kind::backward;
::dnnl::memory::desc bias_desc(
{layer_size, direction_num, gate_num, hidden_size}, dt,
::dnnl::memory::format_tag::ldgo);
::dnnl::memory::desc weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldigo);
::dnnl::memory::desc diff_weight_layer_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc diff_weight_iter_desc(
{layer_size, direction_num,
projection_size ? projection_size : hidden_size, gate_num, hidden_size},
dt, ::dnnl::memory::format_tag::ldgoi);
::dnnl::memory::desc projection_desc, diff_projection_desc;
if (projection_size) {
projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldio);
diff_projection_desc = ::dnnl::memory::desc(
{layer_size, direction_num, hidden_size, projection_size}, dt,
::dnnl::memory::format_tag::ldoi);
}
::dnnl::memory::desc src_desc({seq_length, batch_size, src_c}, dt, tag);
::dnnl::memory::desc dst_desc({seq_length, batch_size, dst_c}, dt, tag);
::dnnl::memory::desc iter_desc(
{layer_size, direction_num, batch_size,
projection_size ? projection_size : hidden_size},
dt, ::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc iter_c_desc(
{layer_size, direction_num, batch_size, hidden_size}, dt,
::dnnl::memory::format_tag::ldnc);
::dnnl::memory::desc workspace_desc;
::dnnl::memory::desc scratchpad_desc;
::dnnl::primitive_attr attr;
attr.set_scratchpad_mode(::dnnl::scratchpad_mode::user);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
auto fpd = create_primitive_desc<::dnnl::vanilla_rnn_forward>(
fkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, weight_layer_desc, weight_iter_desc,
bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::vanilla_rnn_backward>(
bkind,
mode == rnn_mode::vanilla_relu ? ::dnnl::algorithm::eltwise_relu
: ::dnnl::algorithm::eltwise_tanh,
direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::vanilla_rnn_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::gru) {
auto fpd = create_primitive_desc<::dnnl::gru_forward>(
fkind, direction, src_desc, iter_desc, weight_layer_desc,
weight_iter_desc, bias_desc, dst_desc, iter_desc, attr);
auto pd = create_primitive_desc<::dnnl::gru_backward>(
bkind, direction, src_desc, iter_desc, diff_weight_layer_desc,
diff_weight_iter_desc, bias_desc, dst_desc, iter_desc, src_desc,
iter_desc, weight_layer_desc, weight_iter_desc, bias_desc, dst_desc,
iter_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::gru_backward>(pd);
key = r.first;
p = r.second;
} else if (mode == rnn_mode::lstm) {
auto fpd = create_primitive_desc<::dnnl::lstm_forward>(
fkind, direction, src_desc, iter_desc, iter_c_desc, weight_layer_desc,
weight_iter_desc, ::dnnl::memory::desc(), projection_desc, bias_desc,
dst_desc, iter_desc, iter_c_desc, attr);
auto pd = create_primitive_desc<::dnnl::lstm_backward>(
bkind, direction, src_desc, iter_desc, iter_c_desc,
diff_weight_layer_desc, diff_weight_iter_desc, ::dnnl::memory::desc(),
diff_projection_desc, bias_desc, dst_desc, iter_desc, iter_c_desc,
src_desc, iter_desc, iter_c_desc, weight_layer_desc, weight_iter_desc,
::dnnl::memory::desc(), projection_desc, bias_desc, dst_desc, iter_desc,
iter_c_desc, fpd, attr);
workspace_desc = pd.workspace_desc();
scratchpad_desc = pd.scratchpad_desc();
auto r = create_primitive_with_pd<::dnnl::lstm_backward>(pd);
key = r.first;
p = r.second;
}
for (int i = 0; i < iter_num; i++) {
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_SRC_LAYER, {::dnnl::memory(src_desc, _eng, data[8])}},
{DNNL_ARG_DIFF_DST_LAYER, {::dnnl::memory(dst_desc, _eng, data[9])}},
{DNNL_ARG_SCRATCHPAD, {::dnnl::memory(scratchpad_desc, _eng, data[15])}}};
auto insert_args = [&](int arg_name, ::dnnl::memory::desc &d, void *data,
int &offset) {
offset += d.get_size();
execution_args->insert(
{arg_name, {::dnnl::memory(d, _eng, (uint8_t *)data - offset)}});
};
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DST_ITER_C, iter_c_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER_C, iter_c_desc, data[4], offset[2]);
}
insert_args(DNNL_ARG_DST_ITER, iter_desc, data[7], offset[0]);
insert_args(DNNL_ARG_DST_LAYER, dst_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_LAYER, src_desc, data[7], offset[0]);
insert_args(DNNL_ARG_WORKSPACE, workspace_desc, data[7], offset[0]);
insert_args(DNNL_ARG_SRC_ITER, iter_desc, data[2], offset[1]);
insert_args(DNNL_ARG_BIAS, bias_desc, data[6], offset[3]);
if (projection_size) {
insert_args(DNNL_ARG_WEIGHTS_PROJECTION, diff_projection_desc, data[6],
offset[3]);
}
insert_args(DNNL_ARG_WEIGHTS_ITER, diff_weight_iter_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_WEIGHTS_LAYER, diff_weight_layer_desc, data[6],
offset[3]);
insert_args(DNNL_ARG_DIFF_SRC_ITER, iter_desc, data[10], offset[4]);
insert_args(DNNL_ARG_DIFF_DST_ITER, iter_desc, data[11], offset[5]);
if (mode == rnn_mode::lstm) {
insert_args(DNNL_ARG_DIFF_SRC_ITER_C, iter_c_desc, data[12], offset[6]);
insert_args(DNNL_ARG_DIFF_DST_ITER_C, iter_c_desc, data[13], offset[7]);
}
insert_args(DNNL_ARG_DIFF_BIAS, bias_desc, data[14], offset[8]);
if (bias_mode == rnn_bias_mode::none) {
_q->memset((uint8_t *)(data[14]) - offset[8], 0, bias_desc.get_size());
}
if (projection_size) {
insert_args(DNNL_ARG_DIFF_WEIGHTS_PROJECTION, projection_desc, data[14],
offset[8]);
}
insert_args(DNNL_ARG_DIFF_WEIGHTS_ITER, weight_iter_desc, data[14],
offset[8]);
insert_args(DNNL_ARG_DIFF_WEIGHTS_LAYER, weight_layer_desc, data[14],
offset[8]);
if (mode == rnn_mode::vanilla_relu || mode == rnn_mode::vanilla_tanh) {
e = execute_primitive<::dnnl::vanilla_rnn_backward>(
{key, static_cast<::dnnl::vanilla_rnn_backward *>(p)},
execution_args);
} else if (mode == rnn_mode::gru) {
e = execute_primitive<::dnnl::gru_backward>(
{key, static_cast<::dnnl::gru_backward *>(p)}, execution_args);
} else if (mode == rnn_mode::lstm) {
e = execute_primitive<::dnnl::lstm_backward>(
{key, static_cast<::dnnl::lstm_backward *>(p)}, execution_args);
}
if (i != iter_num - 1) {
std::swap(data[8], data[9]);
}
}
return e;
}
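// Cache-key generators. Each specialization serializes the primitive kind,
// propagation kind, algorithm, and the relevant memory descriptors into a
// byte string that identifies the primitive in the primitive cache.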
#define GENERATE_RNN_PRIMITIVE_KEY(name) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_cell_kind() << (std::uint8_t)pd.get_direction() \
<< (std::uint8_t)pd.get_algorithm(); \
serialize_mem_desc(ss, pd.src_layer_desc()); \
serialize_mem_desc(ss, pd.src_iter_desc()); \
serialize_mem_desc(ss, pd.dst_layer_desc()); \
serialize_mem_desc(ss, pd.dst_iter_desc()); \
serialize_mem_desc(ss, pd.diff_src_layer_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_desc()); \
serialize_mem_desc(ss, pd.diff_dst_layer_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_desc()); \
serialize_mem_desc(ss, pd.src_iter_c_desc()); \
serialize_mem_desc(ss, pd.dst_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_src_iter_c_desc()); \
serialize_mem_desc(ss, pd.diff_dst_iter_c_desc()); \
return ss.str(); \
}
#define GENERATE_CONVOLUTION_PRIMITIVE_KEY(name, query_type) \
template <> \
inline std::string \
engine_ext::generate_cache_key<::dnnl::name::primitive_desc>( \
const ::dnnl::name::primitive_desc &pd) { \
std::stringstream ss; \
ss << (std::uint8_t)pd.get_kind() << (std::uint8_t)pd.get_prop_kind() \
<< (std::uint8_t)pd.get_algorithm() \
<< (std::uint8_t)pd.get_primitive_attr().get_fpmath_mode() \
<< (std::uint8_t)pd.get_group_size(); \
serialize_dims(ss, pd.get_strides()); \
serialize_dims(ss, pd.get_dilations()); \
serialize_dims(ss, pd.get_padding_l()); \
serialize_mem_desc(ss, pd.src_desc()); \
serialize_mem_desc(ss, pd.diff_src_desc()); \
serialize_mem_desc(ss, pd.dst_desc()); \
serialize_mem_desc(ss, pd.diff_dst_desc()); \
serialize_mem_desc(ss, pd.query_type()); \
serialize_mem_desc(ss, pd.weights_desc()); \
serialize_mem_desc(ss, pd.diff_weights_desc()); \
return ss.str(); \
}
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_forward)
GENERATE_RNN_PRIMITIVE_KEY(vanilla_rnn_backward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_forward)
GENERATE_RNN_PRIMITIVE_KEY(lstm_backward)
GENERATE_RNN_PRIMITIVE_KEY(gru_forward)
GENERATE_RNN_PRIMITIVE_KEY(gru_backward)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_forward, bias_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_data, scratchpad_desc)
GENERATE_CONVOLUTION_PRIMITIVE_KEY(convolution_backward_weights, diff_bias_desc)
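// Generic fallback used by all other primitive descriptors: serialize the
// common queries, then append the kind-specific parameters (epsilon/flags,
// reduction p, alpha/beta, LRN k, pooling geometry, or softmax axis).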
template <typename primitive_desc_type>
std::string engine_ext::generate_cache_key(const primitive_desc_type &pd) {
std::stringstream ss;
auto kind = pd.get_kind();
ss << (std::uint8_t)kind << (std::uint8_t)pd.get_prop_kind()
<< (std::uint8_t)pd.get_algorithm();
serialize_mem_desc(ss, pd.src_desc());
serialize_mem_desc(ss, pd.diff_src_desc());
serialize_mem_desc(ss, pd.dst_desc());
serialize_mem_desc(ss, pd.diff_dst_desc());
switch (kind) {
case ::dnnl::primitive::kind::batch_normalization:
ss << pd.get_epsilon() << (std::uint8_t)pd.get_flags();
case ::dnnl::primitive::kind::reduction:
ss << pd.get_p();
break;
case ::dnnl::primitive::kind::eltwise:
ss << pd.get_alpha() << pd.get_beta();
case ::dnnl::primitive::kind::lrn:
ss << pd.get_k();
break;
case ::dnnl::primitive::kind::pooling:
serialize_dims(ss, pd.get_strides());
serialize_dims(ss, pd.get_dilations());
serialize_dims(ss, pd.get_padding_l());
serialize_dims(ss, pd.get_kernel());
break;
case ::dnnl::primitive::kind::softmax:
ss << pd.get_axis();
break;
default:
break;
}
return ss.str();
}
template <typename primitive_type, typename... args_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive(args_type &&...args) {
auto pd =
create_primitive_desc<primitive_type>(std::forward<args_type>(args)...);
return create_primitive_with_pd<primitive_type>(pd);
}
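// Looks the primitive up in the cache by its generated key and constructs a
// new instance only on a cache miss; the {key, primitive} pair is handed back
// to the caller for execution.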
template <typename primitive_type>
std::pair<detail::primitive_cache_key_type, primitive_type *>
engine_ext::create_primitive_with_pd(
const typename primitive_type::primitive_desc &pd) {
detail::primitive_cache_key_type key = generate_cache_key(pd);
primitive_type *p = (primitive_type *)_primitive_cache.get(key);
if (!p) {
p = new primitive_type(pd);
}
return {key, p};
}
template <typename primitive_type, typename... args_type>
typename primitive_type::primitive_desc
engine_ext::create_primitive_desc(args_type &&...args) {
return typename primitive_type::primitive_desc(
_eng, std::forward<args_type>(args)...);
}
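// Blocking convenience wrappers: each of the following functions calls its
// async_* counterpart and waits on the returned event.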
inline
void engine_ext::fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
async_fill(src_desc, src, valuePtr).wait();
}
inline
void engine_ext::reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_reorder(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
async_scale(alpha, src_desc, src).wait();
}
inline
void engine_ext::sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
async_sum(alpha, src_desc, src, beta, dst_desc, dst).wait();
}
inline
void engine_ext::activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
async_activation_forward(desc, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
async_activation_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst,
::dnnl::memory *workspace) {
async_pooling_forward(desc, alpha, src_desc, src, beta, dst_desc, dst,
workspace).wait();
}
inline
void engine_ext::pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
async_pooling_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src,
workspace)
.wait();
}
inline
void engine_ext::softmax_forward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
async_softmax_forward(alg, mode, alpha, src_desc, src, beta, dst_desc, dst)
.wait();
}
inline
void engine_ext::softmax_backward(softmax_algorithm alg, softmax_mode mode,
float alpha, const memory_desc_ext &dst_desc,
void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src) {
async_softmax_backward(alg, mode, alpha, dst_desc, dst, diff_dst_desc,
diff_dst, beta, diff_src_desc, diff_src)
.wait();
}
inline
void engine_ext::lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
async_lrn_forward(desc, alpha, src_desc, src, beta, dst_desc, dst, workspace)
.wait();
}
inline
void engine_ext::lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &diff_src_desc,
void *diff_src,
::dnnl::memory *workspace) {
async_lrn_backward(desc, alpha, dst_desc, dst, diff_dst_desc, diff_dst,
src_desc, src, beta, diff_src_desc, diff_src, workspace)
.wait();
}
inline
sycl::event engine_ext::async_fill(const memory_desc_ext &src_desc, void *src,
const void *valuePtr) {
::dnnl::memory::data_type dt = src_desc.get_desc().get_data_type();
unsigned mem_size = src_desc.get_size();
switch (dt) {
case ::dnnl::memory::data_type::f32:
return fill_with_type<float>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::f16:
return fill_with_type<sycl::half>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s32:
return fill_with_type<int32_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::s8:
return fill_with_type<int8_t>(_q, src, valuePtr, mem_size);
case ::dnnl::memory::data_type::u8:
return fill_with_type<uint8_t>(_q, src, valuePtr, mem_size);
default:
throw std::runtime_error("async_fill: unsupported data type.");
}
}
inline
sycl::event engine_ext::async_reorder(float alpha, const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto pd = ::dnnl::reorder::primitive_desc(_eng, src_desc.get_desc(), _eng,
dst_desc.get_desc());
auto primitive = create_primitive_with_pd<::dnnl::reorder>(pd);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
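// In-place scale: src is copied to a temporary buffer and an eltwise_linear
// forward (alpha * x + 0) is run from the copy back into src, so the
// primitive's source and destination never alias. A no-op when alpha == 1.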
inline
sycl::event engine_ext::async_scale(float alpha, const memory_desc_ext &src_desc,
void *src) {
if (alpha == 1.f) {
return sycl::event();
}
void *src_cache = allocate(src_desc);
_q->memcpy(src_cache, src, src_desc.get_size());
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, ::dnnl::algorithm::eltwise_linear,
src_desc.get_desc(), src_desc.get_desc(), alpha, 0.f);
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src_cache)}};
return execute_primitive(primitive, args, {}, {src_cache});
}
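// Computes dst = alpha * src + beta * dst by copying dst into a temporary and
// running a oneDNN sum over {src, dst_copy}. The sum primitive is cached
// under a key built from the scales and the two source descriptors.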
inline sycl::event
engine_ext::async_sum(float alpha, const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc, void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
void *dst_cache = allocate(dst_desc);
_q->memcpy(dst_cache, dst, dst_desc.get_size());
auto pd = create_primitive_desc<::dnnl::sum>(
std::vector<float>{alpha, beta},
std::vector<::dnnl::memory::desc>{src_desc.get_desc(),
dst_desc.get_desc()});
std::stringstream ss;
ss << (std::uint8_t)pd.get_kind() << alpha << beta;
serialize_mem_desc(ss, pd.src_desc(0));
serialize_mem_desc(ss, pd.src_desc(1));
detail::primitive_cache_key_type key = ss.str();
::dnnl::sum *p = (::dnnl::sum *)_primitive_cache.get(key);
if (!p) {
p = new ::dnnl::sum(pd);
}
auto args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)},
{DNNL_ARG_MULTIPLE_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_MULTIPLE_SRC + 1,
::dnnl::memory(dst_desc.get_desc(), _eng, dst_cache)}};
return execute_primitive<::dnnl::sum>({key, p}, args, {}, {dst_cache});
}
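// Element-wise binary operation with alpha/beta scaling: sqrt and neg are
// lowered to eltwise primitives, the remaining ops to ::dnnl::binary. The
// inputs and the previous dst are scaled in temporary copies so that the
// final result is dst = op(alpha_0 * src_0, alpha_1 * src_1) + beta * dst.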
inline
sycl::event engine_ext::async_binary(binary_op op, float alpha_0,
const memory_desc_ext &src_desc_0, void *src_0,
float alpha_1, const memory_desc_ext &src_desc_1,
void *src_1, float beta,
const memory_desc_ext &dst_desc, void *dst) {
::dnnl::algorithm onednn_algorithm;
switch (op) {
case binary_op::max:
onednn_algorithm = ::dnnl::algorithm::binary_max;
break;
case binary_op::min:
onednn_algorithm = ::dnnl::algorithm::binary_min;
break;
case binary_op::add:
onednn_algorithm = ::dnnl::algorithm::binary_add;
break;
case binary_op::sub:
onednn_algorithm = ::dnnl::algorithm::binary_sub;
break;
case binary_op::mul:
onednn_algorithm = ::dnnl::algorithm::binary_mul;
break;
case binary_op::div:
onednn_algorithm = ::dnnl::algorithm::binary_div;
break;
case binary_op::sqrt:
onednn_algorithm = ::dnnl::algorithm::eltwise_sqrt;
break;
case binary_op::neg:
onednn_algorithm = ::dnnl::algorithm::eltwise_linear;
break;
}
if (onednn_algorithm == ::dnnl::algorithm::eltwise_sqrt ||
onednn_algorithm == ::dnnl::algorithm::eltwise_linear) {
void *src_cache = nullptr, *dst_cache = nullptr;
src_cache = allocate(src_desc_0);
dst_cache = allocate(dst_desc);
_q->memcpy(src_cache, src_0, src_desc_0.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_cache);
async_scale(beta, dst_desc, dst_cache);
    // eltwise_linear with alpha = -1 and beta = 1 gives output = 1 - input,
    // which simulates the behavior of neg.
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward_inference, onednn_algorithm,
src_desc_0.get_desc(), dst_desc.get_desc(), -1.f, 1.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC,
{::dnnl::memory(src_desc_0.get_desc(), _eng, src_cache)}}};
execute_primitive(
primitive, execution_args, {{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
auto e = async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(src_cache, *_q);
sycl::free(dst_cache, *_q);
});
});
return e;
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{};
void *src_0_cache = nullptr, *src_1_cache = nullptr, *dst_cache = nullptr;
src_0_cache = allocate(src_desc_0);
src_1_cache = allocate(src_desc_1);
dst_cache = allocate(dst_desc);
_q->memcpy(src_0_cache, src_0, src_desc_0.get_size());
_q->memcpy(src_1_cache, src_1, src_desc_1.get_size());
_q->memcpy(dst_cache, dst, dst_desc.get_size());
async_scale(alpha_0, src_desc_0, src_0_cache);
async_scale(alpha_1, src_desc_1, src_1_cache);
async_scale(beta, dst_desc, dst_cache);
execution_args->insert({DNNL_ARG_SRC_0, ::dnnl::memory(src_desc_0.get_desc(),
_eng, src_0_cache)});
execution_args->insert({DNNL_ARG_SRC_1, ::dnnl::memory(src_desc_1.get_desc(),
_eng, src_1_cache)});
auto primitive = create_primitive<::dnnl::binary>(
onednn_algorithm, src_desc_0.get_desc(), src_desc_1.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
async_sum(1.f, dst_desc, dst_cache, 1.f, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(dst_cache, *_q);
sycl::free(src_0_cache, *_q);
sycl::free(src_1_cache, *_q);
});
});
return e;
}
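// Tensor reduction: maps reduction_op onto a oneDNN reduction algorithm.
// amax first takes an element-wise absolute value into a scratch buffer,
// mul_no_zeros rewrites zero elements via transform_no_zero, and norm1 uses
// the p-power-sum algorithm with p = 1 (norm2 keeps the default p = 2).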
inline
sycl::event engine_ext::async_reduction(reduction_op op, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst) {
if (alpha == 0.f && beta == 1.f) {
return sycl::event();
}
float p = 2.f;
::dnnl::algorithm onednn_algorithm;
void *cache = nullptr;
switch (op) {
case reduction_op::amax:
cache = allocate(src_desc);
activation_desc adesc;
adesc.set_algorithm(::dnnl::algorithm::eltwise_abs);
async_activation_forward(adesc, 1.f, src_desc, src, 0.f, src_desc, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_max;
src = cache;
break;
case reduction_op::max:
onednn_algorithm = ::dnnl::algorithm::reduction_max;
break;
case reduction_op::min:
onednn_algorithm = ::dnnl::algorithm::reduction_min;
break;
case reduction_op::sum:
onednn_algorithm = ::dnnl::algorithm::reduction_sum;
break;
case reduction_op::mean:
onednn_algorithm = ::dnnl::algorithm::reduction_mean;
break;
case reduction_op::mul:
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
break;
case reduction_op::mul_no_zeros:
cache = allocate(src_desc);
transform_no_zero(src_desc, src, cache);
onednn_algorithm = ::dnnl::algorithm::reduction_mul;
src = cache;
break;
case reduction_op::norm1:
p = 1.f;
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_power_p_sum;
break;
case reduction_op::norm2:
onednn_algorithm = ::dnnl::algorithm::reduction_norm_lp_sum;
break;
}
auto primitive = create_primitive<::dnnl::reduction>(
onednn_algorithm, src_desc.get_desc(), dst_desc.get_desc(), p, 0.f);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, ::dnnl::memory(src_desc.get_desc(), _eng, src)}};
if (cache) {
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}},
{cache});
}
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_forward(activation_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, desc.get_algorithm(), src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_activation_backward(
activation_desc &desc, float alpha, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc data_desc = dst_desc.get_desc();
auto alg = desc.get_algorithm();
if ((alg == ::dnnl::algorithm::eltwise_clip) ||
(alg == ::dnnl::algorithm::eltwise_linear) ||
(alg == ::dnnl::algorithm::eltwise_swish)) {
data_desc = src_desc.get_desc();
}
auto primitive = create_primitive<::dnnl::eltwise_backward>(
alg, diff_src_desc.get_desc(), diff_dst_desc.get_desc(), data_desc,
desc.get_alpha(), desc.get_beta(),
create_primitive_desc<::dnnl::eltwise_forward>(
::dnnl::prop_kind::forward, alg, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_alpha(), desc.get_beta()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
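// Pooling forward. The workspace required by the backward pass is either
// returned through the optional `workspace` argument or stored internally,
// keyed by the src pointer, via insert_workspace.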
inline
sycl::event engine_ext::async_pooling_forward(pooling_desc &desc, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive_desc =
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::pooling_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event engine_ext::async_pooling_backward(
pooling_desc &desc, float alpha, const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
int pooling_dim = desc.get_stride().size();
std::vector<int64_t> dilation(pooling_dim, 0);
auto primitive = create_primitive<::dnnl::pooling_backward>(
desc.get_algorithm(), diff_src_desc.get_desc(), diff_dst_desc.get_desc(),
desc.get_stride(), desc.get_kernel(), dilation, desc.get_padding(),
desc.get_padding(),
create_primitive_desc<::dnnl::pooling_forward>(
::dnnl::prop_kind::forward_training, desc.get_algorithm(),
src_desc.get_desc(), dst_desc.get_desc(), desc.get_stride(),
desc.get_kernel(), dilation, desc.get_padding(), desc.get_padding()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
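// Softmax forward: in instance mode the spatial dimensions are folded into
// the channel dimension so the softmax is computed over a single axis
// (axis 1).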
inline
sycl::event engine_ext::async_softmax_forward(softmax_algorithm alg,
softmax_mode mode, float alpha,
const memory_desc_ext &src_desc,
void *src, float beta,
const memory_desc_ext &dst_desc,
void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
::dnnl::memory::desc help_src_desc = src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_src_desc = compress_spatial_dimensions_to_channel(help_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(help_src_desc, _eng, src)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_src_desc,
help_dst_desc, 1);
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, memory_desc_ext(help_dst_desc), dst}});
}
inline
sycl::event engine_ext::async_softmax_backward(
softmax_algorithm alg, softmax_mode mode, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
::dnnl::memory::desc help_diff_src_desc = diff_src_desc.get_desc();
::dnnl::memory::desc help_dst_desc = dst_desc.get_desc();
::dnnl::memory::desc help_diff_dst_desc = diff_dst_desc.get_desc();
if (mode == softmax_mode::instance) {
help_diff_src_desc =
compress_spatial_dimensions_to_channel(help_diff_src_desc);
help_dst_desc = compress_spatial_dimensions_to_channel(help_dst_desc);
help_diff_dst_desc =
compress_spatial_dimensions_to_channel(help_diff_dst_desc);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(help_dst_desc, _eng, dst)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(help_diff_dst_desc, _eng, diff_dst)}}};
::dnnl::algorithm softmax_alg = ::dnnl::algorithm::softmax_accurate;
if (alg == softmax_algorithm::log) {
softmax_alg = ::dnnl::algorithm::softmax_log;
}
auto primitive = create_primitive<::dnnl::softmax_backward>(
softmax_alg, help_diff_src_desc, help_diff_dst_desc, help_dst_desc, 1,
create_primitive_desc<::dnnl::softmax_forward>(
::dnnl::prop_kind::forward, softmax_alg, help_diff_src_desc,
help_dst_desc, 1));
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC,
memory_desc_ext(help_diff_src_desc), diff_src}});
}
inline
sycl::event engine_ext::async_lrn_forward(lrn_desc &desc, float alpha,
const memory_desc_ext &src_desc, void *src,
float beta, const memory_desc_ext &dst_desc,
void *dst, ::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto primitive_desc = create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k());
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}}};
::dnnl::memory ws_mem(primitive_desc.workspace_desc(), _eng);
execution_args->insert({DNNL_ARG_WORKSPACE, ws_mem});
if (workspace) {
*workspace = ws_mem;
} else {
insert_workspace(src, ws_mem);
}
auto primitive =
create_primitive_with_pd<::dnnl::lrn_forward>(primitive_desc);
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, dst_desc, dst}});
}
inline
sycl::event
engine_ext::async_lrn_backward(lrn_desc &desc, float alpha,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &diff_dst_desc, void *diff_dst,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src,
::dnnl::memory *workspace) {
if (scale_parameter_preprocess({{alpha, beta, diff_src_desc, diff_src}})) {
return sycl::event();
}
auto primitive = create_primitive<::dnnl::lrn_backward>(
::dnnl::algorithm::lrn_across_channels, diff_src_desc.get_desc(),
diff_dst_desc.get_desc(), src_desc.get_desc(), desc.get_local_size(),
desc.get_alpha(), desc.get_beta(), desc.get_k(),
create_primitive_desc<::dnnl::lrn_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::lrn_across_channels, src_desc.get_desc(),
dst_desc.get_desc(), desc.get_local_size(), desc.get_alpha(),
desc.get_beta(), desc.get_k()));
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DST, {::dnnl::memory(dst_desc.get_desc(), _eng, dst)}},
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
if (workspace) {
execution_args->insert({DNNL_ARG_WORKSPACE, *workspace});
} else {
execution_args->insert({DNNL_ARG_WORKSPACE, get_workspace(src)});
}
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
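// Fused batch-normalization paths need a workspace the size of the source
// tensor; plain batch normalization needs none.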
inline
size_t engine_ext::get_batch_normalization_workspace_size(
batch_normalization_ops ops, const memory_desc_ext &src_desc) {
  if (ops == batch_normalization_ops::none) {
return 0;
}
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *mean, void *var) {
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc, mean,
var, nullptr, nullptr);
}
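// Fused inference variant: batch normalization runs into a temporary buffer,
// the optional summand is added, the activation is applied in place, and the
// result is blended into dst with alpha/beta via async_sum.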
inline
sycl::event engine_ext::async_batch_normalization_forward_inference(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *mean, void *var) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
std::vector<void *> caches;
if (has_post_op) {
void *dst_cache = allocate(dst_desc);
caches.push_back(dst_cache);
batch_normalization_forward_internal(
true, mode, epsilon, 0.f, 1.f, src_desc, src, 0.f, dst_desc, dst_cache,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr,
nullptr);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc, dst_cache);
}
async_activation_forward(adesc, 1.f, dst_desc, dst_cache, 0.f, dst_desc,
dst_cache);
e = async_sum(alpha, dst_desc, dst_cache, beta, dst_desc, dst);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
return batch_normalization_forward_internal(
true, mode, epsilon, 0.f, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, mean, var, nullptr, nullptr);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var) {
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_mean_var_desc, scale, bias, scale_bias_mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
}
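// Fused training variant: the normalized result is written into the caller's
// workspace, the summand is added there when requested, and the activation
// writes the final alpha/beta-blended result into dst.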
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_desc, void *scale, void *bias,
const memory_desc_ext &mean_var_desc, void *running_mean, void *running_var,
void *saved_mean, void *saved_var, size_t workspace_size,
void *workspace) {
bool has_post_op = (ops != batch_normalization_ops::none);
sycl::event e;
if (has_post_op) {
    if (workspace_size < dst_desc.get_desc().get_size()) {
      throw std::runtime_error("async_batch_normalization_forward_training_ex: "
                               "insufficient workspace.");
}
batch_normalization_forward_internal(
false, mode, epsilon, factor, 1.f, src_desc, src, 0.f, dst_desc,
workspace, scale_bias_desc, scale, bias, mean_var_desc,
saved_mean, saved_var, running_mean, running_var);
if (ops == batch_normalization_ops::add_activation) {
async_sum(1.f, summand_desc, summand, 1.f, dst_desc,
workspace);
}
return async_activation_forward(adesc, alpha, dst_desc, workspace,
beta, dst_desc, dst);
}
return batch_normalization_forward_internal(
false, mode, epsilon, factor, alpha, src_desc, src, beta, dst_desc, dst,
scale_bias_desc, scale, bias, mean_var_desc, saved_mean, saved_var,
running_mean, running_var);
}
inline
sycl::event engine_ext::async_batch_normalization_forward_training(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float factor, float alpha,
const memory_desc_ext &src_desc, void *src, float beta,
const memory_desc_ext &dst_desc, void *dst,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &scale_bias_mean_var_desc, void *scale, void *bias,
void *running_mean, void *running_var, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_forward_training(
mode, ops, adesc, epsilon, factor, alpha, src_desc, src, beta, dst_desc,
dst, summand_desc, summand, scale_bias_mean_var_desc, scale, bias,
scale_bias_mean_var_desc, running_mean, running_var, saved_mean,
saved_var, workspace_size, workspace);
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta_data,
const memory_desc_ext &diff_src_desc, void *diff_src, float alpha_param,
const memory_desc_ext &diff_scale_bias_mean_var_desc, void *scale,
float beta_param, void *diff_scale, void *diff_bias, void *saved_mean,
void *saved_var) {
return batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, diff_dst_desc, diff_dst,
beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_mean_var_desc, scale, nullptr, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var);
}
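// Fused backward variant: the activation (and, for add_activation, the
// summand) gradients are peeled off first using the forward result saved in
// `workspace`, producing the effective diff_dst that is then fed into the
// plain batch-normalization backward path.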
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_desc, void *scale,
void *bias, float beta_param, void *diff_scale, void *diff_bias,
const memory_desc_ext &mean_var_desc, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
std::vector<void *> caches;
::dnnl::memory::desc real_diff_dst_desc = diff_dst_desc.get_desc();
void *real_diff_dst = diff_dst;
if (ops != batch_normalization_ops::none &&
workspace_size < dst_desc.get_desc().get_size()) {
    throw std::runtime_error("async_batch_normalization_backward_ex: "
                             "insufficient workspace.");
}
if (ops == batch_normalization_ops::add_activation) {
void *diff_summand_cache = allocate(diff_summand_desc);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc, diff_dst,
dst_desc, workspace, 0.f,
diff_summand_desc, diff_summand_cache);
caches.push_back(diff_summand_cache);
async_sum(alpha_data, diff_summand_desc, diff_summand_cache, beta_data,
diff_summand_desc, diff_summand);
real_diff_dst_desc = diff_summand_desc.get_desc();
real_diff_dst = diff_summand_cache;
} else if (ops == batch_normalization_ops::activation) {
void *diff_dst_cache = allocate(diff_dst_desc);
caches.push_back(diff_dst_cache);
async_activation_backward(adesc, 1.f, dst_desc, dst, diff_dst_desc,
diff_dst, dst_desc, workspace,
0.f, diff_dst_desc, diff_dst_cache);
real_diff_dst = diff_dst_cache;
}
sycl::event e = batch_normalization_backward_internal(
mode, epsilon, alpha_data, src_desc, src, real_diff_dst_desc,
real_diff_dst, beta_data, diff_src_desc, diff_src, alpha_param,
diff_scale_bias_desc, scale, bias, beta_param, diff_scale, diff_bias,
mean_var_desc, saved_mean, saved_var);
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
for (auto ptr : caches) {
sycl::free(ptr, *_q);
}
});
});
return e;
}
inline
sycl::event engine_ext::async_batch_normalization_backward(
batch_normalization_mode mode, batch_normalization_ops ops,
activation_desc &adesc, float epsilon, float alpha_data,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta_data, const memory_desc_ext &diff_src_desc, void *diff_src,
const memory_desc_ext &diff_summand_desc, void *diff_summand,
float alpha_param, const memory_desc_ext &diff_scale_bias_mean_var_desc,
void *scale, void *bias, float beta_param, void *diff_scale,
void *diff_bias, void *saved_mean, void *saved_var,
size_t workspace_size, void *workspace) {
return async_batch_normalization_backward(
mode, ops, adesc, epsilon, alpha_data, src_desc, src, dst_desc, dst,
diff_dst_desc, diff_dst, beta_data, diff_src_desc, diff_src,
diff_summand_desc, diff_summand, alpha_param,
diff_scale_bias_mean_var_desc, scale, bias, beta_param, diff_scale,
diff_bias, diff_scale_bias_mean_var_desc, saved_mean, saved_var,
workspace_size, workspace);
}
inline
sycl::event
engine_ext::async_convolution_forward(convolution_desc &desc, ::dnnl::algorithm alg,
float alpha, const memory_desc_ext &src_desc,
void *src, const memory_desc_ext &weight_desc,
void *weight, float beta,
const memory_desc_ext &dst_desc, void *dst) {
if (scale_parameter_preprocess({{alpha, beta, dst_desc, dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto origin_src_md = src_desc.get_desc();
auto origin_dst_md = dst_desc.get_desc();
auto origin_weight_md = help_weight_desc;
auto src_md = transfer_memory_desc_to_format_tag_any(origin_src_md);
auto dst_md = transfer_memory_desc_to_format_tag_any(origin_dst_md);
auto weight_md = transfer_memory_desc_to_format_tag_any(origin_weight_md);
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_md, weight_md, dst_md,
desc.get_stride(), desc.get_dilate(), desc.get_padding(),
desc.get_padding(), attr);
::dnnl::convolution_forward::primitive_desc pd =
::dnnl::convolution_forward::primitive_desc(
const_cast<dnnl_primitive_desc_t>(
primitive.second->get_primitive_desc()));
auto optimal_src_md = pd.src_desc();
auto optimal_dst_md = pd.dst_desc();
auto optimal_weight_md = pd.weights_desc();
void *optimal_src = src, *optimal_dst = dst, *optimal_weight = weight;
std::vector<void *> input_caches, output_caches;
allocate_and_reorder_memory_to_optimal(origin_src_md, src, optimal_src_md,
optimal_src, input_caches);
allocate_and_reorder_memory_to_optimal(origin_dst_md, dst, optimal_dst_md,
optimal_dst, output_caches);
allocate_and_reorder_memory_to_optimal(origin_weight_md, weight,
optimal_weight_md, optimal_weight,
input_caches);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(optimal_src_md, _eng, optimal_src)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(optimal_weight_md, _eng, optimal_weight)}}};
auto e = execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DST, optimal_dst_md, optimal_dst}},
input_caches);
if(origin_dst_md != optimal_dst_md){
e = async_reorder(1.f, optimal_dst_md, optimal_dst, 0.f, origin_dst_md, dst);
}
async_free(_q, e, nullptr, output_caches);
return e;
}
inline
sycl::event engine_ext::async_convolution_forward(
convolution_desc &desc, ::dnnl::algorithm alg, activation_desc &adesc,
float alpha_0, const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &weight_desc, void *weight, float alpha_1,
const memory_desc_ext &summand_desc, void *summand,
const memory_desc_ext &bias_desc, void *bias,
const memory_desc_ext &dst_desc, void *dst) {
int channel_num = bias_desc.get_element_num();
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::memory::desc help_bias_desc = {{channel_num},
bias_desc.get_desc().get_data_type(),
::dnnl::memory::format_tag::a};
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto primitive = create_primitive<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training, alg, src_desc.get_desc(),
help_weight_desc, help_bias_desc, dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_BIAS, {::dnnl::memory(help_bias_desc, _eng, bias)}}};
void *cache = nullptr;
if (alpha_0 != 1.f) {
cache = allocate(help_weight_desc);
_q->memcpy(cache, weight, weight_desc.get_size());
async_scale(alpha_0, help_weight_desc, cache);
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, cache)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}}, {cache});
} else {
execution_args->insert(
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}});
execute_primitive(primitive, execution_args,
{{1.f, 0.f, DNNL_ARG_DST, dst_desc, dst}});
}
async_sum(alpha_1, summand_desc, summand, 1.f, dst_desc, dst);
return async_activation_forward(adesc, 1.f, dst_desc, dst, 0.f, dst_desc, dst);
}
inline
sycl::event engine_ext::async_convolution_backward_data(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &weight_desc, void *weight,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_src_desc, void *diff_src) {
if (scale_parameter_preprocess({{alpha, beta, diff_dst_desc, diff_dst}})) {
return sycl::event();
}
auto help_weight_desc =
get_group_weight_desc(desc.get_group_count(), weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive = create_primitive<::dnnl::convolution_backward_data>(
::dnnl::algorithm::convolution_auto, diff_src_desc.get_desc(),
help_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}},
{DNNL_ARG_WEIGHTS, {::dnnl::memory(help_weight_desc, _eng, weight)}}};
return execute_primitive(
primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_SRC, diff_src_desc, diff_src}});
}
inline
sycl::event engine_ext::async_convolution_backward_weight(
convolution_desc &desc, ::dnnl::algorithm alg, float alpha,
const memory_desc_ext &src_desc, void *src,
const memory_desc_ext &diff_dst_desc, void *diff_dst, float beta,
const memory_desc_ext &diff_weight_desc, void *diff_weight) {
if (scale_parameter_preprocess(
{{alpha, beta, diff_weight_desc, diff_weight}})) {
return sycl::event();
}
auto help_diff_weight_desc =
get_group_weight_desc(desc.get_group_count(), diff_weight_desc);
::dnnl::primitive_attr attr;
attr.set_fpmath_mode(desc.get_math_mode());
auto forward_primitive =
create_primitive_desc<::dnnl::convolution_forward>(
::dnnl::prop_kind::forward_training,
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(), attr);
auto primitive =
create_primitive<::dnnl::convolution_backward_weights>(
::dnnl::algorithm::convolution_auto, src_desc.get_desc(),
help_diff_weight_desc, diff_dst_desc.get_desc(), desc.get_stride(),
desc.get_dilate(), desc.get_padding(), desc.get_padding(),
forward_primitive, attr);
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC, {::dnnl::memory(src_desc.get_desc(), _eng, src)}},
{DNNL_ARG_DIFF_DST,
{::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)}}};
return execute_primitive(primitive, execution_args,
{{alpha, beta, DNNL_ARG_DIFF_WEIGHTS,
help_diff_weight_desc, diff_weight}});
}
inline
sycl::event engine_ext::async_convolution_backward_bias(
float alpha, const memory_desc_ext &diff_dst_desc, void *diff_dst,
float beta, const memory_desc_ext &diff_bias_desc, void *diff_bias) {
return async_reduction(reduction_op::sum, alpha, diff_dst_desc, diff_dst, beta,
diff_bias_desc, diff_bias);
}
inline
void engine_ext::rnn_get_weight_space_size(const rnn_desc &desc,
size_t *weight_space_size) {
*weight_space_size = 0;
rnn_forward_internal(desc, ::dnnl::prop_kind::forward_inference,
memory_desc_ext(), nullptr, memory_desc_ext(), nullptr,
memory_desc_ext(), nullptr, nullptr, memory_desc_ext(),
nullptr, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, true,
weight_space_size, nullptr, nullptr);
return;
}
inline
void engine_ext::rnn_get_scratchpad_workspace_size(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, size_t *scratchpad_size,
size_t *workspace_size) {
*workspace_size = 0;
*scratchpad_size = 0;
rnn_forward_internal(desc, kind, src_desc, nullptr, memory_desc_ext(),
nullptr, memory_desc_ext(), nullptr, nullptr,
memory_desc_ext(), nullptr, nullptr, 0, nullptr, 0,
nullptr, 0, nullptr, true, nullptr, workspace_size,
scratchpad_size);
return;
}
inline
sycl::event engine_ext::async_rnn_forward(
const rnn_desc &desc, ::dnnl::prop_kind kind,
const memory_desc_ext &src_desc, void *src, const memory_desc_ext &dst_desc,
void *dst, const memory_desc_ext &iter_desc, void *src_iter, void *dst_iter,
const memory_desc_ext &iter_c_desc, void *src_iter_c, void *dst_iter_c,
size_t weight_size, void *weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
return rnn_forward_internal(
desc, kind, src_desc, src, dst_desc, dst, iter_desc, src_iter, dst_iter,
iter_c_desc, src_iter_c, dst_iter_c, weight_size, weight, workspace_size,
workspace, scratchpad_size, scratchpad, false, nullptr, nullptr,
nullptr);
}
inline
sycl::event engine_ext::async_rnn_backward(
const rnn_desc &desc, const memory_desc_ext &dst_desc, void *dst,
void *diff_dst, const memory_desc_ext &src_desc, void *src, void *diff_src,
const memory_desc_ext &iter_desc, void *src_iter, void *diff_dst_iter,
void *diff_src_iter, const memory_desc_ext &iter_c_desc, void *src_iter_c,
void *diff_dst_iter_c, void *diff_src_iter_c, size_t weight_size,
void *weight, void *diff_weight, size_t scratchpad_size, void *scratchpad,
size_t workspace_size, void *workspace) {
::dnnl::memory::data_type src_dt;
::dnnl::memory::format_tag src_format_tag;
rnn_mode mode;
rnn_memory_format_tag format_tag;
rnn_bias_mode bias_mode;
rnn_direction direction;
dpct::library_data_t dt;
int direction_num = 1, input_size = 0, hidden_size = 0, projection_size = 0,
layer_size = 0, gate_num = 1, output_size = 0, data_type_size = 0,
seq_length = 1, batch_size = 1;
void *last_layer_cache = nullptr;
void *hidden_layer_cache = nullptr;
sycl::event e;
std::vector<int> offset(9, 0);
std::vector<void *> data = {
src,
dst,
(uint8_t *)src_iter + iter_desc.get_size(),
nullptr,
(uint8_t *)src_iter_c + iter_c_desc.get_size(),
nullptr,
(uint8_t *)weight + weight_size,
(uint8_t *)workspace + workspace_size,
diff_src,
diff_dst,
(uint8_t *)diff_src_iter + iter_desc.get_size(),
(uint8_t *)diff_dst_iter + iter_desc.get_size(),
(uint8_t *)diff_src_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_dst_iter_c + iter_c_desc.get_size(),
(uint8_t *)diff_weight + weight_size,
scratchpad};
desc.get(&mode, &bias_mode, &direction, &dt, &input_size, &hidden_size,
&projection_size, &layer_size);
get_rnn_configuration(src_desc.get_desc(), direction, mode, dt, hidden_size,
&src_dt, &src_format_tag, &projection_size,
&output_size, &seq_length, &batch_size, &direction_num,
&gate_num);
if (direction == rnn_direction::bidirectional) {
if (layer_size > 1) {
last_layer_cache = allocate(src_desc);
hidden_layer_cache = allocate(src_desc);
data[8] = last_layer_cache;
}
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_concat, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, 2 * output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset, 1);
if (layer_size > 1) {
data[8] = hidden_layer_cache;
data[9] = last_layer_cache;
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::bidirectional_sum, bias_mode, src_dt,
src_format_tag, seq_length, batch_size, output_size, output_size, 1,
direction_num, hidden_size, gate_num, projection_size, data, offset,
layer_size - 1);
_q->memcpy(diff_src,
((layer_size - 1) % 2 == 0) ? last_layer_cache
: hidden_layer_cache,
src_desc.get_size());
}
} else {
e = execute_rnn_backward_primitive(
mode, ::dnnl::rnn_direction::unidirectional_left2right, bias_mode,
src_dt, src_format_tag, seq_length, batch_size, output_size,
output_size, layer_size, direction_num, hidden_size, gate_num,
projection_size, data, offset, 1);
}
if (last_layer_cache && hidden_layer_cache) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
sycl::free(last_layer_cache, *_q);
sycl::free(hidden_layer_cache, *_q);
});
});
}
return e;
}
inline
size_t engine_ext::get_dropout_state_size(){
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
sycl::queue q;
if(_random_engine_state_size == -1) {
if(_q){
q = *_q;
} else {
q = dpct::get_current_device().default_queue();
}
auto rand_engine = rng_engine_t(q, 0);
_random_engine_state_size = oneapi::mkl::rng::get_state_size(rand_engine);
}
return _random_engine_state_size;
#endif
}
inline size_t
engine_ext::get_dropout_workspace_size(const memory_desc_ext &src_desc) {
return src_desc.get_size();
}
inline
sycl::event engine_ext::async_dropout_forward(dropout_desc &desc,
const memory_desc_ext &src_desc,
void *src,
const memory_desc_ext &dst_desc,
void *dst, void *workspace,
size_t workspace_size) {
if (workspace_size < src_desc.get_size()) {
throw std::runtime_error("async_dropout_forward: no sufficient workspace.");
}
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(dst, 0, dst_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, src_desc, src, 0.f, dst_desc, dst);
}
float scale_factor = 1.f / (1.f - p);
void *cache = workspace;
memory_desc_ext rng_data_desc(
::dnnl::memory::desc(src_desc.get_dims(), ::dnnl::memory::data_type::s32,
src_desc.get_strides()));
if (src_desc.get_desc().get_data_type() != ::dnnl::memory::data_type::s32) {
cache = allocate(rng_data_desc);
}
desc.generate(_q, _random_engine_state_size, rng_data_desc.get_element_num(),
(std::int32_t *)cache);
if (cache == workspace) {
async_scale(scale_factor, src_desc, workspace);
} else {
async_reorder(scale_factor, rng_data_desc, cache, 0.f, src_desc, workspace);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0, ::dnnl::memory(src_desc.get_desc(), _eng, src)},
{DNNL_ARG_SRC_1, ::dnnl::memory(src_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(dst_desc.get_desc(), _eng, dst)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, src_desc.get_desc(), src_desc.get_desc(),
dst_desc.get_desc());
auto e = execute_primitive(primitive, execution_args);
if (cache != workspace) {
_q->submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { sycl::free(cache, *_q); });
});
}
return e;
}
inline
sycl::event engine_ext::async_dropout_backward(
dropout_desc &desc, const memory_desc_ext &diff_dst_desc,
void *diff_dst, const memory_desc_ext &diff_src_desc, void *diff_src,
void *workspace, size_t workspace_size) {
float p = desc.get_probability();
if (p == 1.f) {
return _q->memset(diff_src, 0, diff_src_desc.get_size());
} else if (p == 0.f) {
return async_reorder(1.f, diff_dst_desc, diff_dst, 0.f, diff_src_desc,
diff_src);
}
auto execution_args = new std::unordered_map<int, ::dnnl::memory>{
{DNNL_ARG_SRC_0,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, diff_dst)},
{DNNL_ARG_SRC_1,
::dnnl::memory(diff_dst_desc.get_desc(), _eng, workspace)},
{DNNL_ARG_DST, ::dnnl::memory(diff_src_desc.get_desc(), _eng, diff_src)}};
auto primitive = create_primitive<::dnnl::binary>(
::dnnl::algorithm::binary_mul, diff_dst_desc.get_desc(),
diff_dst_desc.get_desc(), diff_src_desc.get_desc());
return execute_primitive(primitive, execution_args);
}
} // namespace dnnl
} // namespace dpct
#endif // __DPCT_DNNL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/lapack_utils.hpp | //==---- lapack_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LAPACK_UTILS_HPP__
#define __DPCT_LAPACK_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace lapack {
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The symmetric matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The symmetric matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T>
inline int sygvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
T *w, T *scratchpad, int scratchpad_size, int *info) {
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<T>(a);
auto b_buffer = get_buffer<T>(b);
auto w_buffer = get_buffer<T>(w);
auto scratchpad_buffer = get_buffer<T>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class sygvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::sygvd(queue, itype, jobz, uplo, n, a, lda, b, ldb, w,
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: sygvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
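// Usage sketch: a minimal, illustrative way to drive sygvd in USM mode
// (DPCT_USM_LEVEL_NONE not defined), assuming a, b, w and info are
// device-accessible USM pointers. The helper name, the job/uplo choices and
// the scratchpad query are assumptions made for illustration only.
template <typename T>
inline int example_sygvd(sycl::queue &queue, std::int64_t n, T *a,
                         std::int64_t lda, T *b, std::int64_t ldb, T *w,
                         int *info) {
  // Query the oneMKL scratchpad size for the same parameters and allocate it
  // in device USM.
  std::int64_t ws_size = oneapi::mkl::lapack::sygvd_scratchpad_size<T>(
      queue, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
  T *scratchpad = sycl::malloc_device<T>(ws_size, queue);
  int ret = sygvd(queue, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
                  static_cast<int>(n), a, static_cast<int>(lda), b,
                  static_cast<int>(ldb), w, scratchpad,
                  static_cast<int>(ws_size), info);
  // The computation may still be running; wait before freeing the scratchpad.
  queue.wait();
  sycl::free(scratchpad, queue);
  return ret;
}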
/// Computes all the eigenvalues and, optionally, the eigenvectors of a complex
/// generalized Hermitian positive-definite eigenproblem using a divide and
/// conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1 or 2 or 3. Specifies the problem type to be solved.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrices A and B.
/// \param [in,out] a The Hermitian matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b The Hermitian matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] w Eigenvalues.
/// \param [in] scratchpad Scratchpad memory to be used by the routine
/// for storing intermediate results.
/// \param [in] scratchpad_size Size of scratchpad memory as a number of
/// floating point elements of type T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename Tw>
inline int hegvd(sycl::queue &queue, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
Tw *w, T *scratchpad, int scratchpad_size, int *info) {
using Ty = typename DataType<T>::T2;
#ifdef DPCT_USM_LEVEL_NONE
auto info_buf = get_buffer<int>(info);
auto a_buffer = get_buffer<Ty>(a);
auto b_buffer = get_buffer<Ty>(b);
auto w_buffer = get_buffer<Tw>(w);
auto scratchpad_buffer = get_buffer<Ty>(scratchpad);
int info_val = 0;
int ret_val = 0;
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, a_buffer, lda,
b_buffer, ldb, w_buffer, scratchpad_buffer,
scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_val = static_cast<int>(e.info());
ret_val = 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
ret_val = 1;
}
queue.submit([&, info_val](sycl::handler &cgh) {
auto info_acc = info_buf.get_access<sycl::access_mode::write>(cgh);
cgh.single_task<dpct_kernel_name<class hegvd_set_info, T>>(
[=]() { info_acc[0] = info_val; });
});
return ret_val;
#else
try {
oneapi::mkl::lapack::hegvd(queue, itype, jobz, uplo, n, (Ty *)a, lda, (Ty *)b,
ldb, w, (Ty *)scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::exception const& e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: hegvd"
<< std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
int info_val = static_cast<int>(e.info());
queue.memcpy(info, &info_val, sizeof(int)).wait();
return 1;
} catch (sycl::exception const& e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, sizeof(int)).wait();
return 1;
}
queue.memset(info, 0, sizeof(int));
return 0;
#endif
}
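// Usage sketch: the complex-valued counterpart of the sketch above, e.g. with
// sycl::float2 data (mapped to std::complex<float> internally via DataType)
// and real eigenvalues written to w. Helper name and job/uplo choices are
// illustrative assumptions.
template <typename T, typename Tw>
inline int example_hegvd(sycl::queue &queue, std::int64_t n, T *a,
                         std::int64_t lda, T *b, std::int64_t ldb, Tw *w,
                         int *info) {
  using Ty = typename DataType<T>::T2;
  std::int64_t ws_size = oneapi::mkl::lapack::hegvd_scratchpad_size<Ty>(
      queue, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper, n, lda, ldb);
  T *scratchpad = sycl::malloc_device<T>(ws_size, queue);
  int ret = hegvd(queue, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
                  static_cast<int>(n), a, static_cast<int>(lda), b,
                  static_cast<int>(ldb), w, scratchpad,
                  static_cast<int>(ws_size), info);
  queue.wait();
  sycl::free(scratchpad, queue);
  return ret;
}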
/// Computes the Cholesky factorizations of a batch of symmetric (or Hermitian,
/// for complex data) positive-definite matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrf_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
T *a[], int lda, int *info, int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t lda_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->lda_info = lda;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrf_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrf_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info), (Ty **)a,
&(matrix_info->lda_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrf_batch_scratchpad_size/potrf_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
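// Usage sketch: one plausible way to call the batched Cholesky factorization
// above on a batch of two matrices. Holding the pointer array in shared USM
// and the batch size of 2 are assumptions made for illustration; info must
// point to at least group_size ints in device-accessible memory.
template <typename T>
inline int example_potrf_batch(sycl::queue &queue, int n, T *a0, T *a1,
                               int lda, int *info) {
  T **a_array = sycl::malloc_shared<T *>(2, queue);
  a_array[0] = a0;
  a_array[1] = a1;
  int ret = potrf_batch(queue, oneapi::mkl::uplo::lower, n, a_array, lda, info,
                        /*group_size=*/2);
  // Wait before releasing the pointer array that the batch call may still use.
  queue.wait();
  sycl::free(a_array, queue);
  return ret;
}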
/// Solves a batch of systems of linear equations with Cholesky-factored
/// symmetric (Hermitian) positive-definite coefficient matrices.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] queue Device queue where calculations will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] nrhs The number of right-hand sides.
/// \param [in,out] a Array of pointers to matrix A.
/// \param [in] lda The leading dimension of matrix A.
/// \param [in,out] b Array of pointers to matrix B.
/// \param [in] ldb The leading dimension of matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
/// \param [in] group_size The batch size.
template <typename T>
inline int potrs_batch(sycl::queue &queue, oneapi::mkl::uplo uplo, int n,
int nrhs, T *a[], int lda, T *b[], int ldb, int *info,
int group_size) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
using Ty = typename DataType<T>::T2;
struct matrix_info_t {
oneapi::mkl::uplo uplo_info;
std::int64_t n_info;
std::int64_t nrhs_info;
std::int64_t lda_info;
std::int64_t ldb_info;
std::int64_t group_size_info;
};
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->uplo_info = uplo;
matrix_info->n_info = n;
matrix_info->nrhs_info = nrhs;
matrix_info->lda_info = lda;
matrix_info->ldb_info = ldb;
matrix_info->group_size_info = group_size;
std::int64_t scratchpad_size = 0;
sycl::event e;
Ty *scratchpad = nullptr;
try {
scratchpad_size = oneapi::mkl::lapack::potrs_batch_scratchpad_size<Ty>(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), &(matrix_info->lda_info),
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info));
scratchpad = sycl::malloc_device<Ty>(scratchpad_size, queue);
e = oneapi::mkl::lapack::potrs_batch(
queue, &(matrix_info->uplo_info), &(matrix_info->n_info),
&(matrix_info->nrhs_info), (Ty **)a, &(matrix_info->lda_info), (Ty **)b,
&(matrix_info->ldb_info), 1, &(matrix_info->group_size_info),
scratchpad, scratchpad_size);
} catch (oneapi::mkl::lapack::batch_error const &be) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
"potrs_batch_scratchpad_size/potrs_batch"
<< std::endl
<< "reason: " << be.what() << std::endl
<< "number: " << be.info() << std::endl;
int i = 0;
auto &ids = be.ids();
std::vector<int> info_vec(group_size);
for (auto const &e : be.exceptions()) {
try {
std::rethrow_exception(e);
} catch (oneapi::mkl::lapack::exception &e) {
std::cerr << "Exception " << ids[i] << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl;
info_vec[i] = e.info();
i++;
}
}
queue.memcpy(info, info_vec.data(), group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
queue.memset(info, 0, group_size * sizeof(int)).wait();
std::free(matrix_info);
if (scratchpad)
sycl::free(scratchpad, queue);
return 1;
}
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] {
std::free(matrix_info);
sycl::free(scratchpad, queue);
});
});
queue.memset(info, 0, group_size * sizeof(int));
return 0;
#endif
}
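// Usage sketch: continues the potrf_batch sketch above. Once the batch has
// been factored in place, solve the corresponding systems; the shared-USM
// pointer arrays and a single right-hand side per system are illustrative
// assumptions.
template <typename T>
inline int example_potrs_batch(sycl::queue &queue, int n, T **a_array, int lda,
                               T **b_array, int ldb, int *info,
                               int group_size) {
  int ret = potrs_batch(queue, oneapi::mkl::uplo::lower, n, /*nrhs=*/1, a_array,
                        lda, b_array, ldb, info, group_size);
  // info is written asynchronously; wait before inspecting it on the host.
  queue.wait();
  return ret;
}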
namespace detail {
template <template <typename> typename functor_t, typename... args_t>
inline int lapack_shim(sycl::queue &q, library_data_t a_type, int *info,
std::string const &lapack_api_name, args_t &&...args) {
auto handle_lapack_exception = [&](const oneapi::mkl::lapack::exception &e) {
std::cerr << "Unexpected exception caught during call to LAPACK API: "
<< lapack_api_name << std::endl
<< "reason: " << e.what() << std::endl
<< "info: " << e.info() << std::endl
<< "detail: " << e.detail() << std::endl;
if (e.info() < std::numeric_limits<int>::min() ||
e.info() > std::numeric_limits<int>::max()) {
throw std::runtime_error("e.info() exceeds the limit of int type");
}
int info_val = static_cast<int>(e.info());
if (info)
dpct::detail::dpct_memcpy(q, info, &info_val, sizeof(int),
memcpy_direction::host_to_device)
.wait();
return 1;
};
try {
switch (a_type) {
case library_data_t::real_float: {
functor_t<float>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::real_double: {
functor_t<double>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_float: {
functor_t<std::complex<float>>()(std::forward<args_t>(args)...);
break;
}
case library_data_t::complex_double: {
functor_t<std::complex<double>>()(std::forward<args_t>(args)...);
break;
}
default:
throw std::runtime_error("the data type is unsupported");
}
} catch (oneapi::mkl::lapack::batch_error const &be) {
try {
std::rethrow_exception(be.exceptions()[0]);
} catch (oneapi::mkl::lapack::exception &e) {
return handle_lapack_exception(e);
}
} catch (oneapi::mkl::lapack::exception const &e) {
return handle_lapack_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught synchronous SYCL exception:" << std::endl
<< "reason: " << e.what() << std::endl;
if (info)
dpct::detail::dpct_memset(q, info, 0, sizeof(int)).wait();
return 1;
}
return 0;
}
template <typename T> class working_memory {
public:
working_memory(std::size_t element_number, const sycl::queue &q) : _q(q) {
_ptr = dpct::detail::dpct_malloc(element_number * sizeof(T), _q);
}
auto get_memory() {
return dpct::detail::get_memory(reinterpret_cast<T *>(_ptr));
}
auto get_ptr() {
return _ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_ptr) {
dpct::async_dpct_free({_ptr}, {_e}, _q);
}
}
private:
void *_ptr = nullptr;
sycl::event _e;
sycl::queue _q;
};
inline std::size_t byte_to_element_number(std::size_t size_in_byte,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
size_in_byte,
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)] /
8);
if (dv.rem) {
throw std::runtime_error(
"size_in_byte is not divisible by the size of element (in bytes)");
}
return dv.quot;
}
inline std::size_t element_number_to_byte(std::size_t size_in_element,
                                          dpct::library_data_t element_type) {
auto dv = std::lldiv(
dpct::detail::library_data_size[static_cast<unsigned int>(element_type)],
8);
if (dv.rem) {
throw std::runtime_error(
"the size of element (in bits) is not divisible by 8");
}
return size_in_element * dv.quot;
}
inline oneapi::mkl::jobsvd char2jobsvd(signed char job) {
switch (job) {
case 'A':
return oneapi::mkl::jobsvd::vectors;
case 'S':
return oneapi::mkl::jobsvd::somevec;
case 'O':
return oneapi::mkl::jobsvd::vectorsina;
case 'N':
return oneapi::mkl::jobsvd::novec;
default:
throw std::runtime_error("the job type is unsupported");
}
}
template <typename T> struct getrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::getrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct getrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrf(q, m, n, a_data, lda, ipiv_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
auto ipiv_data = dpct::detail::get_memory(ipiv);
std::int64_t device_ws_size = oneapi::mkl::lapack::getrs_scratchpad_size<T>(
q, trans, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::getrs(q, trans, n, nrhs, a_data, lda, ipiv_data,
b_data, ldb, device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct geqrf_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::geqrf_scratchpad_size<T>(q, m, n, lda);
}
};
template <typename T> struct geqrf_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto tau_data = dpct::detail::get_memory(reinterpret_cast<T *>(tau));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::geqrf(q, m, n, a_data, lda, tau_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct getrfnp_impl {
void operator()(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::int64_t a_stride = m * lda;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::getrfnp_batch(q, m, n, a_data, lda, a_stride, 1,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct gesvd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t u_type, std::int64_t ldu,
library_data_t vt_type, std::int64_t ldvt,
std::size_t &device_ws_size) {
device_ws_size = oneapi::mkl::lapack::gesvd_scratchpad_size<T>(
q, jobu, jobvt, m, n, lda, ldu, ldvt);
}
};
template <typename T> struct ElementType {
  using value_type = T;
};
template <typename T> struct ElementType<std::complex<T>> {
  using value_type = T;
};
template <typename T> struct gesvd_impl {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto s_data = dpct::detail::get_memory(
        reinterpret_cast<typename ElementType<T>::value_type *>(s));
auto u_data = dpct::detail::get_memory(reinterpret_cast<T *>(u));
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::gesvd(q, jobu, jobvt, m, n, a_data, lda, s_data,
u_data, ldu, vt_data, ldvt, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct gesvd_conj_impl : public gesvd_impl<T> {
void operator()(sycl::queue &q, oneapi::mkl::jobsvd jobu,
oneapi::mkl::jobsvd jobvt, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t s_type, void *s, library_data_t u_type,
void *u, std::int64_t ldu, library_data_t vt_type, void *vt,
std::int64_t ldvt, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using base = gesvd_impl<T>;
base::operator()(q, jobu, jobvt, m, n, a_type, a, lda, s_type, s, u_type, u,
ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
auto vt_data = dpct::detail::get_memory(reinterpret_cast<T *>(vt));
oneapi::mkl::blas::row_major::imatcopy(q, oneapi::mkl::transpose::conjtrans,
n, n, T(1.0f), vt_data, ldvt, ldvt);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct potrf_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
device_ws_size =
oneapi::mkl::lapack::potrf_scratchpad_size<T>(q, uplo, n, lda);
}
};
template <typename T> struct potrf_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::potrf(q, uplo, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
template <typename T> struct potrs_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
std::int64_t device_ws_size = oneapi::mkl::lapack::potrs_scratchpad_size<T>(
q, uplo, n, nrhs, lda, ldb);
working_memory<T> device_ws(device_ws_size, q);
auto device_ws_data = device_ws.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
oneapi::mkl::lapack::potrs(q, uplo, n, nrhs, a_data, lda, b_data, ldb,
device_ws_data, device_ws_size);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
device_ws.set_event(e);
}
};
template <typename T> struct value_type_trait {
using value_type = T;
};
template <typename T> struct value_type_trait<std::complex<T>> {
using value_type = T;
};
template <typename T> auto lamch_s() {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
if constexpr (std::is_same_v<T, float>) {
return slamch("S");
} else if constexpr (std::is_same_v<T, double>) {
return dlamch("S");
}
throw std::runtime_error("the type is unsupported");
#endif
}
#define DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
device_ws_size = oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
device_ws_size = oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
#define DISPATCH_FLOAT_FOR_CALCULATION(FUNC, ...) \
do { \
if constexpr (std::is_floating_point_v<T>) { \
oneapi::mkl::lapack::sy##FUNC(__VA_ARGS__); \
} else { \
oneapi::mkl::lapack::he##FUNC(__VA_ARGS__); \
} \
} while (0)
template <typename T> struct syheevx_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, void *vl, void *vu,
std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evx_scratchpad_size<T>, q, jobz, range,
uplo, n, lda, vl_value, vu_value, il, iu,
abstol, lda);
#endif
}
};
template <typename T> constexpr library_data_t get_library_data_t_from_type() {
if constexpr (std::is_same_v<T, float>) {
return library_data_t::real_float;
} else if constexpr (std::is_same_v<T, double>) {
return library_data_t::real_double;
} else if constexpr (std::is_same_v<T, sycl::float2> ||
std::is_same_v<T, std::complex<float>>) {
return library_data_t::complex_float;
} else if constexpr (std::is_same_v<T, sycl::double2> ||
std::is_same_v<T, std::complex<double>>) {
return library_data_t::complex_double;
}
throw std::runtime_error("the type is unsupported");
}
template <typename T> struct syheevx_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(evx, q, jobz, range, uplo, n, a_data, lda,
vl_value, vu_value, il, iu, abstol,
m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvx_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, std::int64_t lda, std::int64_t ldb, void *vl,
void *vu, std::int64_t il, std::int64_t iu,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvx_scratchpad_size<T>, q, itype, jobz,
range, uplo, n, lda, ldb, vl_value,
vu_value, il, iu, abstol, lda);
#endif
}
};
template <typename T> struct syhegvx_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::compz jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, void *a, std::int64_t lda, void *b,
std::int64_t ldb, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
working_memory<T> z(n * lda, q);
working_memory<std::int64_t> m_device(1, q);
auto z_data = z.get_memory();
auto m_device_data = m_device.get_memory();
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto vl_value = *reinterpret_cast<value_t *>(vl);
auto vu_value = *reinterpret_cast<value_t *>(vu);
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
auto abstol = 2 * lamch_s<value_t>();
DISPATCH_FLOAT_FOR_CALCULATION(gvx, q, itype, jobz, range, uplo, n, a_data,
lda, b_data, ldb, vl_value, vu_value, il, iu,
abstol, m_device_data, w_data, z_data, lda,
device_ws_data, device_ws_size);
dpct::async_dpct_memcpy(a, z.get_ptr(), n * lda * sizeof(T),
memcpy_direction::device_to_device, q);
dpct::async_dpct_memcpy(m, m_device.get_ptr(), sizeof(std::int64_t),
memcpy_direction::device_to_host, q);
sycl::event e = dpct::detail::dpct_memset(q, info, 0, sizeof(int));
z.set_event(e);
m_device.set_event(e);
#endif
}
};
template <typename T> struct syhegvd_scratchpad_size_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::int64_t ldb, std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(gvd_scratchpad_size<T>, q, itype, jobz,
uplo, n, lda, ldb);
}
};
template <typename T> struct syhegvd_impl {
void operator()(sycl::queue &q, std::int64_t itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *b, std::int64_t ldb, void *w,
void *device_ws, std::size_t device_ws_size,
int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto b_data = dpct::detail::get_memory(reinterpret_cast<T *>(b));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(gvd, q, itype, jobz, uplo, n, a_data, lda,
b_data, ldb, w_data, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
inline oneapi::mkl::compz job2compz(const oneapi::mkl::job &job) {
oneapi::mkl::compz ret;
if (job == oneapi::mkl::job::novec) {
ret = oneapi::mkl::compz::novectors;
} else if (job == oneapi::mkl::job::vec) {
ret = oneapi::mkl::compz::vectors;
} else {
throw std::runtime_error("the job type is unsupported");
}
return ret;
}
template <typename T> struct syheev_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, std::int64_t lda,
std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(ev_scratchpad_size<T>, q, jobz, uplo, n,
lda);
#endif
}
};
template <typename T> struct syheev_impl {
void operator()(sycl::queue &q, oneapi::mkl::compz jobz,
oneapi::mkl::uplo uplo, std::int64_t n, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(ev, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
template <typename T> struct syheevd_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, std::int64_t lda,
std::size_t &device_ws_size) {
DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE(evd_scratchpad_size<T>, q, jobz, uplo, n,
lda);
}
};
template <typename T> struct syheevd_impl {
void operator()(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
using value_t = typename value_type_trait<T>::value_type;
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
auto w_data = dpct::detail::get_memory(reinterpret_cast<value_t *>(w));
DISPATCH_FLOAT_FOR_CALCULATION(evd, q, jobz, uplo, n, a_data, lda, w_data,
device_ws_data, device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
}
};
#undef DISPATCH_FLOAT_FOR_SCRATCHPAD_SIZE
#undef DISPATCH_FLOAT_FOR_CALCULATION
template <typename T> struct trtri_scratchpad_size_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t &device_ws_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
device_ws_size =
oneapi::mkl::lapack::trtri_scratchpad_size<T>(q, uplo, diag, n, lda);
#endif
}
};
template <typename T> struct trtri_impl {
void operator()(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n, library_data_t a_type,
void *a, std::int64_t lda, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error(
"The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
auto a_data = dpct::detail::get_memory(reinterpret_cast<T *>(a));
auto device_ws_data =
dpct::detail::get_memory(reinterpret_cast<T *>(device_ws));
oneapi::mkl::lapack::trtri(q, uplo, diag, n, a_data, lda, device_ws_data,
device_ws_size);
dpct::detail::dpct_memset(q, info, 0, sizeof(int));
#endif
}
};
} // namespace detail
/// Computes the size of workspace memory of getrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int getrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::getrf_scratchpad_size_impl>(
q, a_type, nullptr, "getrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
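// Usage sketch: query the getrf workspace for a single-precision m-by-n matrix
// and allocate it in device USM. Because device_ws_size is reported in bytes,
// the untyped sycl::malloc_device overload is used; the helper name and the
// element type are illustrative assumptions.
inline void *example_alloc_getrf_ws(sycl::queue &q, std::int64_t m,
                                    std::int64_t n, std::int64_t lda,
                                    std::size_t &device_ws_bytes) {
  getrf_scratchpad_size(q, m, n, library_data_t::real_float, lda,
                        &device_ws_bytes);
  return sycl::malloc_device(device_ws_bytes, q);
}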
/// Computes the LU factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by L and U. The unit
/// diagonal elements of L are not stored.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] ipiv The pivot indices. If \p ipiv is nullptr, non-pivoting
/// LU factorization is computed.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
std::int64_t *ipiv, void *device_ws,
std::size_t device_ws_size, int *info) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
if (ipiv == nullptr) {
return detail::lapack_shim<detail::getrfnp_impl>(
q, a_type, info, "getrfnp_batch", q, m, n, a_type, a, lda, ipiv,
device_ws, device_ws_size_in_element_number, info);
}
return detail::lapack_shim<detail::getrf_impl>(
q, a_type, info, "getrf", q, m, n, a_type, a, lda, ipiv, device_ws,
device_ws_size_in_element_number, info);
#endif
}
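// Usage sketch: passing a null ipiv selects the non-pivoting path documented
// above (getrfnp_batch with a batch of one). The square size and
// single-precision type are illustrative assumptions; device_ws must have been
// sized with getrf_scratchpad_size.
inline int example_getrf_no_pivot(sycl::queue &q, std::int64_t n, float *a,
                                  std::int64_t lda, void *device_ws,
                                  std::size_t device_ws_bytes, int *info) {
  return getrf(q, n, n, library_data_t::real_float, a, lda, /*ipiv=*/nullptr,
               device_ws, device_ws_bytes, info);
}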
/// Solves a system of linear equations with a LU-factored square coefficient
/// matrix, with multiple right-hand sides.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] trans Indicates the form of the linear equation.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] a The LU-factored matrix A, as computed by getrf.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ipiv The pivot indices.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int getrs(sycl::queue &q, oneapi::mkl::transpose trans, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, std::int64_t *ipiv, library_data_t b_type,
void *b, std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::getrs_impl>(
q, a_type, info, "getrs_scratchpad_size/getrs", q, trans, n, nrhs, a_type,
a, lda, ipiv, b_type, b, ldb, info);
}
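// Usage sketch (illustrative, continuing the hypothetical getrf example above):
// solve A * X = B with the LU factors and pivots held in dev_a and dev_ipiv.
// dev_b is an assumed device buffer holding the right-hand sides column-major.
//
//   std::int64_t nrhs = 2, ldb = lda;
//   double *dev_b = sycl::malloc_device<double>(ldb * nrhs, q);
//   // ... copy the right-hand sides into dev_b ...
//   dpct::lapack::getrs(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//                       dpct::library_data_t::real_double, dev_a, lda, dev_ipiv,
//                       dpct::library_data_t::real_double, dev_b, ldb, dev_info);
//   q.wait(); // dev_b now holds the solution X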
/// Computes the size of workspace memory of geqrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int geqrf_scratchpad_size(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::geqrf_scratchpad_size_impl>(
q, a_type, nullptr, "geqrf_scratchpad_size", q, m, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the QR factorization of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] tau_type The data type of the array tau.
/// \param [out] tau The array of scalars that define the elementary reflectors
/// for the matrix Q in its decomposition as a product of elementary reflectors.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int geqrf(sycl::queue &q, std::int64_t m, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t tau_type, void *tau, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::geqrf_impl>(
q, a_type, info, "geqrf", q, m, n, a_type, a, lda, tau_type, tau,
device_ws, device_ws_size_in_element_number, info);
}
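// Usage sketch (illustrative, not part of the original header): QR-factorize a
// tall double-precision matrix. The buffer names are hypothetical; tau holds
// min(m, n) scalars of the same type as A.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t m = 6, n = 3, lda = 6;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   double *dev_tau = sycl::malloc_device<double>(std::min(m, n), q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   dpct::lapack::geqrf_scratchpad_size(
//       q, m, n, dpct::library_data_t::real_double, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::geqrf(q, m, n, dpct::library_data_t::real_double, dev_a, lda,
//                       dpct::library_data_t::real_double, dev_tau, dev_ws,
//                       ws_bytes, dev_info);
//   q.wait();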
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, signed char jobu,
signed char jobvt, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu_enum, jobvt_enum, m,
n, a_type, lda, u_type, ldu, vt_type, ldvt, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the size of workspace memory of gesvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors)
/// are returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] u_type The data type of the matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] host_ws_size The host workspace size as a number of elements
/// of type \p a_type. Currently the value is always zero.
inline int gesvd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
std::int64_t all_vec, std::int64_t m,
std::int64_t n, library_data_t a_type,
std::int64_t lda, library_data_t u_type,
std::int64_t ldu, library_data_t vt_type,
std::int64_t ldvt, int *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
std::size_t device_ws_size_64;
int ret = detail::lapack_shim<detail::gesvd_scratchpad_size_impl>(
q, a_type, nullptr, "gesvd_scratchpad_size", q, jobu, jobvt, m, n, a_type,
lda, u_type, ldu, vt_type, ldvt, device_ws_size_64);
*device_ws_size = device_ws_size_64;
return ret;
}
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobu Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] jobvt Must be 'A' (representing jobsvd::vectors), 'S'
/// (representing jobsvd::somevec), 'O' (representing jobsvd::vectorsina) or 'N'
/// (representing jobsvd::novec).
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A and it will be overwritten according
/// to \p jobu and \p jobvt.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, signed char jobu, signed char jobvt,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu_enum = detail::char2jobsvd(jobu);
oneapi::mkl::jobsvd jobvt_enum = detail::char2jobsvd(jobvt);
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::gesvd_impl>(
q, a_type, info, "gesvd", q, jobu_enum, jobvt_enum, m, n, a_type, a, lda,
s_type, s, u_type, u, ldu, vt_type, vt, ldvt, device_ws,
device_ws_size_in_element_number, info);
}
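// Usage sketch (illustrative, not part of the original header): full SVD of a
// 5-by-3 double matrix with jobu = jobvt = 'A'. All buffer names are
// hypothetical; library_data_t::real_double is assumed to be the
// double-precision enumerator from lib_common_utils.hpp.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t m = 5, n = 3, lda = 5, ldu = 5, ldvt = 3;
//   auto rd = dpct::library_data_t::real_double;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   double *dev_s = sycl::malloc_device<double>(std::min(m, n), q);
//   double *dev_u = sycl::malloc_device<double>(ldu * m, q);
//   double *dev_vt = sycl::malloc_device<double>(ldvt * n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   dpct::lapack::gesvd_scratchpad_size(q, 'A', 'A', m, n, rd, lda, rd, ldu,
//                                       rd, ldvt, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::gesvd(q, 'A', 'A', m, n, rd, dev_a, lda, rd, dev_s, rd,
//                       dev_u, ldu, rd, dev_vt, ldvt, dev_ws, ws_bytes,
//                       dev_info);
//   q.wait();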
/// Computes the singular value decomposition of a general m-by-n matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::vec or job::novec.
/// \param [in] all_vec Only has an effect when \p jobz is job::vec. If the
/// value is zero, all m columns of U are returned in the matrix U; otherwise
/// the first min(\p m, \p n) columns of U (the left singular vectors)
/// are returned in the matrix U.
/// \param [in] m The number of rows in the matrix A.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. It is overwritten according
/// to \p jobz and \p all_vec.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] s_type The data type of the matrix S.
/// \param [out] s The output matrix S.
/// \param [in] u_type The data type of the matrix U.
/// \param [out] u The output matrix U.
/// \param [in] ldu The leading dimension of the matrix U.
/// \param [in] vt_type The data type of the matrix VT.
/// \param [out] vt The output matrix VT.
/// \param [in] ldvt The leading dimension of the matrix VT.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \p a_type.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int gesvd(sycl::queue &q, oneapi::mkl::job jobz, std::int64_t all_vec,
std::int64_t m, std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, library_data_t s_type, void *s,
library_data_t u_type, void *u, std::int64_t ldu,
library_data_t vt_type, void *vt, std::int64_t ldvt,
void *device_ws, std::size_t device_ws_size, int *info) {
oneapi::mkl::jobsvd jobu;
oneapi::mkl::jobsvd jobvt;
if (jobz == oneapi::mkl::job::vec) {
if (all_vec) {
jobu = jobvt = oneapi::mkl::jobsvd::somevec;
} else {
jobu = jobvt = oneapi::mkl::jobsvd::vectors;
}
} else if (jobz == oneapi::mkl::job::novec) {
jobu = jobvt = oneapi::mkl::jobsvd::novec;
} else {
throw std::runtime_error("the job type is unsupported");
}
  int ret = detail::lapack_shim<detail::gesvd_conj_impl>(
      q, a_type, info, "gesvd", q, jobu, jobvt, m, n, a_type, a, lda, s_type, s,
      u_type, u, ldu, vt_type, vt, ldvt, device_ws, device_ws_size, info);
  return ret;
}
/// Computes the size of workspace memory of potrf function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int potrf_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type,
std::int64_t lda, std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::potrf_scratchpad_size_impl>(
q, a_type, nullptr, "potrf_scratchpad_size", q, uplo, n, a_type, lda,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the Cholesky factorization of a symmetric (Hermitian)
/// positive-definite matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The number of columns in the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. Overwritten by the Cholesky factor U
/// or L, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrf(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
void *device_ws, std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::potrf_impl>(
q, a_type, info, "potrf", q, uplo, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
}
/// Solves a system of linear equations with a Cholesky-factored symmetric
/// (Hermitian) positive-definite coefficient matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A and the number of rows in matrix B.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A, containing the Cholesky factor U or
/// L computed by potrf, as specified by \p uplo.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] b_type The data type of the matrix B.
/// \param [in, out] b The matrix B, whose columns are the right-hand sides
/// for the systems of equations.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int potrs(sycl::queue &q, oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t nrhs, library_data_t a_type, void *a,
std::int64_t lda, library_data_t b_type, void *b,
std::int64_t ldb, int *info) {
return detail::lapack_shim<detail::potrs_impl>(
q, a_type, info, "potrs_scratchpad_size/potrs", q, uplo, n, nrhs, a_type,
a, lda, b_type, b, ldb, info);
}
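// Usage sketch (illustrative, not part of the original header): Cholesky
// factorization followed by a triangular solve for one right-hand side.
// Buffer names are hypothetical.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t n = 3, lda = 3, nrhs = 1, ldb = 3;
//   auto rd = dpct::library_data_t::real_double;
//   auto lower = oneapi::mkl::uplo::lower;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   double *dev_b = sycl::malloc_device<double>(ldb * nrhs, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   dpct::lapack::potrf_scratchpad_size(q, lower, n, rd, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::potrf(q, lower, n, rd, dev_a, lda, dev_ws, ws_bytes,
//                       dev_info);
//   dpct::lapack::potrs(q, lower, n, nrhs, rd, dev_a, lda, rd, dev_b, ldb,
//                       dev_info);
//   q.wait(); // dev_b now holds the solution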
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
void *vl, void *vu, std::int64_t il,
std::int64_t iu, library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, a_type, nullptr, "syevx_scratchpad_size/heevx_scratchpad_size", q,
compz_jobz, range, uplo, n, lda, vl, vu, il, iu,
device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *vl, void *vu, std::int64_t il,
std::int64_t iu, std::int64_t *m, library_data_t w_type,
void *w, void *device_ws, std::size_t device_ws_size,
int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, a_type, info, "syevx/heevx", q, compz_jobz, range, uplo, n, a_type, a,
lda, vl, vu, il, iu, m, w_type, w, device_ws,
device_ws_size_in_element_number, info);
q.wait();
return ret;
}
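// Usage sketch (illustrative, not part of the original header): all eigenvalues
// and eigenvectors of a real symmetric matrix through the type-erased byte-size
// interface. Buffer names are hypothetical. With range == rangev::all the
// vl/vu/il/iu selection parameters are ignored, but host addresses are still
// passed for vl and vu; the eigenvalue count is written to a host variable,
// mirroring the templated overload below.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t n = 4, lda = 4, il = 1, iu = n, found = 0;
//   auto rd = dpct::library_data_t::real_double;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   double *dev_w = sycl::malloc_device<double>(n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   double vl = 0.0, vu = 0.0;
//   std::size_t ws_bytes = 0;
//   dpct::lapack::syheevx_scratchpad_size(q, oneapi::mkl::job::vec,
//       oneapi::mkl::rangev::all, oneapi::mkl::uplo::lower, n, rd, lda,
//       &vl, &vu, il, iu, rd, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::all,
//       oneapi::mkl::uplo::lower, n, rd, dev_a, lda, &vl, &vu, il, iu, &found,
//       rd, dev_w, dev_ws, ws_bytes, dev_info);
//   // syheevx waits on the queue internally; found holds the eigenvalue count.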
/// Computes the size of workspace memory of syevx/heevx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int syheevx_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range,
oneapi::mkl::uplo uplo, int n, int lda,
ValueT vl, ValueT vu, int il, int iu,
int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevx_scratchpad_size/heevx_scratchpad_size", q, compz_jobz, range, uplo,
n, lda, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a
/// symmetric/Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevx(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, ValueT vl, ValueT vu, int il, int iu, int *m,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syheevx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevx/heevx", q,
compz_jobz, range, uplo, n, detail::get_library_data_t_from_type<T>(), a,
lda, &vl, &vu, il, iu, &m64,
detail::get_library_data_t_from_type<ValueT>(), w, device_ws,
device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
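// Usage sketch (illustrative, not part of the original header): the templated
// overloads take workspace sizes as element counts rather than bytes. A
// possible single-precision call with hypothetical device buffers:
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   int n = 4, lda = 4, ws_elems = 0, found = 0;
//   float vl = 0.0f, vu = 0.0f;
//   float *dev_a = sycl::malloc_device<float>(lda * n, q);
//   float *dev_w = sycl::malloc_device<float>(n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   dpct::lapack::syheevx_scratchpad_size<float>(q, oneapi::mkl::job::vec,
//       oneapi::mkl::rangev::all, oneapi::mkl::uplo::upper, n, lda, vl, vu,
//       1, n, &ws_elems);
//   float *dev_ws = sycl::malloc_device<float>(ws_elems, q);
//   dpct::lapack::syheevx(q, oneapi::mkl::job::vec, oneapi::mkl::rangev::all,
//       oneapi::mkl::uplo::upper, n, dev_a, lda, vl, vu, 1, n, &found, dev_w,
//       dev_ws, ws_elems, dev_info);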
/// Computes the size of workspace memory of sygvx/hegvx function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T, typename ValueT>
inline int
syhegvx_scratchpad_size(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo,
int n, int lda, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *device_ws_size) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvx_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvx_scratchpad_size/hegvx_scratchpad_size", q, itype, compz_jobz,
range, uplo, n, lda, ldb, &vl, &vu, il, iu, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes selected eigenvalues and, optionally, eigenvectors of a real
/// generalized symmetric/Hermitian definite eigenproblem.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] range Must be rangev::all, rangev::values or rangev::indices.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, the lower or upper triangle is
/// overwritten.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [in] vl If range == rangev::values, the lower bound of the interval
/// to be searched for eigenvalues.
/// \param [in] vu If range == rangev::values, the upper bound of the interval
/// to be searched for eigenvalues.
/// \param [in] il If range == rangev::indices, the index of the smallest
/// eigenvalue to be returned.
/// \param [in] iu If range == rangev::indices, the index of the largest
/// eigenvalue to be returned.
/// \param [out] m The total number of eigenvalues found.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvx(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::rangev range, oneapi::mkl::uplo uplo, int n,
T *a, int lda, T *b, int ldb, ValueT vl, ValueT vu, int il,
int iu, int *m, ValueT *w, T *device_ws, int device_ws_size,
int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
std::int64_t m64;
int ret = detail::lapack_shim<detail::syhegvx_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvx/hegvx", q,
itype, compz_jobz, range, uplo, n, a, lda, b, ldb, &vl, &vu, il, iu, &m64,
w, device_ws, device_ws_size, info);
q.wait();
*m = (int)m64;
return ret;
}
/// Computes the size of workspace memory of sygvd/hegvd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syhegvd_scratchpad_size(sycl::queue &q, int itype,
oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int ldb, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syhegvd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"sygvd_scratchpad_size/hegvd_scratchpad_size", q, itype, jobz, uplo, n,
lda, ldb, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real generalized
/// symmetric/Hermitian definite eigenproblem using a divide and conquer method.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] itype Must be 1, 2 or 3.
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in, out] b The input matrix B.
/// \param [in] ldb The leading dimension of the matrix B.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syhegvd(sycl::queue &q, int itype, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, T *a, int lda, T *b, int ldb,
ValueT *w, T *device_ws, int device_ws_size, int *info) {
return detail::lapack_shim<detail::syhegvd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "sygvd/hegvd", q,
itype, jobz, uplo, n, a, lda, b, ldb, w, device_ws, device_ws_size, info);
}
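// Usage sketch (illustrative, not part of the original header): generalized
// symmetric-definite eigenproblem A*x = lambda*B*x (itype = 1) in double
// precision; ws_elems is an element count. Buffer names are hypothetical.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   int n = 4, lda = 4, ldb = 4, ws_elems = 0;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   double *dev_b = sycl::malloc_device<double>(ldb * n, q);
//   double *dev_w = sycl::malloc_device<double>(n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   dpct::lapack::syhegvd_scratchpad_size<double>(q, 1, oneapi::mkl::job::vec,
//       oneapi::mkl::uplo::lower, n, lda, ldb, &ws_elems);
//   double *dev_ws = sycl::malloc_device<double>(ws_elems, q);
//   dpct::lapack::syhegvd(q, 1, oneapi::mkl::job::vec, oneapi::mkl::uplo::lower,
//                         n, dev_a, lda, dev_b, ldb, dev_w, dev_ws, ws_elems,
//                         dev_info);
//   q.wait();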
/// Computes the size of workspace memory of syev/heev function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheev_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, int n, int lda,
int *device_ws_size) {
std::size_t device_ws_size_tmp;
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
int ret = detail::lapack_shim<detail::syheev_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syev_scratchpad_size/heev_scratchpad_size", q, compz_jobz, uplo, n, lda,
device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, eigenvectors of a real symmetric
/// or Hermitian matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheev(sycl::queue &q, oneapi::mkl::job jobz, oneapi::mkl::uplo uplo,
int n, T *a, int lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
oneapi::mkl::compz compz_jobz = detail::job2compz(jobz);
return detail::lapack_shim<detail::syheev_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syev/heev", q,
compz_jobz, uplo, n, a, lda, w, device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, std::int64_t lda,
library_data_t w_type,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, a_type, nullptr, "syevd_scratchpad_size/heevd_scratchpad_size", q,
jobz, uplo, n, a_type, lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
library_data_t a_type, void *a, std::int64_t lda,
library_data_t w_type, void *w, void *device_ws,
std::size_t device_ws_size, int *info) {
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::syheevd_impl>(
q, a_type, info, "syevd/heevd", q, jobz, uplo, n, a_type, a, lda, w,
device_ws, device_ws_size_in_element_number, info);
}
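// Usage sketch (illustrative, not part of the original header): eigen-
// decomposition of a single-precision Hermitian matrix through the type-erased
// interface, where the workspace size is given in bytes. Buffer names are
// hypothetical; library_data_t::complex_float and real_float are assumed
// enumerators from lib_common_utils.hpp.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t n = 4, lda = 4;
//   std::complex<float> *dev_a =
//       sycl::malloc_device<std::complex<float>>(lda * n, q);
//   float *dev_w = sycl::malloc_device<float>(n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   dpct::lapack::syheevd_scratchpad_size(q, oneapi::mkl::job::vec,
//       oneapi::mkl::uplo::upper, n, dpct::library_data_t::complex_float, lda,
//       dpct::library_data_t::real_float, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::syheevd(q, oneapi::mkl::job::vec, oneapi::mkl::uplo::upper,
//       n, dpct::library_data_t::complex_float, dev_a, lda,
//       dpct::library_data_t::real_float, dev_w, dev_ws, ws_bytes, dev_info);
//   q.wait();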
/// Computes the size of workspace memory of syevd/heevd function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] device_ws_size The device workspace size as a number of
/// elements of type \tparam T.
template <typename T>
inline int syheevd_scratchpad_size(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n,
std::int64_t lda, int *device_ws_size) {
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::syheevd_scratchpad_size_impl>(
q, detail::get_library_data_t_from_type<T>(), nullptr,
"syevd_scratchpad_size/heevd_scratchpad_size", q, jobz, uplo, n,
detail::get_library_data_t_from_type<T>(), lda, device_ws_size_tmp);
*device_ws_size = (int)device_ws_size_tmp;
return ret;
}
/// Computes all eigenvalues and, optionally, all eigenvectors of a real
/// symmetric or Hermitian matrix using a divide and conquer algorithm.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] jobz Must be job::novec or job::vec.
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// eigenvectors.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] w_type The data type of the eigenvalues.
/// \param [out] w The eigenvalues of the matrix A in ascending order.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size as a number of
/// elements of type \tparam T.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
template <typename T, typename ValueT>
inline int syheevd(sycl::queue &q, oneapi::mkl::job jobz,
oneapi::mkl::uplo uplo, std::int64_t n, T *a,
std::int64_t lda, ValueT *w, T *device_ws,
int device_ws_size, int *info) {
return detail::lapack_shim<detail::syheevd_impl>(
q, detail::get_library_data_t_from_type<T>(), info, "syevd/heevd", q,
jobz, uplo, n, detail::get_library_data_t_from_type<T>(), a, lda, w,
device_ws, device_ws_size, info);
}
/// Computes the size of workspace memory of trtri function.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [out] device_ws_size The device workspace size in bytes.
/// \param [out] host_ws_size The host workspace size in bytes. Currently the
/// value is always zero.
inline int trtri_scratchpad_size(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::diag diag, std::int64_t n,
library_data_t a_type, std::int64_t lda,
std::size_t *device_ws_size,
std::size_t *host_ws_size = nullptr) {
if (host_ws_size)
*host_ws_size = 0;
std::size_t device_ws_size_tmp;
int ret = detail::lapack_shim<detail::trtri_scratchpad_size_impl>(
q, a_type, nullptr, "trtri_scratchpad_size", q, uplo, diag, n, a_type,
lda, device_ws_size_tmp);
*device_ws_size = detail::element_number_to_byte(device_ws_size_tmp, a_type);
return ret;
}
/// Computes the inverse of a triangular matrix.
/// \return Returns 0 if no synchronous exception, otherwise returns 1.
/// \param [in] q Device queue where computation will be performed. It must
/// have the in_order property when using the USM mode (DPCT_USM_LEVEL_NONE is
/// not defined).
/// \param [in] uplo Must be uplo::upper or uplo::lower.
/// \param [in] diag Must be diag::nonunit or diag::unit.
/// \param [in] n The order of the matrix A.
/// \param [in] a_type The data type of the matrix A.
/// \param [in, out] a The input matrix A. On exit, it is overwritten by
/// the inverse matrix of A.
/// \param [in] lda The leading dimension of the matrix A.
/// \param [in] device_ws The workspace.
/// \param [in] device_ws_size The workspace size in bytes.
/// \param [out] info If lapack synchronous exception is caught, the value
/// returned from info() method of the exception is set to \p info.
inline int trtri(sycl::queue &q, oneapi::mkl::uplo uplo, oneapi::mkl::diag diag,
std::int64_t n, library_data_t a_type, void *a,
std::int64_t lda, void *device_ws, std::size_t device_ws_size,
int *info) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::size_t device_ws_size_in_element_number =
detail::byte_to_element_number(device_ws_size, a_type);
return detail::lapack_shim<detail::trtri_impl>(
q, a_type, info, "trtri", q, uplo, diag, n, a_type, a, lda, device_ws,
device_ws_size_in_element_number, info);
#endif
}
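// Usage sketch (illustrative, not part of the original header): invert an
// upper-triangular double matrix in place. The API above throws when
// DPCT_USM_LEVEL_NONE is defined, so this sketch assumes USM is enabled.
// Buffer names are hypothetical.
//
//   sycl::queue q{sycl::property::queue::in_order()};
//   std::int64_t n = 3, lda = 3;
//   auto rd = dpct::library_data_t::real_double;
//   double *dev_a = sycl::malloc_device<double>(lda * n, q);
//   int *dev_info = sycl::malloc_device<int>(1, q);
//   std::size_t ws_bytes = 0;
//   dpct::lapack::trtri_scratchpad_size(q, oneapi::mkl::uplo::upper,
//       oneapi::mkl::diag::nonunit, n, rd, lda, &ws_bytes);
//   void *dev_ws = sycl::malloc_device<char>(ws_bytes, q);
//   dpct::lapack::trtri(q, oneapi::mkl::uplo::upper, oneapi::mkl::diag::nonunit,
//                       n, rd, dev_a, lda, dev_ws, ws_bytes, dev_info);
//   q.wait();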
} // namespace lapack
} // namespace dpct
#endif // __DPCT_LAPACK_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/fft_utils.hpp | //==---- fft_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FFT_UTILS_HPP__
#define __DPCT_FFT_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <optional>
#include <utility>
#include "lib_common_utils.hpp"
namespace dpct {
namespace fft {
/// An enumeration type to describe whether the FFT direction is forward or backward.
enum fft_direction : int {
forward = 0,
backward
};
/// An enumeration type to describe the types of FFT input and output data.
enum fft_type : int {
real_float_to_complex_float = 0,
complex_float_to_real_float,
real_double_to_complex_double,
complex_double_to_real_double,
complex_float_to_complex_float,
complex_double_to_complex_double,
};
/// A class to perform FFT calculation.
class fft_engine {
public:
/// Default constructor.
fft_engine() {}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
library_data_t input_type, long long *onembed, long long ostride,
long long odist, library_data_t output_type, long long batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<long long>(dim, n, inembed, istride, idist, input_type, onembed,
ostride, odist, output_type, batch,
direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] input_type Input data type.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] output_type Output data type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, library_data_t input_type, int *onembed,
int ostride, int odist, library_data_t output_type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
init<int>(dim, n, inembed, istride, idist, input_type, onembed, ostride,
odist, output_type, batch, direction_and_placement);
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, long long *n,
long long *inembed, long long istride, long long idist,
long long *onembed, long long ostride, long long odist,
fft_type type, long long batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int dim, int *n, int *inembed,
int istride, int idist, int *onembed, int ostride, int odist,
fft_type type, int batch, size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
commit(exec_queue, dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch, scratchpad_size,
direction_and_placement);
}
/// Commit the configuration to calculate 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n1, fft_type type, int batch,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(1);
_n[0] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 1;
_batch = batch;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
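  // Usage sketch (illustrative, not part of the original header): commit a 1-D
  // single-precision complex-to-complex plan on a hypothetical in-order queue
  // and query the workspace it needs. Only the commit call is shown here; the
  // transform itself is launched through fft_engine::compute, which the
  // comments above refer to and which is defined later in this file.
  //
  //   sycl::queue q{sycl::property::queue::in_order()};
  //   dpct::fft::fft_engine plan;
  //   size_t ws_bytes = 0;
  //   plan.commit(&q, /*n1=*/1024,
  //               dpct::fft::fft_type::complex_float_to_complex_float,
  //               /*batch=*/8, &ws_bytes,
  //               std::make_pair(dpct::fft::fft_direction::forward,
  //                              /*is_inplace=*/false));
  //   // ws_bytes now holds the scratch size required for this plan.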
/// Commit the configuration to calculate 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
  /// If this value is used to allocate memory, \p direction_and_placement needs
  /// to be specified explicitly to get the correct result.
  /// \param [in] direction_and_placement Explicitly specify the FFT direction
  /// and placement info. If this value is specified, the direction parameter
  /// will be ignored in the fft_engine::compute function. If it is not set,
  /// forward direction (if the current FFT is complex-to-complex) and out-of-place
/// (false) are set by default.
void commit(sycl::queue *exec_queue, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(2);
_n[0] = n2;
_n[1] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 2;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Commit the configuration to calculate 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] scratchpad_size The workspace size required for this FFT.
/// If this value is used to allocate memory, \p direction_and_placement needs
/// to be specified explicitly to get the correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// the forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
void commit(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
size_t *scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
_q = exec_queue;
_n.resize(3);
_n[0] = n3;
_n[1] = n2;
_n[2] = n1;
std::tie(_input_type, _output_type) = fft_type_to_data_type(type);
_dim = 3;
_is_basic = true;
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
config_and_commit_basic();
if (scratchpad_size) {
if (_is_estimate_call)
*scratchpad_size = _workspace_estimate_bytes;
else
*scratchpad_size = _workspace_bytes;
}
}
/// Create the class for calculating a 1-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// the forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
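///
/// Example (illustrative sketch; the queue setup and USM buffer names are
/// assumptions, not part of this API):
/// \code
/// sycl::queue q;
/// constexpr int N = 1024;
/// sycl::float2 *in = sycl::malloc_device<sycl::float2>(N, q);
/// sycl::float2 *out = sycl::malloc_device<sycl::float2>(N, q);
/// auto *engine = dpct::fft::fft_engine::create(
///     &q, N, dpct::fft::fft_type::complex_float_to_complex_float, 1);
/// engine->compute(in, out, dpct::fft::fft_direction::forward);
/// q.wait();
/// dpct::fft::fft_engine::destroy(engine);
/// sycl::free(in, q);
/// sycl::free(out, q);
/// \endcode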
static fft_engine *
create(sycl::queue *exec_queue, int n1, fft_type type, int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n1, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for calculating a 2-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// the forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n2, n1, type, nullptr, direction_and_placement);
return engine;
}
/// Create the class for calculating a 3-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// the forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
static fft_engine *
create(sycl::queue *exec_queue, int n3, int n2, int n1, fft_type type,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, n3, n2, n1, type, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for calculating an n-D FFT.
/// \param [in] exec_queue The queue where the calculation should be executed.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If this value is specified, the direction parameter
/// will be ignored in the fft_engine::compute function. If it is not set,
/// the forward direction (if the current FFT is complex-to-complex) and
/// out-of-place (false) are set by default.
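///
/// Example (illustrative sketch of a contiguously packed, batched 2-D
/// complex-to-complex plan; the queue and sizes are assumptions):
/// \code
/// int n[2] = {64, 128};        // n[0] outermost, n[1] innermost
/// int dist = n[0] * n[1];      // distance between consecutive batches
/// auto *engine = dpct::fft::fft_engine::create(
///     &dpct::get_default_queue(), 2, n, n /*inembed*/, 1 /*istride*/, dist,
///     n /*onembed*/, 1 /*ostride*/, dist,
///     dpct::fft::fft_type::complex_float_to_complex_float, 4 /*batch*/);
/// \endcode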
static fft_engine *
create(sycl::queue *exec_queue, int dim, int *n, int *inembed, int istride,
int idist, int *onembed, int ostride, int odist, fft_type type,
int batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = new fft_engine();
engine->commit(exec_queue, dim, n, inembed, istride, idist, onembed,
ostride, odist, type, batch, nullptr,
direction_and_placement);
return engine;
}
/// Create the class for calculating FFT without committing any configuration.
static fft_engine *create() {
fft_engine *engine = new fft_engine();
return engine;
}
/// Destroy the class for calculating FFT.
/// \param [in] engine Pointer returned from fft_engine::create.
static void destroy(fft_engine *engine) { delete engine; }
#ifdef __INTEL_MKL__
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, the forward direction (if
/// the current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void
estimate_size(int dim, long long *n, long long *inembed, long long istride,
long long idist, long long *onembed, long long ostride,
long long odist, fft_type type, long long batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating n-D FFT.
/// \param [in] dim Dimension number of the data.
/// \param [in] n Pointer to an array containing each dimension's size.
/// \param [in] inembed Pointer to an array containing each dimension's size
/// of the embedded input data.
/// \param [in] istride Stride size of the input data.
/// \param [in] idist Distance between the two batches of the input data.
/// \param [in] onembed Pointer to an array containing each dimension's size
/// of the embedded output data.
/// \param [in] ostride Stride size of the output data.
/// \param [in] odist Distance between the two batches of the output data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, the forward direction (if
/// the current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void
estimate_size(int dim, int *n, int *inembed, int istride, int idist,
int *onembed, int ostride, int odist, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), dim, n, inembed, istride, idist,
fft_type_to_data_type(type).first, onembed, ostride, odist,
fft_type_to_data_type(type).second, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 1-D FFT.
/// \param [in] n1 The size of the dimension of the data.
/// \param [in] type The FFT type.
/// \param [in] batch The number of FFT operations to perform.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT direction
/// and placement info. If it is not set, the forward direction (if the
/// current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
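///
/// Example (illustrative sketch; the sizes and the chosen direction/placement
/// pair are assumptions):
/// \code
/// size_t bytes = 0;
/// auto dir_place = std::make_pair(dpct::fft::fft_direction::forward,
///                                 false /*out-of-place*/);
/// dpct::fft::fft_engine::estimate_size(
///     1024, dpct::fft::fft_type::complex_float_to_complex_float, 1, &bytes,
///     dir_place);
/// // Allocate a workspace of `bytes` bytes, then commit an engine with the
/// // same dir_place so the estimate matches the committed configuration.
/// \endcode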
static void
estimate_size(int n1, fft_type type, int batch,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n1, type, batch,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 2-D FFT.
/// \param [in] n2 The size of the 2nd dimension (outermost) of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, the forward direction (if
/// the current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void
estimate_size(int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
/// Estimates the workspace size for calculating 3-D FFT.
/// \param [in] n3 The size of the 3rd dimension (outermost) of the data.
/// \param [in] n2 The size of the 2nd dimension of the data.
/// \param [in] n1 The size of the 1st dimension (innermost) of the data.
/// \param [in] type The FFT type.
/// \param [out] estimated_scratchpad_size The estimated workspace size
/// required for this FFT. If this value is used to allocate memory,
/// \p direction_and_placement needs to be specified explicitly to get the
/// correct result.
/// \param [in] direction_and_placement Explicitly specify the FFT
/// direction and placement info. If it is not set, the forward direction (if
/// the current FFT is complex-to-complex) and out-of-place (false) are set by
/// default.
static void
estimate_size(int n3, int n2, int n1, fft_type type,
size_t *estimated_scratchpad_size,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement = std::nullopt) {
fft_engine *engine = fft_engine::create();
engine->_is_estimate_call = true;
engine->commit(&dpct::get_default_queue(), n3, n2, n1, type,
estimated_scratchpad_size, direction_and_placement);
fft_engine::destroy(engine);
}
#endif
/// Execute the FFT calculation.
/// \param [in] input Pointer to the input data.
/// \param [out] output Pointer to the output data.
/// \param [in] direction The FFT direction.
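///
/// Example (illustrative sketch; `engine` is assumed to be committed with
/// fft_type::real_float_to_complex_float, and `d_in`/`d_out` are assumed USM
/// device pointers of suitable sizes):
/// \code
/// engine->compute(d_in, d_out, dpct::fft::fft_direction::forward);
/// \endcode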
template <typename input_t, typename output_t>
void compute(input_t *input, output_t *output, fft_direction direction) {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
} else if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
} else if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output);
}
}
template <>
void compute(float *input, sycl::float2 *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(sycl::float2 *input, float *output, fft_direction direction) {
_direction = direction;
compute_real<float, oneapi::mkl::dft::precision::SINGLE>((float *)input,
(float *)output);
}
template <>
void compute(double *input, sycl::double2 *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::double2 *input, double *output, fft_direction direction) {
_direction = direction;
compute_real<double, oneapi::mkl::dft::precision::DOUBLE>((double *)input,
(double *)output);
}
template <>
void compute(sycl::float2 *input, sycl::float2 *output,
fft_direction direction) {
compute_complex<float, oneapi::mkl::dft::precision::SINGLE>(
(float *)input, (float *)output, direction);
}
template <>
void compute(sycl::double2 *input, sycl::double2 *output,
fft_direction direction) {
compute_complex<double, oneapi::mkl::dft::precision::DOUBLE>(
(double *)input, (double *)output, direction);
}
  /// Set the user's SYCL queue for the calculation.
/// \param [in] q Pointer to the SYCL queue.
void set_queue(sycl::queue *q) { _q = q; }
#ifdef __INTEL_MKL__
  /// Set whether to use an external or the internal workspace.
/// \param [in] flag True means using internal workspace. False means using
/// external workspace.
void use_internal_workspace(bool flag = true) {
_use_external_workspace = !flag;
}
/// Specify the external workspace.
/// \param [in] ptr Pointer to the workspace.
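  ///
  /// Example (illustrative sketch of the external-workspace flow; the queue
  /// `q`, the sizes, and the allocation below are assumptions):
  /// \code
  /// engine->use_internal_workspace(false); // opt in to an external workspace
  /// size_t ws_bytes = 0;
  /// engine->commit(&q, 1024,
  ///                dpct::fft::fft_type::complex_float_to_complex_float, 1,
  ///                &ws_bytes);
  /// void *ws = sycl::malloc_device(ws_bytes, q);
  /// engine->set_workspace(ws);
  /// \endcode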
void set_workspace(void *ptr) {
if (!_use_external_workspace) {
return;
}
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sc->set_workspace(data);
}
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dc->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<float *>(ptr));
_desc_sr->set_workspace(data);
}
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
if (_q->get_device().is_gpu()) {
auto data = dpct::detail::get_memory(reinterpret_cast<double *>(ptr));
_desc_dr->set_workspace(data);
}
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
#endif
/// Get the workspace size.
/// \param [out] scratchpad_size Workspace size in bytes.
void get_workspace_size(size_t *scratchpad_size) {
if (scratchpad_size) {
*scratchpad_size = _workspace_bytes;
}
}
private:
static std::pair<library_data_t, library_data_t>
fft_type_to_data_type(fft_type type) {
switch (type) {
case fft_type::real_float_to_complex_float: {
return std::make_pair(library_data_t::real_float,
library_data_t::complex_float);
}
case fft_type::complex_float_to_real_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::real_float);
}
case fft_type::real_double_to_complex_double: {
return std::make_pair(library_data_t::real_double,
library_data_t::complex_double);
}
case fft_type::complex_double_to_real_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::real_double);
}
case fft_type::complex_float_to_complex_float: {
return std::make_pair(library_data_t::complex_float,
library_data_t::complex_float);
}
case fft_type::complex_double_to_complex_double: {
return std::make_pair(library_data_t::complex_double,
library_data_t::complex_double);
}
}
}
void config_and_commit_basic() {
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
_desc_sc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::SINGLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_sc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_sc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_sc->commit(*_q);
#endif
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
_desc_dc = std::make_shared<
oneapi::mkl::dft::descriptor<oneapi::mkl::dft::precision::DOUBLE,
oneapi::mkl::dft::domain::COMPLEX>>(_n);
std::int64_t distance = 1;
for (auto i : _n)
distance = distance * i;
_fwd_dist = distance;
_bwd_dist = distance;
_desc_dc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
distance);
_desc_dc->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dc->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dc->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dc->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace)
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
else
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
_desc_dc->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
_desc_sr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_sr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_sr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_sr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_sr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_sr);
} else {
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
#endif
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
_desc_dr = std::make_shared<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>(
_n);
if (_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)
_direction = fft_direction::forward;
else
_direction = fft_direction::backward;
_desc_dr->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS,
_batch);
#ifdef __INTEL_MKL__
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
if (_use_external_workspace) {
if (_q->get_device().is_gpu()) {
_desc_dr->set_value(
oneapi::mkl::dft::config_param::WORKSPACE,
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL);
}
}
if (_is_estimate_call) {
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES,
&_workspace_estimate_bytes);
}
} else {
_desc_dr->commit(*_q);
if (_q->get_device().is_gpu()) {
_desc_dr->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES,
&_workspace_bytes);
}
}
#else
if (_is_user_specified_dir_and_placement && _is_inplace) {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
set_stride_and_distance_basic<true>(_desc_dr);
} else {
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
#endif
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
}
void config_and_commit_advanced() {
#ifdef __INTEL_MKL__
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE); \
if (_use_external_workspace) { \
DESC->set_value(oneapi::mkl::dft::config_param::WORKSPACE, \
oneapi::mkl::dft::config_value::WORKSPACE_EXTERNAL); \
} \
if (_is_estimate_call) { \
if (_q->get_device().is_gpu()) { \
DESC->get_value( \
oneapi::mkl::dft::config_param::WORKSPACE_ESTIMATE_BYTES, \
&_workspace_estimate_bytes); \
} \
} else { \
DESC->commit(*_q); \
    if (_q->get_device().is_gpu()) {                                           \
DESC->get_value(oneapi::mkl::dft::config_param::WORKSPACE_BYTES, \
&_workspace_bytes); \
} \
} \
}
#else
#define CONFIG_AND_COMMIT(DESC, PREC, DOM, TYPE) \
{ \
DESC = std::make_shared<oneapi::mkl::dft::descriptor< \
oneapi::mkl::dft::precision::PREC, oneapi::mkl::dft::domain::DOM>>( \
_n); \
set_stride_advanced(DESC); \
DESC->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _fwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _bwd_dist); \
DESC->set_value(oneapi::mkl::dft::config_param::NUMBER_OF_TRANSFORMS, \
_batch); \
if (_is_user_specified_dir_and_placement && _is_inplace) \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::INPLACE); \
else \
DESC->set_value(oneapi::mkl::dft::config_param::PLACEMENT, \
oneapi::mkl::dft::config_value::NOT_INPLACE); \
DESC->commit(*_q); \
}
#endif
if (_input_type == library_data_t::complex_float &&
_output_type == library_data_t::complex_float) {
CONFIG_AND_COMMIT(_desc_sc, SINGLE, COMPLEX, float);
} else if (_input_type == library_data_t::complex_double &&
_output_type == library_data_t::complex_double) {
CONFIG_AND_COMMIT(_desc_dc, DOUBLE, COMPLEX, double);
} else if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::complex_float &&
_output_type == library_data_t::real_float)) {
CONFIG_AND_COMMIT(_desc_sr, SINGLE, REAL, float);
} else if ((_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double) ||
(_input_type == library_data_t::complex_double &&
_output_type == library_data_t::real_double)) {
CONFIG_AND_COMMIT(_desc_dr, DOUBLE, REAL, double);
} else {
throw sycl::exception(sycl::make_error_code(sycl::errc::invalid),
"invalid fft type");
}
#undef CONFIG_AND_COMMIT
}
template <typename T>
void init(int dim, T *n, T *inembed, T istride, T idist,
library_data_t input_type, T *onembed, T ostride, T odist,
library_data_t output_type, T batch,
std::optional<std::pair<fft_direction, bool /*is_inplace*/>>
direction_and_placement) {
if (direction_and_placement.has_value()) {
_is_user_specified_dir_and_placement = true;
_direction = direction_and_placement->first;
_is_inplace = direction_and_placement->second;
}
_n.resize(dim);
_inembed.resize(dim);
_onembed.resize(dim);
_input_type = input_type;
_output_type = output_type;
for (int i = 0; i < dim; i++) {
_n[i] = n[i];
}
if (inembed && onembed) {
for (int i = 0; i < dim; i++) {
_inembed[i] = inembed[i];
_onembed[i] = onembed[i];
}
_istride = istride;
_ostride = ostride;
if ((_input_type == library_data_t::real_float &&
_output_type == library_data_t::complex_float) ||
(_input_type == library_data_t::real_double &&
_output_type == library_data_t::complex_double)) {
_fwd_dist = idist;
_bwd_dist = odist;
} else if ((_output_type == library_data_t::real_float &&
_input_type == library_data_t::complex_float) ||
(_output_type == library_data_t::real_double &&
_input_type == library_data_t::complex_double)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
if (_is_user_specified_dir_and_placement &&
(_direction == fft_direction::backward)) {
_fwd_dist = odist;
_bwd_dist = idist;
} else {
_fwd_dist = idist;
_bwd_dist = odist;
}
}
} else {
_is_basic = true;
}
_batch = batch;
_dim = dim;
if (_is_basic)
config_and_commit_basic();
else
config_and_commit_advanced();
}
template <class Desc_t>
void set_stride_advanced(std::shared_ptr<Desc_t> desc) {
if (_dim == 1) {
std::int64_t input_stride[2] = {0, _istride};
std::int64_t output_stride[2] = {0, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 2) {
std::int64_t input_stride[3] = {0, _inembed[1] * _istride, _istride};
std::int64_t output_stride[3] = {0, _onembed[1] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
} else if (_dim == 3) {
std::int64_t input_stride[4] = {0, _inembed[2] * _inembed[1] * _istride,
_inembed[2] * _istride, _istride};
std::int64_t output_stride[4] = {0, _onembed[2] * _onembed[1] * _ostride,
_onembed[2] * _ostride, _ostride};
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES,
input_stride);
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES,
output_stride);
}
}
template <class Desc_t> void swap_distance(std::shared_ptr<Desc_t> desc) {
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE, _bwd_dist);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE, _fwd_dist);
std::int64_t temp = _bwd_dist;
_bwd_dist = _fwd_dist;
_fwd_dist = temp;
}
template <bool Is_inplace, class Desc_t>
void set_stride_and_distance_basic(std::shared_ptr<Desc_t> desc) {
std::int64_t forward_distance = 0;
std::int64_t backward_distance = 0;
#define SET_STRIDE \
{ \
if (_direction == fft_direction::forward) { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
real_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
complex_stride); \
} else { \
desc->set_value(oneapi::mkl::dft::config_param::INPUT_STRIDES, \
complex_stride); \
desc->set_value(oneapi::mkl::dft::config_param::OUTPUT_STRIDES, \
real_stride); \
} \
}
if (_dim == 1) {
if constexpr (Is_inplace) {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = 2 * (_n[0] / 2 + 1);
backward_distance = _n[0] / 2 + 1;
} else {
std::int64_t real_stride[2] = {0, 1};
std::int64_t complex_stride[2] = {0, 1};
SET_STRIDE;
forward_distance = _n[0];
backward_distance = _n[0] / 2 + 1;
}
} else if (_dim == 2) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, 2 * (_n[1] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * 2 * (_n[1] / 2 + 1);
backward_distance = _n[0] * (_n[1] / 2 + 1);
} else {
std::int64_t complex_stride[3] = {0, _n[1] / 2 + 1, 1};
std::int64_t real_stride[3] = {0, _n[1], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1];
backward_distance = _n[0] * (_n[1] / 2 + 1);
}
} else if (_dim == 3) {
if constexpr (Is_inplace) {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * 2 * (_n[2] / 2 + 1),
2 * (_n[2] / 2 + 1), 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * 2 * (_n[2] / 2 + 1);
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
} else {
std::int64_t complex_stride[4] = {0, _n[1] * (_n[2] / 2 + 1),
_n[2] / 2 + 1, 1};
std::int64_t real_stride[4] = {0, _n[1] * _n[2], _n[2], 1};
SET_STRIDE;
forward_distance = _n[0] * _n[1] * _n[2];
backward_distance = _n[0] * _n[1] * (_n[2] / 2 + 1);
}
}
#undef SET_STRIDE
desc->set_value(oneapi::mkl::dft::config_param::FWD_DISTANCE,
forward_distance);
desc->set_value(oneapi::mkl::dft::config_param::BWD_DISTANCE,
backward_distance);
}
#define COMPUTE(DESC) \
{ \
if (_is_inplace) { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input); \
} \
} else { \
auto data_input = \
dpct::detail::get_memory(reinterpret_cast<T *>(input)); \
auto data_output = \
dpct::detail::get_memory(reinterpret_cast<T *>(output)); \
if (_direction == fft_direction::forward) { \
oneapi::mkl::dft::compute_forward(*DESC, data_input, data_output); \
} else { \
oneapi::mkl::dft::compute_backward(*DESC, data_input, data_output); \
} \
} \
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_complex(T *input, T *output, fft_direction direction) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
// The complex domain descriptor need different config values if the
// FFT direction or placement is different.
// Here we check the conditions, and new config values are set and
// re-committed if needed.
if (direction != _direction || is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
if (direction != _direction) {
swap_distance(_desc_sc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_sc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_sc->commit(*_q);
} else {
if (direction != _direction) {
swap_distance(_desc_dc);
_direction = direction;
}
if (is_this_compute_inplace != _is_inplace) {
_is_inplace = is_this_compute_inplace;
#ifdef __INTEL_MKL__
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
}
#else
if (_is_inplace) {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
} else {
_desc_dc->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
}
#endif
}
_desc_dc->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sc);
} else {
COMPUTE(_desc_dc);
}
}
template <class T, oneapi::mkl::dft::precision Precision>
void compute_real(T *input, T *output) {
bool is_this_compute_inplace = input == output;
if (!_is_user_specified_dir_and_placement) {
// The real domain descriptor need different config values if the
// FFT placement is different.
// Here we check the condition, and new config values are set and
// re-committed if needed.
if (is_this_compute_inplace != _is_inplace) {
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_sr);
} else {
#ifdef __INTEL_MKL__
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_sr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_sr);
}
_desc_sr->commit(*_q);
} else {
_is_inplace = is_this_compute_inplace;
if (_is_inplace) {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<true>(_desc_dr);
} else {
#ifdef __INTEL_MKL__
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
DFTI_CONFIG_VALUE::DFTI_NOT_INPLACE);
#else
_desc_dr->set_value(oneapi::mkl::dft::config_param::PLACEMENT,
oneapi::mkl::dft::config_value::NOT_INPLACE);
#endif
if (_is_basic)
set_stride_and_distance_basic<false>(_desc_dr);
}
_desc_dr->commit(*_q);
}
}
}
if constexpr (Precision == oneapi::mkl::dft::precision::SINGLE) {
COMPUTE(_desc_sr);
} else {
COMPUTE(_desc_dr);
}
}
#undef COMPUTE
private:
sycl::queue *_q = nullptr;
int _dim;
std::vector<std::int64_t> _n;
std::vector<std::int64_t> _inembed;
std::int64_t _istride;
std::int64_t _fwd_dist;
library_data_t _input_type;
std::vector<std::int64_t> _onembed;
std::int64_t _ostride;
std::int64_t _bwd_dist;
library_data_t _output_type;
std::int64_t _batch = 1;
bool _is_basic = false;
bool _is_inplace = false;
fft_direction _direction = fft_direction::forward;
bool _is_user_specified_dir_and_placement = false;
bool _use_external_workspace = false;
void *_external_workspace_ptr = nullptr;
size_t _workspace_bytes = 0;
bool _is_estimate_call = false;
size_t _workspace_estimate_bytes = 0;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::REAL>>
_desc_sr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::REAL>>
_desc_dr;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::SINGLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_sc;
std::shared_ptr<oneapi::mkl::dft::descriptor<
oneapi::mkl::dft::precision::DOUBLE, oneapi::mkl::dft::domain::COMPLEX>>
_desc_dc;
};
using fft_engine_ptr = fft_engine *;
} // namespace fft
} // namespace dpct
#endif // __DPCT_FFT_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/lib_common_utils.hpp | //==---- lib_common_utils.hpp ---------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_LIB_COMMON_UTILS_HPP__
#define __DPCT_LIB_COMMON_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include "memory.hpp"
#include "util.hpp"
namespace dpct {
namespace detail {
template <typename T> inline auto get_memory(T *x) {
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<std::remove_cv_t<T>>(x);
#else
return x;
#endif
}
template <typename T>
inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q) {
using Ty = typename DataType<T>::T2;
Ty s_h;
detail::dpct_memcpy(q, (void *)&s_h, (void *)s, sizeof(T), automatic).wait();
return s_h;
}
}
enum class version_field : int {
major,
minor,
update,
patch
};
/// Returns the requested field of the Intel(R) oneAPI Math Kernel Library version.
/// \param field The version information field (major, minor, update or patch).
/// \param result The result value.
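///
/// Example (illustrative sketch):
/// \code
/// int major = 0;
/// dpct::mkl_get_version(dpct::version_field::major, &major);
/// \endcode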
inline void mkl_get_version(version_field field, int *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
MKLVersion version;
mkl_get_version(&version);
if (version_field::major == field) {
*result = version.MajorVersion;
} else if (version_field::minor == field) {
*result = version.MinorVersion;
} else if (version_field::update == field) {
*result = version.UpdateVersion;
} else if (version_field::patch == field) {
*result = 0;
} else {
throw std::runtime_error("unknown field");
}
#endif
}
enum class library_data_t : unsigned char {
real_float = 0,
complex_float,
real_double,
complex_double,
real_half,
complex_half,
real_bfloat16,
complex_bfloat16,
real_int4,
complex_int4,
real_uint4,
complex_uint4,
real_int8,
complex_int8,
real_uint8,
complex_uint8,
real_int16,
complex_int16,
real_uint16,
complex_uint16,
real_int32,
complex_int32,
real_uint32,
complex_uint32,
real_int64,
complex_int64,
real_uint64,
complex_uint64,
real_int8_4,
real_int8_32,
real_uint8_4,
library_data_t_size
};
namespace detail {
template <typename ArgT>
inline constexpr std::uint64_t get_type_combination_id(ArgT Val) {
static_assert((unsigned char)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
return (std::uint64_t)Val;
}
template <typename FirstT, typename... RestT>
inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
RestT... RestVal) {
static_assert((std::uint8_t)library_data_t::library_data_t_size <=
std::numeric_limits<unsigned char>::max() &&
"library_data_t size exceeds limit.");
static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
}
inline constexpr std::size_t library_data_size[] = {
8 * sizeof(float), // real_float
8 * sizeof(std::complex<float>), // complex_float
8 * sizeof(double), // real_double
8 * sizeof(std::complex<double>), // complex_double
8 * sizeof(sycl::half), // real_half
8 * sizeof(std::complex<sycl::half>), // complex_half
16, // real_bfloat16
16 * 2, // complex_bfloat16
4, // real_int4
4 * 2, // complex_int4
4, // real_uint4
4 * 2, // complex_uint4
8, // real_int8
8 * 2, // complex_int8
8, // real_uint8
8 * 2, // complex_uint8
16, // real_int16
16 * 2, // complex_int16
16, // real_uint16
16 * 2, // complex_uint16
32, // real_int32
32 * 2, // complex_int32
32, // real_uint32
32 * 2, // complex_uint32
64, // real_int64
64 * 2, // complex_int64
64, // real_uint64
64 * 2, // complex_uint64
8, // real_int8_4
8, // real_int8_32
8 // real_uint8_4
};
} // namespace detail
} // namespace dpct
#endif // __DPCT_LIB_COMMON_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/sparse_utils.hpp | //==---- sparse_utils.hpp -------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_SPARSE_UTILS_HPP__
#define __DPCT_SPARSE_UTILS_HPP__
#include "lib_common_utils.hpp"
#include <oneapi/mkl.hpp>
#include <sycl/sycl.hpp>
namespace dpct {
namespace sparse {
/// Describes properties of a sparse matrix.
/// The properties are matrix type, diag, uplo and index base.
class matrix_info {
public:
/// Matrix types are:
/// ge: General matrix
/// sy: Symmetric matrix
/// he: Hermitian matrix
/// tr: Triangular matrix
enum class matrix_type : int { ge = 0, sy, he, tr };
auto get_matrix_type() const { return _matrix_type; }
auto get_diag() const { return _diag; }
auto get_uplo() const { return _uplo; }
auto get_index_base() const { return _index_base; }
void set_matrix_type(matrix_type mt) { _matrix_type = mt; }
void set_diag(oneapi::mkl::diag d) { _diag = d; }
void set_uplo(oneapi::mkl::uplo u) { _uplo = u; }
void set_index_base(oneapi::mkl::index_base ib) { _index_base = ib; }
private:
matrix_type _matrix_type = matrix_type::ge;
oneapi::mkl::diag _diag = oneapi::mkl::diag::nonunit;
oneapi::mkl::uplo _uplo = oneapi::mkl::uplo::upper;
oneapi::mkl::index_base _index_base = oneapi::mkl::index_base::zero;
};
/// Computes a CSR format sparse matrix-dense vector product.
/// y = alpha * op(A) * x + beta * y
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] num_rows Number of rows of the matrix A.
/// \param [in] num_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p num_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] x Data of the vector x.
/// \param [in] beta Scaling factor for the vector y.
/// \param [in, out] y Data of the vector y.
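///
/// Example (illustrative sketch; `q` is assumed to be an in-order queue and
/// the CSR arrays USM device allocations):
/// \code
/// auto info = std::make_shared<dpct::sparse::matrix_info>();
/// info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::ge);
/// info->set_index_base(oneapi::mkl::index_base::zero);
/// float alpha = 1.0f, beta = 0.0f;
/// dpct::sparse::csrmv(q, oneapi::mkl::transpose::nontrans, rows, cols,
///                     &alpha, info, d_val, d_row_ptr, d_col_ind, d_x, &beta,
///                     d_y);
/// \endcode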
template <typename T>
void csrmv(sycl::queue &queue, oneapi::mkl::transpose trans, int num_rows,
int num_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *x, const T *beta,
T *y) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, num_rows,
num_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(x)));
auto data_y = dpct::detail::get_memory(reinterpret_cast<Ty *>(y));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::optimize_gemv(queue, trans, *sparse_matrix_handle);
oneapi::mkl::sparse::gemv(queue, trans, alpha_value, *sparse_matrix_handle,
data_x, beta_value, data_y);
break;
}
case matrix_info::matrix_type::sy: {
oneapi::mkl::sparse::symv(queue, info->get_uplo(), alpha_value,
*sparse_matrix_handle, data_x, beta_value,
data_y);
break;
}
case matrix_info::matrix_type::tr: {
oneapi::mkl::sparse::optimize_trmv(queue, info->get_uplo(), trans,
info->get_diag(), *sparse_matrix_handle);
oneapi::mkl::sparse::trmv(queue, info->get_uplo(), trans, info->get_diag(),
alpha_value, *sparse_matrix_handle, data_x,
beta_value, data_y);
break;
}
default:
throw std::runtime_error(
"the spmv does not support matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
/// Computes a CSR format sparse matrix-dense matrix product.
/// C = alpha * op(A) * B + beta * C
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the matrix A.
/// \param [in] sparse_rows Number of rows of the matrix A.
/// \param [in] dense_cols Number of columns of the matrix B or C.
/// \param [in] sparse_cols Number of columns of the matrix A.
/// \param [in] alpha Scaling factor for the matrix A.
/// \param [in] info Matrix info of the matrix A.
/// \param [in] val An array containing the non-zero elements of the matrix A.
/// \param [in] row_ptr An array of length \p sparse_rows + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] b Data of the matrix B.
/// \param [in] ldb Leading dimension of the matrix B.
/// \param [in] beta Scaling factor for the matrix C.
/// \param [in, out] c Data of the matrix C.
/// \param [in] ldc Leading dimension of the matrix C.
template <typename T>
void csrmm(sycl::queue &queue, oneapi::mkl::transpose trans, int sparse_rows,
int dense_cols, int sparse_cols, const T *alpha,
const std::shared_ptr<matrix_info> info, const T *val,
const int *row_ptr, const int *col_ind, const T *b, int ldb,
const T *beta, T *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename dpct::DataType<T>::T2;
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
oneapi::mkl::sparse::matrix_handle_t *sparse_matrix_handle =
new oneapi::mkl::sparse::matrix_handle_t;
oneapi::mkl::sparse::init_matrix_handle(sparse_matrix_handle);
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, *sparse_matrix_handle, sparse_rows,
sparse_cols, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(b)));
auto data_c = dpct::detail::get_memory(reinterpret_cast<Ty *>(c));
switch (info->get_matrix_type()) {
case matrix_info::matrix_type::ge: {
oneapi::mkl::sparse::gemm(queue, oneapi::mkl::layout::row_major, trans,
oneapi::mkl::transpose::nontrans, alpha_value,
*sparse_matrix_handle, data_b, dense_cols, ldb,
beta_value, data_c, ldc);
break;
}
default:
throw std::runtime_error(
"the csrmm does not support matrix_info::matrix_type::sy, "
"matrix_info::matrix_type::tr and matrix_info::matrix_type::he");
}
sycl::event e =
oneapi::mkl::sparse::release_matrix_handle(queue, sparse_matrix_handle);
queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete sparse_matrix_handle; });
});
#endif
}
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Saving the optimization information for solving a system of linear
/// equations.
class optimize_info {
public:
/// Constructor
optimize_info() { oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle); }
/// Destructor
~optimize_info() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
private:
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
};
#endif
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Performs internal optimizations for solving a system of linear equations for
/// a CSR format sparse matrix.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans The operation applied to the sparse matrix.
/// \param [in] row_col Number of rows of the sparse matrix.
/// \param [in] info Matrix info of the sparse matrix.
/// \param [in] val An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr An array of length \p row_col + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] optimize_info The result of the optimizations.
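///
/// Example (illustrative sketch; a lower-triangular CSR matrix, an in-order
/// queue `q`, and USM device arrays are assumed):
/// \code
/// auto info = std::make_shared<dpct::sparse::matrix_info>();
/// info->set_matrix_type(dpct::sparse::matrix_info::matrix_type::tr);
/// info->set_uplo(oneapi::mkl::uplo::lower);
/// info->set_diag(oneapi::mkl::diag::nonunit);
/// auto opt = std::make_shared<dpct::sparse::optimize_info>();
/// dpct::sparse::optimize_csrsv(q, oneapi::mkl::transpose::nontrans, n, info,
///                              d_val, d_row_ptr, d_col_ind, opt);
/// \endcode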
template <typename T>
void optimize_csrsv(sycl::queue &queue, oneapi::mkl::transpose trans,
int row_col, const std::shared_ptr<matrix_info> info,
const T *val, const int *row_ptr, const int *col_ind,
std::shared_ptr<optimize_info> optimize_info) {
using Ty = typename dpct::DataType<T>::T2;
auto data_row_ptr = dpct::detail::get_memory(const_cast<int *>(row_ptr));
auto data_col_ind = dpct::detail::get_memory(const_cast<int *>(col_ind));
auto data_val =
dpct::detail::get_memory(reinterpret_cast<Ty *>(const_cast<T *>(val)));
oneapi::mkl::sparse::set_csr_data(queue, optimize_info->get_matrix_handle(),
row_col, row_col, info->get_index_base(),
data_row_ptr, data_col_ind, data_val);
if (info->get_matrix_type() != matrix_info::matrix_type::tr)
return;
#ifndef DPCT_USM_LEVEL_NONE
sycl::event e;
e =
#endif
oneapi::mkl::sparse::optimize_trsv(queue, info->get_uplo(), trans,
info->get_diag(),
optimize_info->get_matrix_handle());
#ifndef DPCT_USM_LEVEL_NONE
optimize_info->add_dependency(e);
#endif
}
#endif
class sparse_matrix_desc;
using sparse_matrix_desc_t = std::shared_ptr<sparse_matrix_desc>;
/// Structure describing a dense vector.
class dense_vector_desc {
public:
dense_vector_desc(std::int64_t ele_num, void *value,
library_data_t value_type)
: _ele_num(ele_num), _value(value), _value_type(value_type) {}
void get_desc(std::int64_t *ele_num, const void **value,
library_data_t *value_type) const noexcept {
*ele_num = _ele_num;
*value = _value;
*value_type = _value_type;
}
void get_desc(std::int64_t *ele_num, void **value,
library_data_t *value_type) const noexcept {
get_desc(ele_num, const_cast<const void **>(value), value_type);
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
private:
std::int64_t _ele_num;
void *_value;
library_data_t _value_type;
};
/// Structure describing a dense matrix
class dense_matrix_desc {
public:
dense_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t leading_dim, void *value,
library_data_t value_type, oneapi::mkl::layout layout)
: _row_num(row_num), _col_num(col_num), _leading_dim(leading_dim),
_value(value), _value_type(value_type), _layout(layout) {}
void get_desc(std::int64_t *row_num, std::int64_t *col_num,
std::int64_t *leading_dim, void **value,
library_data_t *value_type,
oneapi::mkl::layout *layout) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*leading_dim = _leading_dim;
*value = _value;
*value_type = _value_type;
*layout = _layout;
}
void *get_value() const noexcept { return _value; }
void set_value(void *value) { _value = value; }
std::int64_t get_col_num() const noexcept { return _col_num; }
std::int64_t get_leading_dim() const noexcept { return _leading_dim; }
oneapi::mkl::layout get_layout() const noexcept { return _layout; }
private:
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _leading_dim;
void *_value;
library_data_t _value_type;
oneapi::mkl::layout _layout;
};
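/// Illustrative usage sketch: wrapping existing device buffers in the dense
/// descriptors consumed by the spmv/spmm routines below. The sizes and the use
/// of sycl::malloc_device here are placeholders for this example only.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   std::int64_t n = 1024, k = 8;
///   double *d_x = sycl::malloc_device<double>(n, q);
///   auto x = std::make_shared<dpct::sparse::dense_vector_desc>(
///       n, d_x, dpct::library_data_t::real_double);
///   double *d_B = sycl::malloc_device<double>(n * k, q);
///   auto B = std::make_shared<dpct::sparse::dense_matrix_desc>(
///       n, k, /*leading_dim=*/n, d_B, dpct::library_data_t::real_double,
///       oneapi::mkl::layout::col_major);
/// \endcode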
/// Sparse matrix data format
enum matrix_format : int {
csr = 1,
};
/// Sparse matrix attribute
enum matrix_attribute : int { uplo = 0, diag };
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
/// Structure describing a sparse matrix
class sparse_matrix_desc {
public:
/// Constructor
/// \param [out] desc The descriptor to be created
/// \param [in] row_num Number of rows of the sparse matrix.
/// \param [in] col_num Number of columns of the sparse matrix.
/// \param [in] nnz Non-zero elements in the sparse matrix.
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
/// \param [in] row_ptr_type Data type of the \p row_ptr .
/// \param [in] col_ind_type Data type of the \p col_ind .
/// \param [in] base Indicates how input arrays are indexed.
/// \param [in] value_type Data type of the \p value .
/// \param [in] data_format The matrix data format.
sparse_matrix_desc(std::int64_t row_num, std::int64_t col_num,
std::int64_t nnz, void *row_ptr, void *col_ind,
void *value, library_data_t row_ptr_type,
library_data_t col_ind_type, oneapi::mkl::index_base base,
library_data_t value_type, matrix_format data_format)
: _row_num(row_num), _col_num(col_num), _nnz(nnz), _row_ptr(row_ptr),
_col_ind(col_ind), _value(value), _row_ptr_type(row_ptr_type),
_col_ind_type(col_ind_type), _base(base), _value_type(value_type),
_data_format(data_format) {
if (_data_format != matrix_format::csr) {
throw std::runtime_error("the sparse matrix data format is unsupported");
}
oneapi::mkl::sparse::init_matrix_handle(&_matrix_handle);
construct();
}
/// Destructor
~sparse_matrix_desc() {
oneapi::mkl::sparse::release_matrix_handle(get_default_queue(),
&_matrix_handle, _deps)
.wait();
}
/// Add dependency for the destructor.
/// \param [in] e The event which the destructor depends on.
void add_dependency(sycl::event e) { _deps.push_back(e); }
/// Get the internal saved matrix handle.
/// \return Returns the matrix handle.
oneapi::mkl::sparse::matrix_handle_t get_matrix_handle() const noexcept {
return _matrix_handle;
}
/// Get the values saved in the descriptor
/// \param [out] row_num Number of rows of the sparse matrix.
/// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
/// \param [out] row_ptr An array of length \p row_num + 1.
/// \param [out] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [out] value An array containing the non-zero elements of the sparse matrix.
/// \param [out] row_ptr_type Data type of the \p row_ptr .
/// \param [out] col_ind_type Data type of the \p col_ind .
/// \param [out] base Indicates how input arrays are indexed.
/// \param [out] value_type Data type of the \p value .
void get_desc(int64_t *row_num, int64_t *col_num, int64_t *nnz,
void **row_ptr, void **col_ind, void **value,
library_data_t *row_ptr_type, library_data_t *col_ind_type,
oneapi::mkl::index_base *base,
library_data_t *value_type) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
*row_ptr = _row_ptr;
*col_ind = _col_ind;
*value = _value;
*row_ptr_type = _row_ptr_type;
*col_ind_type = _col_ind_type;
*base = _base;
*value_type = _value_type;
}
/// Get the sparse matrix data format of this descriptor
/// \param [out] data_format The matrix data format result
void get_format(matrix_format *data_format) const noexcept {
*data_format = _data_format;
}
/// Get the index base of this descriptor
/// \param [out] base The index base result
void get_base(oneapi::mkl::index_base *base) const noexcept { *base = _base; }
/// Get the value pointer of this descriptor
/// \param [out] value The value pointer result
void get_value(void **value) const noexcept { *value = _value; }
/// Set the value pointer of this descriptor
/// \param [in] value The input value pointer
void set_value(void *value) {
// Assume the new data is different from the old data
_value = value;
construct();
}
/// Get the size of the sparse matrix
/// \param [out] row_num Number of rows of the sparse matrix.
/// \param [out] col_num Number of columns of the sparse matrix.
/// \param [out] nnz Non-zero elements in the sparse matrix.
void get_size(int64_t *row_num, int64_t *col_num,
int64_t *nnz) const noexcept {
*row_num = _row_num;
*col_num = _col_num;
*nnz = _nnz;
}
/// Set the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [in] data The attribute value
/// \param [in] data_size The data size of the attribute value
void set_attribute(matrix_attribute attribute, const void *data,
size_t data_size) {
if (attribute == matrix_attribute::diag) {
const oneapi::mkl::diag *diag_ptr =
reinterpret_cast<const oneapi::mkl::diag *>(data);
if (*diag_ptr == oneapi::mkl::diag::unit) {
_diag = oneapi::mkl::diag::unit;
} else if (*diag_ptr == oneapi::mkl::diag::nonunit) {
_diag = oneapi::mkl::diag::nonunit;
} else {
throw std::runtime_error("unsupported diag value");
}
} else if (attribute == matrix_attribute::uplo) {
const oneapi::mkl::uplo *uplo_ptr =
reinterpret_cast<const oneapi::mkl::uplo *>(data);
if (*uplo_ptr == oneapi::mkl::uplo::upper) {
_uplo = oneapi::mkl::uplo::upper;
} else if (*uplo_ptr == oneapi::mkl::uplo::lower) {
_uplo = oneapi::mkl::uplo::lower;
} else {
throw std::runtime_error("unsupported uplo value");
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Get the sparse matrix attribute
/// \param [in] attribute The attribute type
/// \param [out] data The attribute value
/// \param [in] data_size The data size of the attribute value
void get_attribute(matrix_attribute attribute, void *data,
size_t data_size) const {
if (attribute == matrix_attribute::diag) {
oneapi::mkl::diag *diag_ptr = reinterpret_cast<oneapi::mkl::diag *>(data);
if (_diag.has_value()) {
*diag_ptr = _diag.value();
} else {
*diag_ptr = oneapi::mkl::diag::nonunit;
}
} else if (attribute == matrix_attribute::uplo) {
oneapi::mkl::uplo *uplo_ptr = reinterpret_cast<oneapi::mkl::uplo *>(data);
if (_uplo.has_value()) {
*uplo_ptr = _uplo.value();
} else {
*uplo_ptr = oneapi::mkl::uplo::lower;
}
} else {
throw std::runtime_error("unsupported attribute");
}
}
/// Set the pointers for describing the sparse matrix
/// \param [in] row_ptr An array of length \p row_num + 1.
/// \param [in] col_ind An array containing the column indices in index-based
/// numbering.
/// \param [in] value An array containing the non-zero elements of the sparse matrix.
void set_pointers(void *row_ptr, void *col_ind, void *value) {
// Assume the new data is different from the old data
_row_ptr = row_ptr;
_col_ind = col_ind;
_value = value;
construct();
}
/// Get the diag attribute
/// \return diag value
std::optional<oneapi::mkl::diag> get_diag() const noexcept { return _diag; }
/// Get the uplo attribute
/// \return uplo value
std::optional<oneapi::mkl::uplo> get_uplo() const noexcept { return _uplo; }
private:
template <typename index_t, typename value_t> void set_data() {
auto data_row_ptr =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_row_ptr));
auto data_col_ind =
dpct::detail::get_memory(reinterpret_cast<index_t *>(_col_ind));
auto data_value =
dpct::detail::get_memory(reinterpret_cast<value_t *>(_value));
oneapi::mkl::sparse::set_csr_data(get_default_queue(), _matrix_handle,
_row_num, _col_num, _base, data_row_ptr,
data_col_ind, data_value);
get_default_queue().wait();
}
void construct() {
std::uint64_t key = dpct::detail::get_type_combination_id(
_row_ptr_type, _col_ind_type, _value_type);
switch (key) {
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_float): {
set_data<std::int32_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::real_double): {
set_data<std::int32_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int32,
library_data_t::real_int32,
library_data_t::complex_float): {
set_data<std::int32_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int32, library_data_t::real_int32,
library_data_t::complex_double): {
set_data<std::int32_t, std::complex<double>>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_float): {
set_data<std::int64_t, float>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::real_double): {
set_data<std::int64_t, double>();
break;
}
case dpct::detail::get_type_combination_id(library_data_t::real_int64,
library_data_t::real_int64,
library_data_t::complex_float): {
set_data<std::int64_t, std::complex<float>>();
break;
}
case dpct::detail::get_type_combination_id(
library_data_t::real_int64, library_data_t::real_int64,
library_data_t::complex_double): {
set_data<std::int64_t, std::complex<double>>();
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
std::int64_t _row_num;
std::int64_t _col_num;
std::int64_t _nnz;
void *_row_ptr;
void *_col_ind;
void *_value;
library_data_t _row_ptr_type;
library_data_t _col_ind_type;
oneapi::mkl::index_base _base;
library_data_t _value_type;
oneapi::mkl::sparse::matrix_handle_t _matrix_handle = nullptr;
std::vector<sycl::event> _deps;
matrix_format _data_format;
std::optional<oneapi::mkl::uplo> _uplo;
std::optional<oneapi::mkl::diag> _diag;
};
namespace detail {
#ifdef DPCT_USM_LEVEL_NONE
#define SPARSE_CALL(X) \
do { \
X; \
} while (0)
#else
#define SPARSE_CALL(X) \
do { \
sycl::event e = X; \
a->add_dependency(e); \
} while (0)
#endif
template <typename Ty>
inline void spmv_impl(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_x =
dpct::detail::get_memory(reinterpret_cast<Ty *>(x->get_value()));
auto data_y =
dpct::detail::get_memory(reinterpret_cast<Ty *>(y->get_value()));
if (a->get_diag().has_value() && a->get_uplo().has_value()) {
oneapi::mkl::sparse::optimize_trmv(queue, a->get_uplo().value(), trans,
a->get_diag().value(),
a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::trmv(
queue, a->get_uplo().value(), trans, a->get_diag().value(), alpha_value,
a->get_matrix_handle(), data_x, beta_value, data_y));
} else {
oneapi::mkl::sparse::optimize_gemv(queue, trans, a->get_matrix_handle());
SPARSE_CALL(oneapi::mkl::sparse::gemv(queue, trans, alpha_value,
a->get_matrix_handle(), data_x,
beta_value, data_y));
}
}
template <typename Ty>
inline void spmm_impl(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a,
std::shared_ptr<dense_matrix_desc> b, const void *beta,
std::shared_ptr<dense_matrix_desc> c) {
auto alpha_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(alpha), queue);
auto beta_value =
dpct::detail::get_value(reinterpret_cast<const Ty *>(beta), queue);
auto data_b =
dpct::detail::get_memory(reinterpret_cast<Ty *>(b->get_value()));
auto data_c =
dpct::detail::get_memory(reinterpret_cast<Ty *>(c->get_value()));
SPARSE_CALL(oneapi::mkl::sparse::gemm(
queue, b->get_layout(), trans_a, trans_b, alpha_value,
a->get_matrix_handle(), data_b, b->get_col_num(), b->get_leading_dim(),
beta_value, data_c, c->get_leading_dim()));
}
#undef SPARSE_CALL
} // namespace detail
/// Computes a sparse matrix-dense vector product: y = alpha * op(a) * x + beta * y.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans Specifies operation on input matrix.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] x Specifies the dense vector x.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] y Specifies the dense vector y.
/// \param [in] data_type Specifies the data type of \p a, \p x and \p y.
inline void spmv(sycl::queue queue, oneapi::mkl::transpose trans,
const void *alpha, sparse_matrix_desc_t a,
std::shared_ptr<dense_vector_desc> x, const void *beta,
std::shared_ptr<dense_vector_desc> y,
library_data_t data_type) {
switch (data_type) {
case library_data_t::real_float: {
detail::spmv_impl<float>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::real_double: {
detail::spmv_impl<double>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_float: {
detail::spmv_impl<std::complex<float>>(queue, trans, alpha, a, x, beta, y);
break;
}
case library_data_t::complex_double: {
detail::spmv_impl<std::complex<double>>(queue, trans, alpha, a, x, beta, y);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
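/// Illustrative usage sketch: computing y = alpha * A * x + beta * y for a
/// small int32/float CSR matrix. The device arrays are assumed to be filled
/// elsewhere; the sizes and variable names are placeholders for this example.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   std::int64_t m = 4, nnz = 9;
///   int *row_ptr = sycl::malloc_device<int>(m + 1, q);
///   int *col_ind = sycl::malloc_device<int>(nnz, q);
///   float *values = sycl::malloc_device<float>(nnz, q);
///   float *d_x = sycl::malloc_device<float>(m, q);
///   float *d_y = sycl::malloc_device<float>(m, q);
///   // ... fill the CSR arrays and the x/y vectors on the device ...
///   auto A = std::make_shared<dpct::sparse::sparse_matrix_desc>(
///       m, m, nnz, row_ptr, col_ind, values,
///       dpct::library_data_t::real_int32, dpct::library_data_t::real_int32,
///       oneapi::mkl::index_base::zero, dpct::library_data_t::real_float,
///       dpct::sparse::matrix_format::csr);
///   auto x = std::make_shared<dpct::sparse::dense_vector_desc>(
///       m, d_x, dpct::library_data_t::real_float);
///   auto y = std::make_shared<dpct::sparse::dense_vector_desc>(
///       m, d_y, dpct::library_data_t::real_float);
///   float alpha = 1.0f, beta = 0.0f;
///   dpct::sparse::spmv(q, oneapi::mkl::transpose::nontrans, &alpha, A, x,
///                      &beta, y, dpct::library_data_t::real_float);
///   q.wait();
/// \endcode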
/// Computes a sparse matrix-dense matrix product: c = alpha * op(a) * op(b) + beta * c.
/// \param [in] queue The queue where the routine should be executed. It must
/// have the in_order property when using the USM mode.
/// \param [in] trans_a Specifies operation on input matrix a.
/// \param [in] trans_b Specifies operation on input matrix b.
/// \param [in] alpha Specifies the scalar alpha.
/// \param [in] a Specifies the sparse matrix a.
/// \param [in] b Specifies the dense matrix b.
/// \param [in] beta Specifies the scalar beta.
/// \param [in, out] c Specifies the dense matrix c.
/// \param [in] data_type Specifies the data type of \p a, \p b and \p c.
inline void spmm(sycl::queue queue, oneapi::mkl::transpose trans_a,
oneapi::mkl::transpose trans_b, const void *alpha,
sparse_matrix_desc_t a, std::shared_ptr<dense_matrix_desc> b,
const void *beta, std::shared_ptr<dense_matrix_desc> c,
library_data_t data_type) {
if (b->get_layout() != c->get_layout())
throw std::runtime_error("the layout of b and c are different");
switch (data_type) {
case library_data_t::real_float: {
detail::spmm_impl<float>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::real_double: {
detail::spmm_impl<double>(queue, trans_a, trans_b, alpha, a, b, beta, c);
break;
}
case library_data_t::complex_float: {
detail::spmm_impl<std::complex<float>>(queue, trans_a, trans_b, alpha, a, b,
beta, c);
break;
}
case library_data_t::complex_double: {
detail::spmm_impl<std::complex<double>>(queue, trans_a, trans_b, alpha, a,
b, beta, c);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
#endif
} // namespace sparse
} // namespace dpct
#endif // __DPCT_SPARSE_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/device.hpp | //==---- device.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DEVICE_HPP__
#define __DPCT_DEVICE_HPP__
#include <sycl/sycl.hpp>
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <mutex>
#include <set>
#include <sstream>
#include <map>
#include <vector>
#include <thread>
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#endif
#if defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#endif
namespace dpct {
/// SYCL default exception handler
inline auto exception_handler = [](sycl::exception_list exceptions) {
for (std::exception_ptr const &e : exceptions) {
try {
std::rethrow_exception(e);
} catch (sycl::exception const &e) {
std::cerr << "Caught asynchronous SYCL exception:" << std::endl
<< e.what() << std::endl
<< "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
}
}
};
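/// Illustrative usage sketch: the handler above can be attached to a
/// user-created queue so that asynchronous SYCL errors are reported instead of
/// being silently dropped.
/// \code
///   sycl::queue q(sycl::default_selector_v, dpct::exception_handler);
///   // ... submit work ...
///   q.wait_and_throw(); // asynchronous exceptions are routed to the handler
/// \endcode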
typedef sycl::event *event_ptr;
typedef sycl::queue *queue_ptr;
typedef char *device_ptr;
/// Destroy the memory pointed to by \p event.
///
/// \param event Pointer to the sycl::event to be destroyed.
static void destroy_event(event_ptr event) {
delete event;
}
class device_info {
public:
// get interface
const char *get_name() const { return _name; }
char *get_name() { return _name; }
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() const {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
template <typename WorkItemSizesTy = sycl::id<3>,
std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::id<3>> ||
std::is_same_v<WorkItemSizesTy, int *>,
int> = 0>
auto get_max_work_item_sizes() {
if constexpr (std::is_same_v<WorkItemSizesTy, sycl::id<3>>)
return _max_work_item_sizes;
else
return _max_work_item_sizes_i;
}
bool get_host_unified_memory() const { return _host_unified_memory; }
int get_major_version() const { return _major; }
int get_minor_version() const { return _minor; }
int get_integrated() const { return _integrated; }
int get_max_clock_frequency() const { return _frequency; }
int get_max_compute_units() const { return _max_compute_units; }
int get_max_work_group_size() const { return _max_work_group_size; }
int get_max_sub_group_size() const { return _max_sub_group_size; }
int get_max_work_items_per_compute_unit() const {
return _max_work_items_per_compute_unit;
}
int get_max_register_size_per_work_group() const {
return _max_register_size_per_work_group;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() const {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
template <typename NDRangeSizeTy = size_t *,
std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
std::is_same_v<NDRangeSizeTy, int *>,
int> = 0>
auto get_max_nd_range_size() {
if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
return _max_nd_range_size;
else
return _max_nd_range_size_i;
}
size_t get_global_mem_size() const { return _global_mem_size; }
size_t get_local_mem_size() const { return _local_mem_size; }
/// Returns the maximum clock rate of the device's global memory in kHz. If the
/// compiler does not support this query, the default value of 3200000 kHz is returned.
unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
/// Returns the maximum bus width between device and memory in bits. If the
/// compiler does not support this query, the default value of 64 bits is returned.
unsigned int get_memory_bus_width() const { return _memory_bus_width; }
uint32_t get_device_id() const { return _device_id; }
std::array<unsigned char, 16> get_uuid() const { return _uuid; }
// set interface
void set_name(const char* name) {
size_t length = strlen(name);
if (length < 256) {
std::memcpy(_name, name, length + 1);
} else {
std::memcpy(_name, name, 255);
_name[255] = '\0';
}
}
void set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes) {
_max_work_item_sizes = max_work_item_sizes;
for (int i = 0; i < 3; ++i)
_max_work_item_sizes_i[i] = max_work_item_sizes[i];
}
void set_host_unified_memory(bool host_unified_memory) {
_host_unified_memory = host_unified_memory;
}
void set_major_version(int major) { _major = major; }
void set_minor_version(int minor) { _minor = minor; }
void set_integrated(int integrated) { _integrated = integrated; }
void set_max_clock_frequency(int frequency) { _frequency = frequency; }
void set_max_compute_units(int max_compute_units) {
_max_compute_units = max_compute_units;
}
void set_global_mem_size(size_t global_mem_size) {
_global_mem_size = global_mem_size;
}
void set_local_mem_size(size_t local_mem_size) {
_local_mem_size = local_mem_size;
}
void set_max_work_group_size(int max_work_group_size) {
_max_work_group_size = max_work_group_size;
}
void set_max_sub_group_size(int max_sub_group_size) {
_max_sub_group_size = max_sub_group_size;
}
void
set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit) {
_max_work_items_per_compute_unit = max_work_items_per_compute_unit;
}
void set_max_nd_range_size(int max_nd_range_size[]) {
for (int i = 0; i < 3; i++) {
_max_nd_range_size[i] = max_nd_range_size[i];
_max_nd_range_size_i[i] = max_nd_range_size[i];
}
}
void set_memory_clock_rate(unsigned int memory_clock_rate) {
_memory_clock_rate = memory_clock_rate;
}
void set_memory_bus_width(unsigned int memory_bus_width) {
_memory_bus_width = memory_bus_width;
}
void
set_max_register_size_per_work_group(int max_register_size_per_work_group) {
_max_register_size_per_work_group = max_register_size_per_work_group;
}
void set_device_id(uint32_t device_id) {
_device_id = device_id;
}
void set_uuid(std::array<unsigned char, 16> uuid) {
_uuid = std::move(uuid);
}
private:
char _name[256];
sycl::id<3> _max_work_item_sizes;
int _max_work_item_sizes_i[3];
bool _host_unified_memory = false;
int _major;
int _minor;
int _integrated = 0;
int _frequency;
// Set estimated value 3200000 kHz as default value.
unsigned int _memory_clock_rate = 3200000;
// Set estimated value 64 bits as default value.
unsigned int _memory_bus_width = 64;
int _max_compute_units;
int _max_work_group_size;
int _max_sub_group_size;
int _max_work_items_per_compute_unit;
int _max_register_size_per_work_group;
size_t _global_mem_size;
size_t _local_mem_size;
size_t _max_nd_range_size[3];
int _max_nd_range_size_i[3];
uint32_t _device_id;
std::array<unsigned char, 16> _uuid;
};
/// dpct device extension
class device_ext : public sycl::device {
typedef std::mutex mutex_type;
public:
device_ext() : sycl::device(), _ctx(*this) {}
~device_ext() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
}
device_ext(const sycl::device &base) : sycl::device(base), _ctx(*this) {
std::lock_guard<mutex_type> lock(m_mutex);
init_queues();
}
int is_native_atomic_supported() { return 0; }
int get_major_version() const {
int major, minor;
get_version(major, minor);
return major;
}
int get_minor_version() const {
int major, minor;
get_version(major, minor);
return minor;
}
int get_max_compute_units() const {
return get_device_info().get_max_compute_units();
}
/// Returns the maximum clock frequency of this device in kHz.
int get_max_clock_frequency() const {
return get_device_info().get_max_clock_frequency();
}
int get_integrated() const { return get_device_info().get_integrated(); }
int get_max_sub_group_size() const {
return get_device_info().get_max_sub_group_size();
}
int get_max_register_size_per_work_group() const {
return get_device_info().get_max_register_size_per_work_group();
}
int get_max_work_group_size() const {
return get_device_info().get_max_work_group_size();
}
int get_mem_base_addr_align() const {
return get_info<sycl::info::device::mem_base_addr_align>();
}
size_t get_global_mem_size() const {
return get_device_info().get_global_mem_size();
}
/// Get the number of bytes of free and total memory on the SYCL device.
/// \param [out] free_memory The number of bytes of free memory on the SYCL device.
/// \param [out] total_memory The number of bytes of total memory on the SYCL device.
void get_memory_info(size_t &free_memory, size_t &total_memory) {
#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
if (!has(sycl::aspect::ext_intel_free_memory)) {
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
} else {
free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
}
#else
std::cerr << "get_memory_info: ext_intel_free_memory is not supported." << std::endl;
free_memory = 0;
#if defined(_MSC_VER) && !defined(__clang__)
#pragma message("Querying the number of bytes of free memory is not supported")
#else
#warning "Querying the number of bytes of free memory is not supported"
#endif
#endif
total_memory = get_device_info().get_global_mem_size();
}
void get_device_info(device_info &out) const {
device_info prop;
prop.set_name(get_info<sycl::info::device::name>().c_str());
int major, minor;
get_version(major, minor);
prop.set_major_version(major);
prop.set_minor_version(minor);
prop.set_max_work_item_sizes(
#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION<20220902)
// oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes is an enum class element
get_info<sycl::info::device::max_work_item_sizes>());
#else
// SYCL 2020-conformant code, max_work_item_sizes is a struct templated by an int
get_info<sycl::info::device::max_work_item_sizes<3>>());
#endif
prop.set_host_unified_memory(
this->has(sycl::aspect::usm_host_allocations));
prop.set_max_clock_frequency(
get_info<sycl::info::device::max_clock_frequency>() * 1000);
prop.set_max_compute_units(
get_info<sycl::info::device::max_compute_units>());
prop.set_max_work_group_size(
get_info<sycl::info::device::max_work_group_size>());
prop.set_global_mem_size(
get_info<sycl::info::device::global_mem_size>());
prop.set_local_mem_size(get_info<sycl::info::device::local_mem_size>());
#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
if (this->has(sycl::aspect::ext_intel_memory_clock_rate)) {
unsigned int tmp =
this->get_info<sycl::ext::intel::info::device::memory_clock_rate>();
if (tmp != 0)
prop.set_memory_clock_rate(1000 * tmp);
}
if (this->has(sycl::aspect::ext_intel_memory_bus_width)) {
prop.set_memory_bus_width(
this->get_info<sycl::ext::intel::info::device::memory_bus_width>());
}
if (this->has(sycl::aspect::ext_intel_device_id)) {
prop.set_device_id(
this->get_info<sycl::ext::intel::info::device::device_id>());
}
if (this->has(sycl::aspect::ext_intel_device_info_uuid)) {
prop.set_uuid(
this->get_info<sycl::ext::intel::info::device::uuid>());
}
#elif defined(_MSC_VER) && !defined(__clang__)
#pragma message("get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value.")
#else
#warning "get_device_info: querying memory_clock_rate and \
memory_bus_width are not supported by the compiler used. \
Use 3200000 kHz as memory_clock_rate default value. \
Use 64 bits as memory_bus_width default value."
#endif
size_t max_sub_group_size = 1;
std::vector<size_t> sub_group_sizes =
get_info<sycl::info::device::sub_group_sizes>();
for (const auto &sub_group_size : sub_group_sizes) {
if (max_sub_group_size < sub_group_size)
max_sub_group_size = sub_group_size;
}
prop.set_max_sub_group_size(max_sub_group_size);
prop.set_max_work_items_per_compute_unit(
get_info<sycl::info::device::max_work_group_size>());
int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
prop.set_max_nd_range_size(max_nd_range_size);
// Estimated max register size per work group; update the value according
// to the device properties if needed.
prop.set_max_register_size_per_work_group(65536);
out = prop;
}
device_info get_device_info() const {
device_info prop;
get_device_info(prop);
return prop;
}
void reset() {
std::lock_guard<mutex_type> lock(m_mutex);
clear_queues();
init_queues();
}
sycl::queue &in_order_queue() { return *_q_in_order; }
sycl::queue &out_of_order_queue() { return *_q_out_of_order; }
sycl::queue &default_queue() {
#ifdef DPCT_USM_LEVEL_NONE
return out_of_order_queue();
#else
return in_order_queue();
#endif // DPCT_USM_LEVEL_NONE
}
void queues_wait_and_throw() {
std::unique_lock<mutex_type> lock(m_mutex);
std::vector<std::shared_ptr<sycl::queue>> current_queues(
_queues);
lock.unlock();
for (const auto &q : current_queues) {
q->wait_and_throw();
}
// Guard the destruct of current_queues to make sure the ref count is safe.
lock.lock();
}
sycl::queue *create_queue(bool enable_exception_handler = false) {
#ifdef DPCT_USM_LEVEL_NONE
return create_out_of_order_queue(enable_exception_handler);
#else
return create_in_order_queue(enable_exception_handler);
#endif // DPCT_USM_LEVEL_NONE
}
sycl::queue *create_in_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler,
sycl::property::queue::in_order());
}
sycl::queue *create_out_of_order_queue(bool enable_exception_handler = false) {
std::lock_guard<mutex_type> lock(m_mutex);
return create_queue_impl(enable_exception_handler);
}
void destroy_queue(sycl::queue *&queue) {
std::lock_guard<mutex_type> lock(m_mutex);
_queues.erase(std::remove_if(_queues.begin(), _queues.end(),
[=](const std::shared_ptr<sycl::queue> &q) -> bool {
return q.get() == queue;
}),
_queues.end());
queue = nullptr;
}
void set_saved_queue(sycl::queue* q) {
std::lock_guard<mutex_type> lock(m_mutex);
_saved_queue = q;
}
sycl::queue *get_saved_queue() const {
std::lock_guard<mutex_type> lock(m_mutex);
return _saved_queue;
}
sycl::context get_context() const { return _ctx; }
private:
void clear_queues() {
_queues.clear();
_q_in_order = _q_out_of_order = _saved_queue = nullptr;
}
void init_queues() {
_q_in_order = create_queue_impl(true, sycl::property::queue::in_order());
_q_out_of_order = create_queue_impl(true);
_saved_queue = &default_queue();
}
/// Caller should acquire resource \p m_mutex before calling this function.
template <class... Properties>
sycl::queue *create_queue_impl(bool enable_exception_handler,
Properties... properties) {
sycl::async_handler eh = {};
if (enable_exception_handler) {
eh = exception_handler;
}
_queues.push_back(std::make_shared<sycl::queue>(
_ctx, *this, eh,
sycl::property_list(
#ifdef DPCT_PROFILING_ENABLED
sycl::property::queue::enable_profiling(),
#endif
properties...)));
return _queues.back().get();
}
void get_version(int &major, int &minor) const {
// Version string has the following format:
// a. OpenCL<space><major.minor><space><vendor-specific-information>
// b. <major.minor>
std::string ver;
ver = get_info<sycl::info::device::version>();
std::string::size_type i = 0;
while (i < ver.size()) {
if (isdigit(ver[i]))
break;
i++;
}
major = std::stoi(&(ver[i]));
while (i < ver.size()) {
if (ver[i] == '.')
break;
i++;
}
i++;
minor = std::stoi(&(ver[i]));
}
sycl::queue *_q_in_order, *_q_out_of_order;
sycl::queue *_saved_queue;
sycl::context _ctx;
std::vector<std::shared_ptr<sycl::queue>> _queues;
mutable mutex_type m_mutex;
};
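/// Illustrative usage sketch: querying properties of the current device and
/// managing a dedicated in-order queue. It uses the dpct::get_current_device()
/// helper defined further below in this header; the variable names are
/// placeholders.
/// \code
///   dpct::device_ext &dev = dpct::get_current_device();
///   dpct::device_info prop;
///   dev.get_device_info(prop);
///   std::cout << prop.get_name() << ": " << prop.get_max_compute_units()
///             << " compute units" << std::endl;
///   sycl::queue *q = dev.create_in_order_queue(/*enable_exception_handler=*/true);
///   // ... submit work to *q ...
///   q->wait();
///   dev.destroy_queue(q);
/// \endcode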
static inline unsigned int get_tid() {
#if defined(__linux__)
return syscall(SYS_gettid);
#elif defined(_WIN64)
return GetCurrentThreadId();
#else
#error "Only support Windows and Linux."
#endif
}
/// device manager
class dev_mgr {
public:
device_ext ¤t_device() {
unsigned int dev_id=current_device_id();
check_id(dev_id);
return *_devs[dev_id];
}
device_ext &cpu_device() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
if (_cpu_device == -1) {
throw std::runtime_error("no valid cpu device");
} else {
return *_devs[_cpu_device];
}
}
device_ext &get_device(unsigned int id) const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
return *_devs[id];
}
unsigned int current_device_id() const {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
auto it=_thread2dev_map.find(get_tid());
if(it != _thread2dev_map.end())
return it->second;
return DEFAULT_DEVICE_ID;
}
/// Select device with a device ID.
/// \param [in] id The id of the device which can
/// be obtained through get_device_id(const sycl::device).
void select_device(unsigned int id) {
std::lock_guard<std::recursive_mutex> lock(m_mutex);
check_id(id);
_thread2dev_map[get_tid()]=id;
}
unsigned int device_count() { return _devs.size(); }
unsigned int get_device_id(const sycl::device &dev) {
unsigned int id = 0;
for(auto dev_item : _devs) {
if (*dev_item == dev) {
break;
}
id++;
}
return id;
}
template <class DeviceSelector>
std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
sycl::device selected_device = sycl::device(selector);
unsigned int selected_device_id = get_device_id(selected_device);
select_device(selected_device_id);
}
/// Returns the instance of device manager singleton.
static dev_mgr &instance() {
static dev_mgr d_m;
return d_m;
}
dev_mgr(const dev_mgr &) = delete;
dev_mgr &operator=(const dev_mgr &) = delete;
dev_mgr(dev_mgr &&) = delete;
dev_mgr &operator=(dev_mgr &&) = delete;
private:
mutable std::recursive_mutex m_mutex;
dev_mgr() {
sycl::device default_device =
sycl::device(sycl::default_selector_v);
_devs.push_back(std::make_shared<device_ext>(default_device));
std::vector<sycl::device> sycl_all_devs =
sycl::device::get_devices(sycl::info::device_type::all);
// Collect other devices except for the default device.
if (default_device.is_cpu())
_cpu_device = 0;
for (auto &dev : sycl_all_devs) {
if (dev == default_device) {
continue;
}
_devs.push_back(std::make_shared<device_ext>(dev));
if (_cpu_device == -1 && dev.is_cpu()) {
_cpu_device = _devs.size() - 1;
}
}
}
void check_id(unsigned int id) const {
if (id >= _devs.size()) {
throw std::runtime_error("invalid device id");
}
}
std::vector<std::shared_ptr<device_ext>> _devs;
/// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
/// thread id in _thread2dev_map, which means the default device should be
/// used for the current thread.
const unsigned int DEFAULT_DEVICE_ID = 0;
/// thread-id to device-id map.
std::map<unsigned int, unsigned int> _thread2dev_map;
int _cpu_device = -1;
};
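/// Illustrative usage sketch: enumerating the devices known to the manager and
/// making the last one current for the calling thread. Variable names are
/// placeholders for this example.
/// \code
///   auto &mgr = dpct::dev_mgr::instance();
///   for (unsigned int i = 0; i < mgr.device_count(); ++i) {
///     dpct::device_ext &d = mgr.get_device(i);
///     std::cout << i << ": " << d.get_info<sycl::info::device::name>()
///               << std::endl;
///   }
///   if (mgr.device_count() > 0)
///     mgr.select_device(mgr.device_count() - 1);
/// \endcode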
/// Util function to get the default queue of the currently selected device,
/// depending on the USM config. Returns the default out-of-order queue when
/// USM-none is enabled, otherwise returns the default in-order queue.
static inline sycl::queue &get_default_queue() {
return dev_mgr::instance().current_device().default_queue();
}
/// Util function to get the default in-order queue of the current device in
/// the dpct device manager.
static inline sycl::queue &get_in_order_queue() {
return dev_mgr::instance().current_device().in_order_queue();
}
/// Util function to get the default out-of-order queue of the current device
/// in the dpct device manager.
static inline sycl::queue &get_out_of_order_queue() {
return dev_mgr::instance().current_device().out_of_order_queue();
}
/// Util function to get the id of the current device in
/// the dpct device manager.
static inline unsigned int get_current_device_id() {
return dev_mgr::instance().current_device_id();
}
/// Util function to get the current device.
static inline device_ext &get_current_device() {
return dev_mgr::instance().current_device();
}
/// Util function to get a device by id.
static inline device_ext &get_device(unsigned int id) {
return dev_mgr::instance().get_device(id);
}
/// Util function to get the context of the default queue of the current
/// device in the dpct device manager.
static inline sycl::context get_default_context() {
return dpct::get_current_device().get_context();
}
/// Util function to get a CPU device.
static inline device_ext &cpu_device() {
return dev_mgr::instance().cpu_device();
}
static inline unsigned int select_device(unsigned int id) {
dev_mgr::instance().select_device(id);
return id;
}
template <class DeviceSelector>
static inline std::enable_if_t<
std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
select_device(const DeviceSelector &selector = sycl::gpu_selector_v) {
dev_mgr::instance().select_device(selector);
}
static inline unsigned int get_device_id(const sycl::device &dev){
return dev_mgr::instance().get_device_id(dev);
}
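/// Illustrative usage sketch: the free helpers above are the typical entry
/// points in migrated code; this selects device 0 and submits a trivial kernel
/// to its default queue. The buffer size and names are placeholders.
/// \code
///   dpct::select_device(0);
///   sycl::queue &q = dpct::get_default_queue();
///   int *data = sycl::malloc_shared<int>(16, q);
///   q.parallel_for(sycl::range<1>(16), [=](sycl::id<1> i) {
///     data[i] = static_cast<int>(i[0]);
///   }).wait();
///   sycl::free(data, q);
/// \endcode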
/// Util function to check whether a device supports all of the given sycl::aspect values.
inline void
has_capability_or_fail(const sycl::device &dev,
const std::initializer_list<sycl::aspect> &props) {
for (const auto &it : props) {
if (dev.has(it))
continue;
switch (it) {
case sycl::aspect::fp64:
throw std::runtime_error("'double' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
case sycl::aspect::fp16:
throw std::runtime_error("'half' is not supported in '" +
dev.get_info<sycl::info::device::name>() +
"' device");
break;
default:
#define __SYCL_ASPECT(ASPECT, ID) \
case sycl::aspect::ASPECT: \
return #ASPECT;
#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string {
switch (AspectNum) {
#include <sycl/info/aspects.def>
#include <sycl/info/aspects_deprecated.def>
default:
return "unknown aspect";
}
};
#undef __SYCL_ASPECT_DEPRECATED_ALIAS
#undef __SYCL_ASPECT_DEPRECATED
#undef __SYCL_ASPECT
throw std::runtime_error(
"'" + getAspectNameStr(it) + "' is not supported in '" +
dev.get_info<sycl::info::device::name>() + "' device");
}
break;
}
}
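/// Illustrative usage sketch: guarding double- and half-precision kernels with
/// the capability check above; it throws with a readable message when the
/// device lacks one of the aspects.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   dpct::has_capability_or_fail(q.get_device(),
///                                {sycl::aspect::fp64, sycl::aspect::fp16});
///   // safe to submit kernels using double and sycl::half from here on
/// \endcode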
} // namespace dpct
#endif // __DPCT_DEVICE_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/memory.hpp | //==---- memory.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_HPP__
#define __DPCT_MEMORY_HPP__
#include "device.hpp"
#include <sycl/sycl.hpp>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <map>
#include <utility>
#include <thread>
#include <type_traits>
#if defined(__linux__)
#include <sys/mman.h>
#elif defined(_WIN64)
#define NOMINMAX
#include <windows.h>
#else
#error "Only support Windows and Linux."
#endif
namespace dpct {
enum memcpy_direction {
host_to_host,
host_to_device,
device_to_host,
device_to_device,
automatic
};
enum memory_region {
global = 0, // device global memory
constant, // device constant memory
local, // device local memory
shared, // memory which can be accessed by host and device
};
typedef uint8_t byte_t;
/// Buffer type to be used in Memory Management runtime.
typedef sycl::buffer<byte_t> buffer_t;
/// Pitched 2D/3D memory data.
class pitched_data {
public:
pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
pitched_data(void *data, size_t pitch, size_t x, size_t y)
: _data(data), _pitch(pitch), _x(x), _y(y) {}
void *get_data_ptr() { return _data; }
void set_data_ptr(void *data) { _data = data; }
size_t get_pitch() { return _pitch; }
void set_pitch(size_t pitch) { _pitch = pitch; }
size_t get_x() { return _x; }
void set_x(size_t x) { _x = x; };
size_t get_y() { return _y; }
void set_y(size_t y) { _y = y; }
private:
void *_data;
size_t _pitch, _x, _y;
};
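/// Illustrative usage sketch: describing a 2D pitched allocation. The pitch
/// computation mirrors the PITCH_DEFAULT_ALIGN macro used further below; the
/// sizes are placeholders.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   size_t x = 1000, y = 256;                 // bytes per row, number of rows
///   size_t pitch = (x + 31) & ~size_t(0x1F);  // 32-byte aligned row pitch
///   void *ptr = sycl::malloc_device(pitch * y, q);
///   dpct::pitched_data pd(ptr, pitch, x, y);
///   // ... use pd with the pitched memset/memcpy helpers in this header ...
///   sycl::free(ptr, q);
/// \endcode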
namespace detail {
class mem_mgr {
mem_mgr() {
// Reserved address space, no real memory allocation happens here.
#if defined(__linux__)
mapped_address_space =
(byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN64)
mapped_address_space = (byte_t *)VirtualAlloc(
NULL, // NULL specified as the base address parameter
mapped_region_size, // Size of allocation
MEM_RESERVE, // Allocate reserved pages
PAGE_NOACCESS); // Protection = no access
#else
#error "Only support Windows and Linux."
#endif
next_free = mapped_address_space;
};
public:
using buffer_id_t = int;
struct allocation {
buffer_t buffer;
byte_t *alloc_ptr;
size_t size;
};
~mem_mgr() {
#if defined(__linux__)
munmap(mapped_address_space, mapped_region_size);
#elif defined(_WIN64)
VirtualFree(mapped_address_space, 0, MEM_RELEASE);
#else
#error "Only support Windows and Linux."
#endif
};
mem_mgr(const mem_mgr &) = delete;
mem_mgr &operator=(const mem_mgr &) = delete;
mem_mgr(mem_mgr &&) = delete;
mem_mgr &operator=(mem_mgr &&) = delete;
/// Allocate
void *mem_alloc(size_t size) {
if (!size)
return nullptr;
std::lock_guard<std::mutex> lock(m_mutex);
if (next_free + size > mapped_address_space + mapped_region_size) {
throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
}
// Allocation
sycl::range<1> r(size);
buffer_t buf(r);
allocation A{buf, next_free, size};
// Map allocation to device pointer
void *result = next_free;
m_map.emplace(next_free + size, A);
// Update pointer to the next free space.
next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
return result;
}
/// Deallocate
void mem_free(const void *ptr) {
if (!ptr)
return;
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
m_map.erase(it);
}
/// map: device pointer -> allocation(buffer, alloc_ptr, size)
allocation translate_ptr(const void *ptr) {
std::lock_guard<std::mutex> lock(m_mutex);
auto it = get_map_iterator(ptr);
return it->second;
}
/// Check whether the pointer represents a device pointer.
bool is_device_ptr(const void *ptr) const {
std::lock_guard<std::mutex> lock(m_mutex);
return (mapped_address_space <= ptr) &&
(ptr < mapped_address_space + mapped_region_size);
}
/// Returns the instance of memory manager singleton.
static mem_mgr &instance() {
static mem_mgr m;
return m;
}
private:
std::map<byte_t *, allocation> m_map;
mutable std::mutex m_mutex;
byte_t *mapped_address_space;
byte_t *next_free;
const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
const size_t alignment = 256;
/// This padding may be set to a positive value to help debug
/// out-of-bound accesses.
const size_t extra_padding = 0;
std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr) {
auto it = m_map.upper_bound((byte_t *)ptr);
if (it == m_map.end()) {
// Not a virtual pointer.
throw std::runtime_error("can not get buffer from non-virtual pointer");
}
const allocation &alloc = it->second;
if (ptr < alloc.alloc_ptr) {
// Out of bound.
// This may happen if there's a gap between allocations due to alignment
// or extra padding and the pointer points into this gap.
throw std::runtime_error("invalid virtual pointer");
}
return it;
}
};
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <memory_region Memory, class T = byte_t> class memory_traits {
public:
static constexpr sycl::access::target target =
sycl::access::target::device;
static constexpr sycl::access_mode mode =
(Memory == constant) ? sycl::access_mode::read
: sycl::access_mode::read_write;
static constexpr size_t type_size = sizeof(T);
using element_t =
typename std::conditional<Memory == constant, const T, T>::type;
using value_t = typename std::remove_cv<T>::type;
template <size_t Dimension = 1>
using accessor_t = typename std::conditional<
Memory == local, sycl::local_accessor<value_t, Dimension>,
sycl::accessor<T, Dimension, mode, target>>::type;
using pointer_t = T *;
};
static inline void *dpct_malloc(size_t size, sycl::queue &q) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().mem_alloc(size * sizeof(byte_t));
#else
return sycl::malloc_device(size, q.get_device(), q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
sycl::queue &q) {
pitch = PITCH_DEFAULT_ALIGN(x);
return dpct_malloc(pitch * y * z, q);
}
/// Set \p value to the first \p size bytes starting from \p dev_ptr in \p q.
///
/// \param q The queue in which the operation is done.
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \returns An event representing the memset operation.
static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
int value, size_t size) {
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
assert(mm.is_device_ptr(dev_ptr));
auto alloc = mm.translate_ptr(dev_ptr);
size_t offset = (byte_t *)dev_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.fill(acc, (byte_t)value);
});
#else
return q.memset(dev_ptr, value, size);
#endif // DPCT_USM_LEVEL_NONE
}
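/// Illustrative usage sketch for the internal helper above: zero-filling a
/// device allocation allocated through the detail allocator. The size and
/// variable names are placeholders.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   void *buf = dpct::detail::dpct_malloc(4096, q);
///   dpct::detail::dpct_memset(q, buf, 0, 4096).wait();
///   dpct::detail::dpct_free(buf, q);
/// \endcode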
/// Set \p value to the 3D memory region pointed to by \p data in \p q. \p size
/// specifies the 3D memory size to set.
///
/// \param q The queue in which the operation is done.
/// \param data Pointer to the device memory region.
/// \param value Value to be set.
/// \param size Memory region size.
/// \returns An event list representing the memset operations.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, pitched_data data, int value,
sycl::range<3> size) {
std::vector<sycl::event> event_list;
size_t slice = data.get_pitch() * data.get_y();
unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *data_ptr = data_surface;
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
data_ptr += data.get_pitch();
}
data_surface += slice;
}
return event_list;
}
/// memset 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memset(sycl::queue &q, void *ptr, size_t pitch, int val, size_t x,
size_t y) {
return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
sycl::range<3>(x, y, 1));
}
enum class pointer_access_attribute {
host_only = 0,
device_only,
host_device,
end
};
static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
const void *ptr) {
#ifdef DPCT_USM_LEVEL_NONE
return mem_mgr::instance().is_device_ptr(ptr)
? pointer_access_attribute::device_only
: pointer_access_attribute::host_only;
#else
switch (sycl::get_pointer_type(ptr, q.get_context())) {
case sycl::usm::alloc::unknown:
return pointer_access_attribute::host_only;
case sycl::usm::alloc::device:
return pointer_access_attribute::device_only;
case sycl::usm::alloc::shared:
case sycl::usm::alloc::host:
return pointer_access_attribute::host_device;
}
#endif
}
static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
const void *from_ptr,
memcpy_direction dir) {
switch (dir) {
case memcpy_direction::host_to_host:
case memcpy_direction::host_to_device:
case memcpy_direction::device_to_host:
case memcpy_direction::device_to_device:
return dir;
case memcpy_direction::automatic: {
// table[to_attribute][from_attribute]
static const memcpy_direction
direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
[static_cast<unsigned>(pointer_access_attribute::end)] =
{{memcpy_direction::host_to_host,
memcpy_direction::device_to_host,
memcpy_direction::host_to_host},
{memcpy_direction::host_to_device,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device},
{memcpy_direction::host_to_host,
memcpy_direction::device_to_device,
memcpy_direction::device_to_device}};
return direction_table[static_cast<unsigned>(get_pointer_attribute(
q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
}
static sycl::event
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
if (!size)
return sycl::event{};
#ifdef DPCT_USM_LEVEL_NONE
auto &mm = mem_mgr::instance();
auto real_direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
switch (real_direction) {
case host_to_host:
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.host_task([=] { std::memcpy(to_ptr, from_ptr, size); });
});
case host_to_device: {
auto alloc = mm.translate_ptr(to_ptr);
size_t offset = (byte_t *)to_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(from_ptr, acc);
});
}
case device_to_host: {
auto alloc = mm.translate_ptr(from_ptr);
size_t offset = (byte_t *)from_ptr - alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto o = sycl::id<1>(offset);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
acc(alloc.buffer, cgh, r, o);
cgh.copy(acc, to_ptr);
});
}
case device_to_device: {
auto to_alloc = mm.translate_ptr(to_ptr);
auto from_alloc = mm.translate_ptr(from_ptr);
size_t to_offset = (byte_t *)to_ptr - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_ptr - from_alloc.alloc_ptr;
return q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto r = sycl::range<1>(size);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh, r, to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh, r, from_o);
cgh.copy(from_acc, to_acc);
});
}
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
#else
return q.memcpy(to_ptr, from_ptr, size, dep_events);
#endif // DPCT_USM_LEVEL_NONE
}
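/// Illustrative usage sketch for the internal helper above: a blocking
/// host-to-device copy of a std::vector. Works in both the USM and the buffer
/// (DPCT_USM_LEVEL_NONE) configurations; the sizes are placeholders.
/// \code
///   sycl::queue &q = dpct::get_default_queue();
///   std::vector<float> host(1024, 1.0f);
///   void *dev = dpct::detail::dpct_malloc(host.size() * sizeof(float), q);
///   dpct::detail::dpct_memcpy(q, dev, host.data(),
///                             host.size() * sizeof(float),
///                             dpct::host_to_device)
///       .wait();
///   dpct::detail::dpct_free(dev, q);
/// \endcode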
// Get the actual linear range covered by a copy of the given size, slice and pitch.
static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
size_t pitch) {
return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
}
static inline size_t get_offset(sycl::id<3> id, size_t slice,
size_t pitch) {
return slice * id.get(2) + pitch * id.get(1) + id.get(0);
}
/// copy 3D matrix specified by \p size from 3D matrix specified by \p from_ptr
/// and \p from_range to another specified by \p to_ptr and \p to_range.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
sycl::range<3> to_range, sycl::range<3> from_range,
sycl::id<3> to_id, sycl::id<3> from_id,
sycl::range<3> size, memcpy_direction direction,
const std::vector<sycl::event> &dep_events = {}) {
// RAII for host pointer
class host_buffer {
void *_buf;
size_t _size;
sycl::queue &_q;
const std::vector<sycl::event> &_deps; // events the deferred free depends on
public:
host_buffer(size_t size, sycl::queue &q,
const std::vector<sycl::event> &deps)
: _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
void *get_ptr() const { return _buf; }
size_t get_size() const { return _size; }
~host_buffer() {
if (_buf) {
_q.submit([&](sycl::handler &cgh) {
cgh.depends_on(_deps);
cgh.host_task([buf = _buf] { std::free(buf); });
});
}
}
};
std::vector<sycl::event> event_list;
size_t to_slice = to_range.get(1) * to_range.get(0),
from_slice = from_range.get(1) * from_range.get(0);
unsigned char *to_surface =
(unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
const unsigned char *from_surface =
(const unsigned char *)from_ptr +
get_offset(from_id, from_slice, from_range.get(0));
if (to_slice == from_slice && to_slice == size.get(1) * size.get(0)) {
return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
direction, dep_events)};
}
direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
size_t size_slice = size.get(1) * size.get(0);
switch (direction) {
case host_to_host:
for (size_t z = 0; z < size.get(2); ++z) {
unsigned char *to_ptr = to_surface;
const unsigned char *from_ptr = from_surface;
if (to_range.get(0) == from_range.get(0) &&
to_range.get(0) == size.get(0)) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
direction, dep_events));
} else {
for (size_t y = 0; y < size.get(1); ++y) {
event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
direction, dep_events));
to_ptr += to_range.get(0);
from_ptr += from_range.get(0);
}
}
to_surface += to_slice;
from_surface += from_slice;
}
break;
case host_to_device: {
host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
event_list);
std::vector<sycl::event> host_events;
if (to_slice == size_slice) {
// Copy host data to a temp host buffer with the shape of the target.
host_events =
dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
host_to_host, dep_events);
} else {
// Copy host data to a temp host buffer with the shape of the target.
host_events = dpct_memcpy(
q, buf.get_ptr(), from_surface, to_range, from_range,
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
// The target may contain padding data whose contents are unknown, so first
// fill the temp buffer with the current device contents.
std::vector<sycl::event>{
dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
device_to_host, dep_events)});
}
// Copy from temp host buffer to device with only one submit.
event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
buf.get_size(), host_to_device,
host_events));
break;
}
case device_to_host: {
host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
event_list);
// Copy from host temp buffer to host target with reshaping.
event_list = dpct_memcpy(
q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
sycl::id<3>(0, 0, 0), size, host_to_host,
// Copy from device to temp host buffer with only one submit.
std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
buf.get_size(),
device_to_host, dep_events)});
break;
}
case device_to_device:
#ifdef DPCT_USM_LEVEL_NONE
{
auto &mm = mem_mgr::instance();
auto to_alloc = mm.translate_ptr(to_surface);
auto from_alloc = mm.translate_ptr(from_surface);
size_t to_offset = (byte_t *)to_surface - to_alloc.alloc_ptr;
size_t from_offset = (byte_t *)from_surface - from_alloc.alloc_ptr;
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
auto to_o = sycl::id<1>(to_offset);
auto from_o = sycl::id<1>(from_offset);
sycl::accessor<byte_t, 1, sycl::access_mode::write,
sycl::access::target::device>
to_acc(to_alloc.buffer, cgh,
get_copy_range(size, to_slice, to_range.get(0)), to_o);
sycl::accessor<byte_t, 1, sycl::access_mode::read,
sycl::access::target::device>
from_acc(from_alloc.buffer, cgh,
get_copy_range(size, from_slice, from_range.get(0)), from_o);
cgh.parallel_for<class dpct_memcpy_3d_detail_usmnone>(
size,
[=](sycl::id<3> id) {
to_acc[get_offset(id, to_slice, to_range.get(0))] =
from_acc[get_offset(id, from_slice, from_range.get(0))];
});
}));
}
#else
event_list.push_back(q.submit([&](sycl::handler &cgh) {
cgh.depends_on(dep_events);
cgh.parallel_for<class dpct_memcpy_3d_detail>(
size,
[=](sycl::id<3> id) {
to_surface[get_offset(id, to_slice, to_range.get(0))] =
from_surface[get_offset(id, from_slice, from_range.get(0))];
});
}));
#endif
break;
default:
throw std::runtime_error("dpct_memcpy: invalid direction value");
}
return event_list;
}
/// memcpy 2D/3D matrix specified by pitched_data.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
sycl::range<3>(to.get_pitch(), to.get_y(), 1),
sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
size, direction);
}
/// memcpy 2D matrix with pitch.
static inline std::vector<sycl::event>
dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
size_t to_pitch, size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic) {
return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
sycl::range<3>(from_pitch, y, 1),
sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
sycl::range<3>(x, y, 1), direction);
}
namespace deprecated {
template <typename T, sycl::usm::alloc AllocKind>
class usm_allocator {
private:
using Alloc = sycl::usm_allocator<T, AllocKind>;
Alloc _impl;
public:
using value_type = typename std::allocator_traits<Alloc>::value_type;
using pointer = typename std::allocator_traits<Alloc>::pointer;
using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
using const_void_pointer =
typename std::allocator_traits<Alloc>::const_void_pointer;
using reference = typename std::allocator_traits<Alloc>::value_type &;
using const_reference =
const typename std::allocator_traits<Alloc>::value_type &;
using difference_type =
typename std::allocator_traits<Alloc>::difference_type;
using size_type = typename std::allocator_traits<Alloc>::size_type;
using propagate_on_container_copy_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = typename std::allocator_traits<
Alloc>::propagate_on_container_move_assignment;
using propagate_on_container_swap =
typename std::allocator_traits<Alloc>::propagate_on_container_swap;
using is_always_equal =
typename std::allocator_traits<Alloc>::is_always_equal;
template <typename U> struct rebind {
typedef usm_allocator<U, AllocKind> other;
};
usm_allocator() : _impl(dpct::get_default_queue()) {}
~usm_allocator() {}
usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
pointer address(reference r) { return &r; }
const_pointer address(const_reference r) { return &r; }
pointer allocate(size_type cnt, const_void_pointer hint = nullptr) {
return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
}
void deallocate(pointer p, size_type cnt) {
std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
}
size_type max_size() const {
return std::allocator_traits<Alloc>::max_size(_impl);
}
bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
};
} // namespace deprecated
inline void dpct_free(void *ptr,
const sycl::queue &q) {
if (ptr) {
#ifdef DPCT_USM_LEVEL_NONE
detail::mem_mgr::instance().mem_free(ptr);
#else
sycl::free(ptr, q.get_context());
#endif // DPCT_USM_LEVEL_NONE
}
}
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
/// Check whether the pointer \p ptr represents a device pointer.
///
/// \param ptr The pointer to be checked.
/// \returns true if \p ptr is a device pointer.
template<class T>
static inline bool is_device_ptr(T ptr) {
if constexpr (std::is_pointer<T>::value) {
return detail::mem_mgr::instance().is_device_ptr(ptr);
}
return false;
}
#endif
/// Get the buffer and the offset of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \returns a pair containing both the buffer and the offset.
static std::pair<buffer_t, size_t> get_buffer_and_offset(const void *ptr) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
size_t offset = (byte_t *)ptr - alloc.alloc_ptr;
return std::make_pair(alloc.buffer, offset);
} else {
throw std::runtime_error(
"NULL pointer argument in get_buffer_and_offset function is invalid");
}
}
/// Get the data pointed to by \p ptr as a 1D buffer reinterpreted as type T.
template <typename T> static sycl::buffer<T> get_buffer(const void *ptr) {
if (!ptr)
return sycl::buffer<T>(sycl::range<1>(0));
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.reinterpret<T>(
sycl::range<1>(alloc.size / sizeof(T)));
}
/// Get the buffer of a piece of memory pointed to by \p ptr.
///
/// \param ptr Pointer to a piece of memory.
/// \returns the buffer.
static buffer_t get_buffer(const void *ptr) {
return detail::mem_mgr::instance().translate_ptr(ptr).buffer;
}
/// A wrapper class that contains an accessor and an offset.
template <typename dataT,
sycl::access_mode accessMode = sycl::access_mode::read_write>
class access_wrapper {
sycl::accessor<byte_t, 1, accessMode> accessor;
size_t offset;
public:
  /// Construct the accessor wrapper for memory pointed to by \p ptr.
///
/// \param ptr Pointer to memory.
/// \param cgh The command group handler.
access_wrapper(const void *ptr, sycl::handler &cgh)
: accessor(get_buffer(ptr).get_access<accessMode>(cgh)), offset(0) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
offset = (byte_t *)ptr - alloc.alloc_ptr;
}
/// Get the device pointer.
///
/// \returns a device pointer with offset.
dataT get_raw_pointer() const { return (dataT)(&accessor[0] + offset); }
};
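/// Illustrative usage sketch (added for clarity, not part of the original
/// header). It assumes a hypothetical virtual device pointer `d_ptr` obtained
/// from dpct_malloc under DPCT_USM_LEVEL_NONE, and shows how access_wrapper
/// turns it into a raw pointer usable inside a kernel.
/// \code
/// sycl::queue &q = dpct::get_default_queue();
/// q.submit([&](sycl::handler &cgh) {
///   dpct::access_wrapper<float *> acc(d_ptr, cgh);
///   cgh.single_task([=]() {
///     float *p = acc.get_raw_pointer();
///     p[0] = 1.0f;
///   });
/// });
/// \endcode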
/// Get the accessor for memory pointed to by \p ptr.
///
/// \param ptr Pointer to memory.
/// If NULL is passed as an argument, an exception will be thrown.
/// \param cgh The command group handler.
/// \returns an accessor.
template <sycl::access_mode accessMode = sycl::access_mode::read_write>
static sycl::accessor<byte_t, 1, accessMode>
get_access(const void *ptr, sycl::handler &cgh) {
if (ptr) {
auto alloc = detail::mem_mgr::instance().translate_ptr(ptr);
return alloc.buffer.get_access<accessMode>(cgh);
} else {
throw std::runtime_error(
"NULL pointer argument in get_access function is invalid");
}
}
/// Allocate memory block on the device.
/// \param num_bytes Number of bytes to allocate.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
template <typename T>
static inline void *dpct_malloc(T num_bytes,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(static_cast<size_t>(num_bytes), q);
}
/// Get the host pointer from a buffer that is mapped to the virtual pointer \p ptr.
/// \param ptr Virtual pointer mapped to a device buffer.
/// \returns A host pointer.
template <typename T> static inline T *get_host_ptr(const void *ptr) {
auto BufferOffset = get_buffer_and_offset(ptr);
auto host_ptr =
BufferOffset.first.get_host_access()
.get_pointer();
return (T *)(host_ptr + BufferOffset.second);
}
/// Allocate memory block for 3D array on the device.
/// \param size Size of the memory block, in bytes.
/// \param q Queue to execute the allocate task.
/// \returns A pitched_data object which stores the memory info.
static inline pitched_data
dpct_malloc(sycl::range<3> size, sycl::queue &q = get_default_queue()) {
pitched_data pitch(nullptr, 0, size.get(0), size.get(1));
size_t pitch_size;
pitch.set_data_ptr(detail::dpct_malloc(pitch_size, size.get(0), size.get(1),
size.get(2), q));
pitch.set_pitch(pitch_size);
return pitch;
}
/// Allocate memory block for 2D array on the device.
/// \param [out] pitch Aligned size of x in bytes.
/// \param x Range in dim x.
/// \param y Range in dim y.
/// \param q Queue to execute the allocate task.
/// \returns A pointer to the newly allocated memory.
static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y,
sycl::queue &q = get_default_queue()) {
return detail::dpct_malloc(pitch, x, y, 1, q);
}
/// Free the memory pointed to by \p ptr.
/// \param ptr Pointer to free.
/// \param q Queue to execute the free task.
/// \returns no return value.
static inline void dpct_free(void *ptr,
sycl::queue &q = get_default_queue()) {
detail::dpct_free(ptr, q);
}
/// Free the device memory pointed to by a batch of pointers in \p pointers,
/// which are related to \p q, after \p events have completed.
///
/// \param pointers The pointers point to the device memory requested to be freed.
/// \param events The events to be waited.
/// \param q The sycl::queue the memory relates to.
inline void async_dpct_free(const std::vector<void *> &pointers,
const std::vector<sycl::event> &events,
sycl::queue &q = get_default_queue()) {
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] {
for (auto p : pointers)
if (p) {
detail::dpct_free(p, q);
}
});
});
}
/// Synchronously copies \p size bytes from the address specified by \p from_ptr
/// to the address specified by \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device,
/// \a device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction).wait();
}
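/// Illustrative usage sketch (added for clarity, not part of the original
/// header): a host-to-device round trip built only from the allocation, copy,
/// and free APIs in this file. The element count is hypothetical.
/// \code
/// sycl::queue &q = dpct::get_default_queue();
/// std::vector<float> src(1024, 1.0f), dst(1024);
/// void *dev = dpct::dpct_malloc(1024 * sizeof(float), q);
/// dpct::dpct_memcpy(dev, src.data(), 1024 * sizeof(float),
///                   dpct::host_to_device, q);
/// dpct::dpct_memcpy(dst.data(), dev, 1024 * sizeof(float),
///                   dpct::device_to_host, q);
/// dpct::dpct_free(dev, q);
/// \endcode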
/// Asynchronously copies \p size bytes from the address specified by \p
/// from_ptr to the address specified by \p to_ptr. The value of \p direction is
/// used to set the copy direction, it can be \a host_to_host, \a
/// host_to_device, \a device_to_host, \a device_to_device or \a automatic. The
/// return of the function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param from_ptr Pointer to source memory address.
/// \param size Number of bytes to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
}
/// Synchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while \p
/// from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The function will
/// return after the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(void *to_ptr, size_t to_pitch,
const void *from_ptr, size_t from_pitch,
size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch,
from_pitch, x, y, direction));
}
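/// Illustrative usage sketch (added for clarity, not part of the original
/// header): copying a hypothetical `rows` x `cols` byte matrix from a tightly
/// packed host array `host_matrix` into a pitched device allocation created
/// with the 2D dpct_malloc overload above.
/// \code
/// size_t pitch = 0;
/// void *dev = dpct::dpct_malloc(pitch, cols /*x in bytes*/, rows /*y*/, q);
/// dpct::dpct_memcpy(dev, pitch, host_matrix, cols /*from_pitch*/,
///                   cols /*x*/, rows /*y*/, dpct::host_to_device, q);
/// \endcode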
/// Asynchronously copies 2D matrix specified by \p x and \p y from the address
/// specified by \p from_ptr to the address specified by \p to_ptr, while
/// \p from_pitch and \p to_pitch are the range of dim x in bytes of the matrix
/// specified by \p from_ptr and \p to_ptr. The value of \p direction is used to
/// set the copy direction, it can be \a host_to_host, \a host_to_device, \a
/// device_to_host, \a device_to_device or \a automatic. The return of the
/// function does NOT guarantee the copy is completed.
///
/// \param to_ptr Pointer to destination memory address.
/// \param to_pitch Range of dim x in bytes of destination matrix.
/// \param from_ptr Pointer to source memory address.
/// \param from_pitch Range of dim x in bytes of source matrix.
/// \param x Range of dim x of matrix to be copied.
/// \param y Range of dim y of matrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
size_t from_pitch, size_t x, size_t y,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
direction);
}
/// Synchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The source and destination positions are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The function will return after the copy is completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of the source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void dpct_memcpy(pitched_data to, sycl::id<3> to_pos,
pitched_data from, sycl::id<3> from_pos,
sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = dpct::get_default_queue()) {
sycl::event::wait(
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction));
}
/// Asynchronously copies a subset of a 3D matrix specified by \p from to another
/// 3D matrix specified by \p to. The source and destination positions are
/// specified by \p from_pos and \p to_pos. The copied matrix size is specified
/// by \p size.
/// The value of \p direction is used to set the copy direction, it can be \a
/// host_to_host, \a host_to_device, \a device_to_host, \a device_to_device or
/// \a automatic. The return of the function does NOT guarantee the copy is
/// completed.
///
/// \param to Destination matrix info.
/// \param to_pos Position of destination.
/// \param from Source matrix info.
/// \param from_pos Position of the source.
/// \param size Range of the submatrix to be copied.
/// \param direction Direction of the copy.
/// \param q Queue to execute the copy task.
/// \returns no return value.
static inline void
async_dpct_memcpy(pitched_data to, sycl::id<3> to_pos, pitched_data from,
sycl::id<3> from_pos, sycl::range<3> size,
memcpy_direction direction = automatic,
sycl::queue &q = get_default_queue()) {
detail::dpct_memcpy(q, to, to_pos, from, from_pos, size, direction);
}
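/// Illustrative usage sketch (added for clarity, not part of the original
/// header): copying a sub-volume between two hypothetical pitched 3D
/// allocations created with the range-based dpct_malloc overload above.
/// \code
/// sycl::range<3> extent(width_in_bytes, height, depth);
/// dpct::pitched_data d_src = dpct::dpct_malloc(extent, q);
/// dpct::pitched_data d_dst = dpct::dpct_malloc(extent, q);
/// dpct::dpct_memcpy(d_dst, sycl::id<3>(0, 0, 0), d_src, sycl::id<3>(0, 0, 0),
///                   extent, dpct::device_to_device, q);
/// \endcode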
/// Synchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The function will return after the memset operation is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size).wait();
}
/// Asynchronously sets \p value to the first \p size bytes starting from \p
/// dev_ptr. The return of the function does NOT guarantee the memset operation
/// is completed.
///
/// \param dev_ptr Pointer to the device memory address.
/// \param value Value to be set.
/// \param size Number of bytes to be set to the value.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static void async_dpct_memset(void *dev_ptr, int value, size_t size,
sycl::queue &q = dpct::get_default_queue()) {
detail::dpct_memset(q, dev_ptr, value, size);
}
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the size of the 2D memory region to set. \p pitch is the number
/// of bytes in the linear dimension, including padding bytes. The function will
/// return after the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, ptr, pitch, val, x, y));
}
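/// Illustrative usage sketch (added for clarity, not part of the original
/// header): zeroing the payload rows of a hypothetical pitched 2D allocation;
/// only \p x bytes per row are touched, so padding bytes are left untouched.
/// \code
/// size_t pitch = 0;
/// void *dev = dpct::dpct_malloc(pitch, row_bytes, rows, q);
/// dpct::dpct_memset(dev, pitch, 0, row_bytes, rows, q);
/// \endcode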
/// Sets \p val to the 2D memory region pointed to by \p ptr in \p q. \p x and
/// \p y specify the size of the 2D memory region to set. \p pitch is the number
/// of bytes in the linear dimension, including padding bytes. The return of the
/// function does NOT guarantee the memset operation is completed.
///
/// \param ptr Pointer to the device memory region.
/// \param pitch Bytes in linear dimension, including padding bytes.
/// \param val Value to be set.
/// \param x The memory size to set in the linear dimension.
/// \param y The memory size to set in the second dimension.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(void *ptr, size_t pitch, int val, size_t x,
size_t y,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, ptr, pitch, val, x, y);
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the size of the 3D memory region to set. The function will return
/// after the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The size of the 3D memory region to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
sycl::event::wait(detail::dpct_memset(q, pitch, val, size));
}
/// Sets \p val to the 3D memory region specified by \p pitch in \p q. \p size
/// specifies the size of the 3D memory region to set. The return of the function
/// does NOT guarantee the memset operation is completed.
///
/// \param pitch Specifies the 3D memory region.
/// \param val Value to be set.
/// \param size The size of the 3D memory region to set.
/// \param q The queue in which the operation is done.
/// \returns no return value.
static inline void async_dpct_memset(pitched_data pitch, int val,
sycl::range<3> size,
sycl::queue &q = get_default_queue()) {
detail::dpct_memset(q, pitch, val, size);
}
/// dpct accessor used as device function parameter.
template <class T, memory_region Memory, size_t Dimension> class accessor;
template <class T, memory_region Memory> class accessor<T, Memory, 3> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<3>;
accessor(pointer_t data, const sycl::range<3> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<3> &in_range)
: accessor(acc.get_pointer(), in_range) {}
accessor<T, Memory, 2> operator[](size_t index) const {
sycl::range<2> sub(_range.get(1), _range.get(2));
return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<3> _range;
};
template <class T, memory_region Memory> class accessor<T, Memory, 2> {
public:
using memory_t = detail::memory_traits<Memory, T>;
using element_t = typename memory_t::element_t;
using pointer_t = typename memory_t::pointer_t;
using accessor_t = typename memory_t::template accessor_t<2>;
accessor(pointer_t data, const sycl::range<2> &in_range)
: _data(data), _range(in_range) {}
template <memory_region M = Memory>
accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
: accessor(acc, acc.get_range()) {}
accessor(const accessor_t &acc, const sycl::range<2> &in_range)
: accessor(acc.get_pointer(), in_range) {}
pointer_t operator[](size_t index) const {
return _data + _range.get(1) * index;
}
pointer_t get_ptr() const { return _data; }
private:
pointer_t _data;
sycl::range<2> _range;
};
namespace detail {
/// Device variable with address space of shared, global or constant.
template <class T, memory_region Memory, size_t Dimension>
class device_memory {
public:
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<Dimension>;
using value_t = typename detail::memory_traits<Memory, T>::value_t;
using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
device_memory() : device_memory(sycl::range<Dimension>(1)) {}
/// Constructor of 1-D array with initializer list
device_memory(
const sycl::range<Dimension> &in_range,
std::initializer_list<value_t> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range.size());
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
}
/// Constructor of 2-D array with initializer list
template <size_t D = Dimension>
device_memory(
const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
std::initializer_list<std::initializer_list<value_t>> &&init_list)
: device_memory(in_range) {
assert(init_list.size() <= in_range[0]);
_host_ptr = (value_t *)std::malloc(_size);
std::memset(_host_ptr, 0, _size);
auto tmp_data = _host_ptr;
for (auto sub_list : init_list) {
assert(sub_list.size() <= in_range[1]);
std::memcpy(tmp_data, sub_list.begin(), sub_list.size() * sizeof(T));
tmp_data += in_range[1];
}
}
/// Constructor with range
device_memory(const sycl::range<Dimension> &range_in)
: _size(range_in.size() * sizeof(T)), _range(range_in), _reference(false),
_host_ptr(nullptr), _device_ptr(nullptr) {
static_assert(
(Memory == global) || (Memory == constant) || (Memory == shared),
"device memory region should be global, constant or shared");
    // Make sure that the singleton classes mem_mgr and dev_mgr are destructed
    // later than this object.
detail::mem_mgr::instance();
dev_mgr::instance();
}
  /// Constructor with variadic dimension sizes
template <class... Args>
device_memory(Args... Arguments)
: device_memory(sycl::range<Dimension>(Arguments...)) {}
~device_memory() {
if (_device_ptr && !_reference)
dpct::dpct_free(_device_ptr);
if (_host_ptr)
std::free(_host_ptr);
}
  /// Allocate memory with the default queue, and initialize the memory if an
  /// initial value was provided.
void init() {
init(dpct::get_default_queue());
}
  /// Allocate memory with the specified queue, and initialize the memory if an
  /// initial value was provided.
void init(sycl::queue &q) {
if (_device_ptr)
return;
if (!_size)
return;
allocate_device(q);
if (_host_ptr)
detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size, host_to_device);
}
  /// Rebind this variable to an existing device pointer \p src of \p size bytes.
void assign(value_t *src, size_t size) {
this->~device_memory();
new (this) device_memory(src, size);
}
  /// Get the memory pointer of the memory object: a virtual pointer when USM
  /// is not used, and a device pointer when USM is used.
value_t *get_ptr() {
return get_ptr(get_default_queue());
}
  /// Get the memory pointer of the memory object: a virtual pointer when USM
  /// is not used, and a device pointer when USM is used.
value_t *get_ptr(sycl::queue &q) {
init(q);
return _device_ptr;
}
/// Get the device memory object size in bytes.
size_t get_size() { return _size; }
template <size_t D = Dimension>
typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
init();
#ifdef DPCT_USM_LEVEL_NONE
return dpct::get_buffer<typename std::enable_if<D == 1, T>::type>(
_device_ptr)
.template get_access<sycl::access_mode::read_write>()[index];
#else
return _device_ptr[index];
#endif // DPCT_USM_LEVEL_NONE
}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
return get_buffer(_device_ptr)
.template reinterpret<T, Dimension>(_range)
.template get_access<detail::memory_traits<Memory, T>::mode,
detail::memory_traits<Memory, T>::target>(cgh);
}
#else
/// Get dpct::accessor with dimension info for the device memory object
/// when usm is used and dimension is greater than 1.
template <size_t D = Dimension>
typename std::enable_if<D != 1, dpct_accessor_t>::type
get_access(sycl::handler &cgh) {
return dpct_accessor_t((T *)_device_ptr, _range);
}
#endif // DPCT_USM_LEVEL_NONE
private:
device_memory(value_t *memory_ptr, size_t size)
: _size(size), _range(size / sizeof(T)), _reference(true),
_device_ptr(memory_ptr) {}
void allocate_device(sycl::queue &q) {
#ifndef DPCT_USM_LEVEL_NONE
if (Memory == shared) {
_device_ptr = (value_t *)sycl::malloc_shared(
_size, q.get_device(), q.get_context());
return;
}
#endif
_device_ptr = (value_t *)detail::dpct_malloc(_size, q);
}
size_t _size;
sycl::range<Dimension> _range;
bool _reference;
value_t *_host_ptr;
value_t *_device_ptr;
};
template <class T, memory_region Memory>
class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
public:
using base = device_memory<T, Memory, 1>;
using value_t = typename base::value_t;
using accessor_t =
typename detail::memory_traits<Memory, T>::template accessor_t<0>;
/// Constructor with initial value.
device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
/// Default constructor
device_memory() : base(1) {}
#ifdef DPCT_USM_LEVEL_NONE
/// Get sycl::accessor for the device memory object when usm is not used.
accessor_t get_access(sycl::handler &cgh) {
auto buf = get_buffer(base::get_ptr())
.template reinterpret<T, 1>(sycl::range<1>(1));
return accessor_t(buf, cgh);
}
#endif // DPCT_USM_LEVEL_NONE
};
}
template <class T, size_t Dimension>
using global_memory = detail::device_memory<T, global, Dimension>;
template <class T, size_t Dimension>
using constant_memory = detail::device_memory<T, constant, Dimension>;
template <class T, size_t Dimension>
using shared_memory = detail::device_memory<T, shared, Dimension>;
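/// Illustrative usage sketch (added for clarity, not part of the original
/// header): a hypothetical file-scope 1-D constant_memory variable initialized
/// from a list; get_ptr() lazily allocates the device memory and copies the
/// initial value on first use.
/// \code
/// static dpct::constant_memory<float, 1> c_coeffs(sycl::range<1>(4),
///                                                 {0.1f, 0.2f, 0.3f, 0.4f});
/// // ... later, on the host:
/// float *dev_coeffs = c_coeffs.get_ptr(dpct::get_default_queue());
/// \endcode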
// dpct::deprecated:: is for functionality that was introduced for compatibility
// purposes but relies on deprecated C++ features, which either have been removed
// or will be removed in future standards.
// Direct use of deprecated functionality in this namespace should be avoided.
namespace deprecated {
template <typename T>
using usm_host_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::host>;
template <typename T>
using usm_device_allocator = detail::deprecated::usm_allocator<T, sycl::usm::alloc::shared>;
} // namespace deprecated
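/// Illustrative usage sketch (added for clarity, not part of the original
/// header): the deprecated host allocator default-constructs against the dpct
/// default queue and can back a standard container with USM host memory.
/// \code
/// std::vector<int, dpct::deprecated::usm_host_allocator<int>> host_vec(256);
/// \endcode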
class pointer_attributes {
public:
void init(const void *ptr,
sycl::queue &q = dpct::get_default_queue()) {
#ifdef DPCT_USM_LEVEL_NONE
    throw std::runtime_error(
        "dpct::pointer_attributes: only works for USM pointers.");
#else
memory_type = sycl::get_pointer_type(ptr, q.get_context());
device_pointer = (memory_type !=
sycl::usm::alloc::unknown) ? ptr : nullptr;
host_pointer = (memory_type !=
sycl::usm::alloc::unknown) &&
(memory_type != sycl::usm::alloc::device) ? ptr : nullptr;
sycl::device device_obj = sycl::get_pointer_device(ptr, q.get_context());
device_id = dpct::dev_mgr::instance().get_device_id(device_obj);
#endif
}
sycl::usm::alloc get_memory_type() {
return memory_type;
}
const void *get_device_pointer() {
return device_pointer;
}
const void *get_host_pointer() {
return host_pointer;
}
bool is_memory_shared() {
return memory_type == sycl::usm::alloc::shared;
}
unsigned int get_device_id() {
return device_id;
}
private:
sycl::usm::alloc memory_type = sycl::usm::alloc::unknown;
const void *device_pointer = nullptr;
const void *host_pointer = nullptr;
unsigned int device_id = 0;
};
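/// Illustrative usage sketch (added for clarity, not part of the original
/// header): querying a hypothetical USM pointer `ptr` on a hypothetical queue
/// `q`; note that init() throws when DPCT_USM_LEVEL_NONE is defined.
/// \code
/// dpct::pointer_attributes attrs;
/// attrs.init(ptr, q);
/// if (attrs.get_memory_type() == sycl::usm::alloc::device) {
///   unsigned dev_id = attrs.get_device_id();
/// }
/// \endcode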
} // namespace dpct
#endif // __DPCT_MEMORY_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_utils.hpp | //==---- dpl_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_DPL_UTILS_HPP__
#define __DPCT_DPL_UTILS_HPP__
#define ONEDPL_USE_DPCPP_BACKEND 1
#define __USE_DPCT 1
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "dpl_extras/memory.h"
#include "dpl_extras/algorithm.h"
#include "dpl_extras/numeric.h"
#include "dpl_extras/iterators.h"
#include "dpl_extras/vector.h"
#include "dpl_extras/dpcpp_extensions.h"
#endif // __DPCT_DPL_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/math.hpp | //==---- math.hpp ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MATH_HPP__
#define __DPCT_MATH_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
namespace detail {
template <typename VecT, class BinaryOperation, class = void>
class vectorized_binary {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
VecT v4;
for (size_t i = 0; i < v4.size(); ++i) {
v4[i] = binary_op(a[i], b[i]);
}
return v4;
}
};
template <typename VecT, class BinaryOperation>
class vectorized_binary<
VecT, BinaryOperation,
std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>> {
public:
inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op) {
return binary_op(a, b).template as<VecT>();
}
};
template <typename T> bool isnan(const T a) { return sycl::isnan(a); }
// TODO: Need to add more specializations, such as a bfloat16 version.
} // namespace detail
/// Compute fast_length for variable-length array
/// \param [in] a The array
/// \param [in] len Length of the array
/// \returns The computed fast_length
inline float fast_length(const float *a, int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::fast_length(sycl::float2(a[0], a[1]));
case 3:
return sycl::fast_length(sycl::float3(a[0], a[1], a[2]));
case 4:
return sycl::fast_length(sycl::float4(a[0], a[1], a[2], a[3]));
case 0:
return 0;
default:
float f = 0;
for (int i = 0; i < len; ++i)
f += a[i] * a[i];
return sycl::sqrt(f);
}
}
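/// Illustrative usage sketch (added for clarity, not part of the original
/// header): arrays of length 1-4 map onto sycl::fast_length, while longer
/// arrays fall back to the scalar loop above.
/// \code
/// float v[3] = {1.0f, 2.0f, 2.0f};
/// float len = dpct::fast_length(v, 3); // approximately 3.0f
/// \endcode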
/// Calculate the Euclidean length (2-norm) of the input array.
/// \param [in] a The array pointer
/// \param [in] len Length of the array
/// \returns The computed length
template <typename T> inline T length(const T *a, const int len) {
switch (len) {
case 1:
return a[0];
case 2:
return sycl::length(sycl::vec<T, 2>(a[0], a[1]));
case 3:
return sycl::length(sycl::vec<T, 3>(a[0], a[1], a[2]));
case 4:
return sycl::length(sycl::vec<T, 4>(a[0], a[1], a[2], a[3]));
default:
T ret = 0;
for (int i = 0; i < len; ++i)
ret += a[i] * a[i];
return sycl::sqrt(ret);
}
}
/// Performs comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
compare(const T a, const T b, const BinaryOperation binary_op) {
return binary_op(a, b);
}
template <typename T>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<std::not_equal_to<>, T, T>, bool>, bool>
compare(const T a, const T b, const std::not_equal_to<> binary_op) {
return !detail::isnan(a) && !detail::isnan(b) && binary_op(a, b);
}
/// Performs unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<
std::is_same_v<std::invoke_result_t<BinaryOperation, T, T>, bool>, bool>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return detail::isnan(a) || detail::isnan(b) || binary_op(a, b);
}
/// Performs a 2-element comparison and returns true if both results are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
compare_both(const T a, const T b, const BinaryOperation binary_op) {
return compare(a[0], b[0], binary_op) && compare(a[1], b[1], binary_op);
}
/// Performs a 2-element unordered comparison and returns true if both results
/// are true.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, bool>
unordered_compare_both(const T a, const T b, const BinaryOperation binary_op) {
return unordered_compare(a[0], b[0], binary_op) &&
unordered_compare(a[1], b[1], binary_op);
}
/// Performs a 2-element comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
compare(const T a, const T b, const BinaryOperation binary_op) {
return {compare(a[0], b[0], binary_op), compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element comparison; the compare result of each element is 0
/// (false) or 0xffff (true). Returns an unsigned int composed of the compare
/// results of the two elements.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned compare_mask(const sycl::vec<T, 2> a, const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-compare(a[0], b[0], binary_op),
-compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Performs a 2-element unordered comparison.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline std::enable_if_t<T::size() == 2, T>
unordered_compare(const T a, const T b, const BinaryOperation binary_op) {
return {unordered_compare(a[0], b[0], binary_op),
unordered_compare(a[1], b[1], binary_op)};
}
/// Performs a 2-element unordered comparison; the compare result of each
/// element is 0 (false) or 0xffff (true). Returns an unsigned int composed of
/// the compare results of the two elements.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] binary_op functor that implements the binary operation
/// \returns the comparison result
template <typename T, class BinaryOperation>
inline unsigned unordered_compare_mask(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const BinaryOperation binary_op) {
return sycl::vec<short, 2>(-unordered_compare(a[0], b[0], binary_op),
-unordered_compare(a[1], b[1], binary_op))
.as<sycl::vec<unsigned, 1>>();
}
/// Determine, for each element of a 2-element value, whether it is NaN.
/// \param [in] a The input value
/// \returns the comparison result
template <typename T>
inline std::enable_if_t<T::size() == 2, T> isnan(const T a) {
return {detail::isnan(a[0]), detail::isnan(a[1])};
}
// min function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double min(const double a, const float b) {
return sycl::fmin(a, static_cast<double>(b));
}
inline double min(const float a, const double b) {
return sycl::fmin(static_cast<double>(a), b);
}
inline float min(const float a, const float b) { return sycl::fmin(a, b); }
inline double min(const double a, const double b) { return sycl::fmin(a, b); }
inline std::uint32_t min(const std::uint32_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t min(const std::int32_t a, const std::uint32_t b) {
return sycl::min(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t min(const std::int32_t a, const std::int32_t b) {
return sycl::min(a, b);
}
inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int64_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int64_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t min(const std::int64_t a, const std::int64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b) {
return sycl::min(a, b);
}
inline std::uint64_t min(const std::uint64_t a, const std::int32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::int32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b) {
return sycl::min(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b) {
return sycl::min(static_cast<std::uint64_t>(a), b);
}
// max function overloads.
// For floating-point types, `float` or `double` arguments are acceptable.
// For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
// `std::int64_t` type arguments are acceptable.
inline double max(const double a, const float b) {
return sycl::fmax(a, static_cast<double>(b));
}
inline double max(const float a, const double b) {
return sycl::fmax(static_cast<double>(a), b);
}
inline float max(const float a, const float b) { return sycl::fmax(a, b); }
inline double max(const double a, const double b) { return sycl::fmax(a, b); }
inline std::uint32_t max(const std::uint32_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint32_t>(b));
}
inline std::uint32_t max(const std::int32_t a, const std::uint32_t b) {
return sycl::max(static_cast<std::uint32_t>(a), b);
}
inline std::int32_t max(const std::int32_t a, const std::int32_t b) {
return sycl::max(a, b);
}
inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int64_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int64_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::int64_t max(const std::int64_t a, const std::int64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b) {
return sycl::max(a, b);
}
inline std::uint64_t max(const std::uint64_t a, const std::int32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::int32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b) {
return sycl::max(a, static_cast<std::uint64_t>(b));
}
inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b) {
return sycl::max(static_cast<std::uint64_t>(a), b);
}
/// Performs relu saturation.
/// \param [in] a The input value
/// \returns the relu saturation result
template <typename T> inline T relu(const T a) {
if (!detail::isnan(a) && a < 0.f)
return 0.f;
return a;
}
template <class T> inline sycl::vec<T, 2> relu(const sycl::vec<T, 2> a) {
return {relu(a[0]), relu(a[1])};
}
/// Performs complex number multiply addition.
/// \param [in] a The first value
/// \param [in] b The second value
/// \param [in] c The third value
/// \returns the operation result
template <typename T>
inline sycl::vec<T, 2> complex_mul_add(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b,
const sycl::vec<T, 2> c) {
return sycl::vec<T, 2>{a[0] * b[0] - a[1] * b[1] + c[0],
a[0] * b[1] + a[1] * b[0] + c[1]};
}
/// Compares two values and returns the larger one. If either of the inputs is
/// NaN, then NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the bigger value
template <typename T> inline T fmax_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmax(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmax_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmax_nan(a[0], b[0]), fmax_nan(a[1], b[1])};
}
/// Compares two values and returns the smaller one. If either of the inputs is
/// NaN, then NaN is returned.
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns the smaller value
template <typename T> inline T fmin_nan(const T a, const T b) {
if (detail::isnan(a) || detail::isnan(b))
return NAN;
return sycl::fmin(a, b);
}
template <typename T>
inline sycl::vec<T, 2> fmin_nan(const sycl::vec<T, 2> a,
const sycl::vec<T, 2> b) {
return {fmin_nan(a[0], b[0]), fmin_nan(a[1], b[1])};
}
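/// Illustrative usage sketch (added for clarity, not part of the original
/// header): unlike sycl::fmax/sycl::fmin, these helpers propagate NaN instead
/// of ignoring it.
/// \code
/// float a = 1.0f, b = NAN;
/// float m1 = dpct::fmax_nan(a, b); // NaN
/// float m2 = sycl::fmax(a, b);     // 1.0f
/// \endcode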
/// A sycl::abs wrapper functor.
struct abs {
template <typename T> auto operator()(const T x) const {
return sycl::abs(x);
}
};
/// A sycl::abs_diff wrapper functor.
struct abs_diff {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::abs_diff(x, y);
}
};
/// A sycl::add_sat wrapper functor.
struct add_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::add_sat(x, y);
}
};
/// A sycl::rhadd wrapper functor.
struct rhadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::rhadd(x, y);
}
};
/// A sycl::hadd wrapper functor.
struct hadd {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::hadd(x, y);
}
};
/// A sycl::max wrapper functor.
struct maximum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::max(x, y);
}
};
/// A sycl::min wrapper functor.
struct minimum {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::min(x, y);
}
};
/// A sycl::sub_sat wrapper functor.
struct sub_sat {
template <typename T> auto operator()(const T x, const T y) const {
return sycl::sub_sat(x, y);
}
};
/// Compute vectorized binary operation value for two values, with each value
/// treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] BinaryOperation The binary operation class
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized binary operation value of the two values
template <typename VecT, class BinaryOperation>
inline unsigned vectorized_binary(unsigned a, unsigned b,
const BinaryOperation binary_op) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 =
detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
v0 = v4.template as<sycl::vec<unsigned, 1>>();
return v0;
}
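/// Illustrative usage sketch (added for clarity, not part of the original
/// header, and assuming the sycl::uchar4 vector alias): two 32-bit values are
/// reinterpreted as four unsigned bytes each and added lane-wise with
/// saturation via the add_sat functor defined above.
/// \code
/// unsigned a = 0x01FF0310u, b = 0x02020202u;
/// unsigned r = dpct::vectorized_binary<sycl::uchar4>(a, b, dpct::add_sat());
/// \endcode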
/// Compute vectorized isgreater for two values, with each value treated as a
/// vector type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized result of the greater-than comparison of the two values
template <typename S, typename T> inline T vectorized_isgreater(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = v2 > v3;
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized max for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized max of the two values
template <typename S, typename T> inline T vectorized_max(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::max(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized min for two values, with each value treated as a vector
/// type \p S.
/// \tparam [in] S The type of the vector
/// \tparam [in] T The type of the original values
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The vectorized min of the two values
template <typename S, typename T> inline T vectorized_min(T a, T b) {
sycl::vec<T, 1> v0{a}, v1{b};
auto v2 = v0.template as<S>();
auto v3 = v1.template as<S>();
auto v4 = sycl::min(v2, v3);
v0 = v4.template as<sycl::vec<T, 1>>();
return v0;
}
/// Compute vectorized unary operation for a value, with the value treated as a
/// vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \tparam [in] UnaryOperation The unary operation class
/// \param [in] a The input value
/// \returns The vectorized unary operation value of the input value
template <typename VecT, class UnaryOperation>
inline unsigned vectorized_unary(unsigned a, const UnaryOperation unary_op) {
sycl::vec<unsigned, 1> v0{a};
auto v1 = v0.as<VecT>();
auto v2 = unary_op(v1);
v0 = v2.template as<sycl::vec<unsigned, 1>>();
return v0;
}
/// Compute the sum of the element-wise absolute differences of two values
/// without modulo overflow, with each value treated as a vector type \p VecT.
/// \tparam [in] VecT The type of the vector
/// \param [in] a The first value
/// \param [in] b The second value
/// \returns The sum of the element-wise absolute differences of the two values
template <typename VecT>
inline unsigned vectorized_sum_abs_diff(unsigned a, unsigned b) {
sycl::vec<unsigned, 1> v0{a}, v1{b};
auto v2 = v0.as<VecT>();
auto v3 = v1.as<VecT>();
auto v4 = sycl::abs_diff(v2, v3);
unsigned sum = 0;
for (size_t i = 0; i < v4.size(); ++i) {
sum += v4[i];
}
return sum;
}
} // namespace dpct
#endif // __DPCT_MATH_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/blas_utils.hpp | //==---- blas_utils.hpp----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_BLAS_UTILS_HPP__
#define __DPCT_BLAS_UTILS_HPP__
#include "memory.hpp"
#include "util.hpp"
#include "lib_common_utils.hpp"
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#include <utility>
#include <vector>
#include <thread>
namespace dpct {
/// Get the value of \p s.
/// Copy the data to the host synchronously, then return the data.
/// \param [in] s The pointer pointing to the data.
/// \param [in] q The queue where the memory copy should be executed.
template <typename T>
inline auto get_value(const T *s, sycl::queue &q) {
return detail::get_value(s, q);
}
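/// Illustrative usage sketch (added for clarity, not part of the original
/// header): reading back a hypothetical scalar `d_alpha` that may live in
/// device, shared, or host memory before passing it to a call that expects a
/// host-side value.
/// \code
/// float alpha_host = dpct::get_value(d_alpha, q);
/// \endcode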
namespace detail {
inline void mem_free(sycl::queue *exec_queue,
std::vector<void *> pointers_array, sycl::event e) {
e.wait();
for (auto p : pointers_array)
sycl::free(p, *exec_queue);
}
inline int stride_for(int num_elems, int mem_align_in_elems) {
return ((num_elems - 1) / mem_align_in_elems + 1) * mem_align_in_elems;
}
#ifndef DPCT_USM_LEVEL_NONE
template<typename T>
class working_memory {
T *_input_ptr;
T *_temp_ptr;
bool _is_sycl_malloced = false;
bool _is_scalar_value = false;
sycl::queue _q;
sycl::event _e;
public:
working_memory(size_t size, sycl::queue q) : _q(q) {
_is_scalar_value = false;
_temp_ptr = (T *)sycl::malloc_device(size, q);
}
working_memory(T *result_ptr, sycl::queue q) : _input_ptr(result_ptr), _q(q) {
_is_scalar_value = true;
_is_sycl_malloced = sycl::get_pointer_type(_input_ptr, _q.get_context()) !=
sycl::usm::alloc::unknown;
if (!_is_sycl_malloced)
_temp_ptr = sycl::malloc_shared<T>(1, _q);
}
auto get_ptr() {
if (_is_scalar_value && _is_sycl_malloced)
return _input_ptr;
return _temp_ptr;
}
void set_event(sycl::event e) { _e = e; }
~working_memory() {
if (_is_scalar_value) {
if (!_is_sycl_malloced) {
_q.memcpy(_input_ptr, _temp_ptr, sizeof(T)).wait();
sycl::free(_temp_ptr, _q);
}
} else {
std::vector<void *> ptrs{_temp_ptr};
dpct::async_dpct_free(ptrs, {_e});
}
}
};
#endif
template <typename Tx, typename Tr>
inline void nrm2_impl(sycl::queue &q, int n, const void *x, int incx,
void *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Tx>(x);
auto r_buffer =
sycl::buffer<Tr, 1>(reinterpret_cast<Tr *>(result), sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
oneapi::mkl::blas::column_major::nrm2(q, n, x_buffer, incx, r_buffer);
#else
working_memory<Tr> res_mem(reinterpret_cast<Tr *>(result), q);
oneapi::mkl::blas::column_major::nrm2(q, n, reinterpret_cast<const Tx *>(x),
incx, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate, class Txy, class Tr>
inline void dotuc_impl(sycl::queue &q, int n, const Txy *x, int incx,
const Txy *y, int incy, Tr *result) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
#ifdef DPCT_USM_LEVEL_NONE
auto x_buffer = dpct::get_buffer<Txy>(x);
auto y_buffer = dpct::get_buffer<Txy>(y);
auto r_buffer = sycl::buffer<Tr, 1>((Tr *)result, sycl::range<1>(1));
if (dpct::is_device_ptr(result))
r_buffer = dpct::get_buffer<Tr>(result);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
else
oneapi::mkl::blas::column_major::dotu(q, n, x_buffer, incx, y_buffer,
incy, r_buffer);
} else
oneapi::mkl::blas::column_major::dot(q, n, x_buffer, incx, y_buffer, incy,
r_buffer);
#else
working_memory<Tr> res_mem(result, q);
if constexpr (std::is_same_v<Txy, std::complex<float>> ||
std::is_same_v<Txy, std::complex<double>>) {
if constexpr (is_conjugate)
oneapi::mkl::blas::column_major::dotc(q, n, x, incx, y, incy, res_mem.get_ptr());
else
oneapi::mkl::blas::column_major::dotu(q, n, x, incx, y, incy, res_mem.get_ptr());
} else
oneapi::mkl::blas::column_major::dot(q, n, x, incx, y, incy, res_mem.get_ptr());
#endif
#endif
}
template <bool is_conjugate>
inline void dotuc(sycl::queue &q, int n, const void *x,
library_data_t x_type, int incx, const void *y,
library_data_t y_type, int incy, void *result,
library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, y_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const float *>(x), incx,
reinterpret_cast<const float *>(y), incy,
reinterpret_cast<float *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const double *>(x), incx,
reinterpret_cast<const double *>(y), incy,
reinterpret_cast<double *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<float> *>(x), incx,
reinterpret_cast<const std::complex<float> *>(y), incy,
reinterpret_cast<std::complex<float> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const std::complex<double> *>(x), incx,
reinterpret_cast<const std::complex<double> *>(y), incy,
reinterpret_cast<std::complex<double> *>(result));
break;
}
case detail::get_type_combination_id(library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half): {
detail::dotuc_impl<is_conjugate>(
q, n, reinterpret_cast<const sycl::half *>(x), incx,
reinterpret_cast<const sycl::half *>(y), incy,
reinterpret_cast<sycl::half *>(result));
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
template <class Tx, class Te>
inline void scal_impl(sycl::queue &q, int n, const void *alpha, void *x,
int incx) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<Tx *>(x));
oneapi::mkl::blas::column_major::scal(q, n, alpha_val,
data_x, incx);
#endif
}
template <class Txy, class Te>
inline void axpy_impl(sycl::queue &q, int n, const void *alpha, const void *x,
int incx, void *y, int incy) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Te alpha_val = dpct::get_value(reinterpret_cast<const Te *>(alpha), q);
auto data_x = get_memory(reinterpret_cast<const Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::axpy(q, n, alpha_val,
data_x, incx,
data_y, incy);
#endif
}
template <class Txy, class Tc, class Ts>
inline void rot_impl(sycl::queue &q, int n, void *x, int incx, void *y,
int incy, const void *c, const void *s) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Tc c_value = dpct::get_value(reinterpret_cast<const Tc *>(c), q);
Ts s_value = dpct::get_value(reinterpret_cast<const Ts *>(s), q);
auto data_x = get_memory(reinterpret_cast<Txy *>(x));
auto data_y = get_memory(reinterpret_cast<Txy *>(y));
oneapi::mkl::blas::column_major::rot(q, n, data_x, incx,
data_y, incy, c_value,
s_value);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, int lda, const void *b,
int ldb, const void *beta, void *c, int ldc) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
data_b, ldb, beta_value, data_c, ldc);
#endif
}
template <class Ta, class Tb, class Tc, class Ts>
inline void gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void **a, int lda,
const void **b, int ldb, const void *beta, void **c,
int ldc, int batch_size) {
struct matrix_info_t {
oneapi::mkl::transpose transpose_info[2];
Ts value_info[2];
std::int64_t size_info[3];
std::int64_t ld_info[3];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
matrix_info_t *matrix_info =
(matrix_info_t *)std::malloc(sizeof(matrix_info_t));
matrix_info->transpose_info[0] = a_trans;
matrix_info->transpose_info[1] = b_trans;
matrix_info->value_info[0] = alpha_value;
matrix_info->value_info[1] = beta_value;
matrix_info->size_info[0] = m;
matrix_info->size_info[1] = n;
matrix_info->size_info[2] = k;
matrix_info->ld_info[0] = lda;
matrix_info->ld_info[1] = ldb;
matrix_info->ld_info[2] = ldc;
matrix_info->groupsize_info = batch_size;
sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
matrix_info->size_info, matrix_info->size_info + 1,
matrix_info->size_info + 2, matrix_info->value_info,
reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { std::free(matrix_info); });
});
}
template <class Ta, class Tb, class Tc, class Ts>
inline void
gemm_batch_impl(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n,
int k, const void *alpha, const void *a, int lda,
long long int stride_a, const void *b, int ldb,
long long int stride_b, const void *beta, void *c,
int ldc, long long int stride_c, int batch_size) {
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
auto data_a = get_memory(reinterpret_cast<const Ta *>(a));
auto data_b = get_memory(reinterpret_cast<const Tb *>(b));
auto data_c = get_memory(reinterpret_cast<Tc *>(c));
oneapi::mkl::blas::column_major::gemm_batch(
q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
stride_a, data_b, ldb, stride_b, beta_value,
data_c, ldc, stride_c, batch_size);
}
template <bool is_hermitian, class T, class Tbeta>
inline void rk_impl(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k,
const T *alpha, const T *a, int lda, const T *b,
int ldb, const Tbeta *beta, T *c, int ldc) {
  // For a symmetric matrix, this function computes: C = alpha*OP(A)*(OP(B))^T + beta*C
  // For a Hermitian matrix, this function computes: C = alpha*OP(A)*(OP(B))^H + beta*C
  // The gemmt() routine computes: C = alpha*OPA(A)*OPB(B) + beta*C
  // So OPB needs to be adjusted before gemmt() is called.
using Ty = typename dpct::DataType<T>::T2;
using Ts = typename dpct::DataType<Tbeta>::T2;
Ty alpha_value = dpct::get_value(reinterpret_cast<const Ty *>(alpha), q);
Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
oneapi::mkl::transpose trans_A = trans, trans_B = trans;
int origin_b_rows = trans == oneapi::mkl::transpose::nontrans ? n : k;
int origin_b_cols = trans == oneapi::mkl::transpose::nontrans ? k : n;
if ((is_hermitian && trans == oneapi::mkl::transpose::trans) ||
(!is_hermitian && !std::is_floating_point_v<Ty> && trans == oneapi::mkl::transpose::conjtrans)) {
    // In this case, OPB needs to be a pure conjugation of B, but only
    // nontrans, trans, and conjtrans are available. So we first make a
    // conjtrans copy of B, and then apply a trans operation in gemmt().
trans_B = oneapi::mkl::transpose::trans;
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
#ifdef DPCT_USM_LEVEL_NONE
auto new_B_buffer = sycl::buffer<Ty, 1>(sycl::range<1>(origin_b_rows * origin_b_cols));
auto from_buffer = dpct::get_buffer<Ty>(b);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), from_buffer, ldb, origin_b_rows * ldb, new_B_buffer,
origin_b_cols, origin_b_rows * origin_b_cols, 1);
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, new_B_buffer, origin_b_cols, beta_value, data_c, ldc);
#else
working_memory<T> new_B(origin_b_rows * origin_b_cols * sizeof(T), q);
oneapi::mkl::blas::column_major::omatcopy_batch(
q, oneapi::mkl::transpose::conjtrans, origin_b_rows, origin_b_cols,
Ts(1.0), reinterpret_cast<const Ty *>(b), ldb, origin_b_rows * ldb,
reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
origin_b_rows * origin_b_cols, 1);
sycl::event e = oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, reinterpret_cast<Ty *>(new_B.get_ptr()), origin_b_cols,
beta_value, data_c, ldc);
new_B.set_event(e);
#endif
} else {
if constexpr (is_hermitian) {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::conjtrans
: oneapi::mkl::transpose::nontrans;
} else {
trans_B = trans == oneapi::mkl::transpose::nontrans
? oneapi::mkl::transpose::trans
: oneapi::mkl::transpose::nontrans;
}
auto data_a = get_memory(reinterpret_cast<const Ty *>(a));
auto data_b = get_memory(reinterpret_cast<const Ty *>(b));
auto data_c = get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::gemmt(
q, uplo, trans_A, trans_B, n, k, alpha_value,
data_a, lda, data_b, ldb, beta_value, data_c, ldc);
}
}
template <class Ta, class Tb, class Ts>
inline void
trsm_batch_impl(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const void *alpha,
const void **a, int lda, void **b, int ldb, int batch_size) {
struct matrix_info_t {
matrix_info_t(oneapi::mkl::side side_info, oneapi::mkl::uplo uplo_info,
oneapi::mkl::transpose transpose_info,
oneapi::mkl::diag diag_info, Ts value_info, std::int64_t m,
std::int64_t n, std::int64_t lda, std::int64_t ldb,
std::int64_t groupsize_info)
: side_info(side_info), uplo_info(uplo_info),
transpose_info(transpose_info), diag_info(diag_info),
value_info(value_info), groupsize_info(groupsize_info) {
size_info[0] = m;
size_info[1] = n;
ld_info[0] = lda;
ld_info[1] = ldb;
}
oneapi::mkl::side side_info;
oneapi::mkl::uplo uplo_info;
oneapi::mkl::transpose transpose_info;
oneapi::mkl::diag diag_info;
Ts value_info;
std::int64_t size_info[2];
std::int64_t ld_info[2];
std::int64_t groupsize_info;
};
Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
matrix_info_t *matrix_info =
new matrix_info_t(left_right, upper_lower, trans, unit_diag, alpha_value,
m, n, lda, ldb, batch_size);
sycl::event e = oneapi::mkl::blas::column_major::trsm_batch(
q, &(matrix_info->side_info), &(matrix_info->uplo_info),
&(matrix_info->transpose_info), &(matrix_info->diag_info),
matrix_info->size_info, matrix_info->size_info + 1,
&(matrix_info->value_info), reinterpret_cast<const Ta **>(a),
matrix_info->ld_info, reinterpret_cast<Tb **>(b),
matrix_info->ld_info + 1, 1, &(matrix_info->groupsize_info));
q.submit([&](sycl::handler &cgh) {
cgh.depends_on(e);
cgh.host_task([=] { delete matrix_info; });
});
}
template <typename T>
inline void getrfnp_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *info, int batch_size) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) Interfaces "
"Project does not support this API.");
#else
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
std::int64_t stride_a = n * lda;
std::int64_t scratchpad_size =
oneapi::mkl::lapack::getrfnp_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, batch_size);
Ty *a_strided_mem =
(Ty *)dpct::dpct_malloc(stride_a * batch_size * sizeof(Ty), exec_queue);
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct::dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct::dpct_memcpy(a_strided_mem + i * stride_a, host_a[i],
n * lda * sizeof(T));
#ifdef DPCT_USM_LEVEL_NONE
{
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_strided_mem);
oneapi::mkl::lapack::getrfnp_batch(exec_queue, n, n, a_buffer, lda,
stride_a, batch_size, scratchpad,
scratchpad_size);
}
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic));
#else
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
sycl::event e = oneapi::mkl::lapack::getrfnp_batch(
exec_queue, n, n, a_strided_mem, lda, stride_a, batch_size, scratchpad,
scratchpad_size);
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_strided_mem + i * stride_a,
n * lda * sizeof(T), automatic, {e}));
std::vector<void *> ptrs{scratchpad, a_strided_mem};
dpct::async_dpct_free(ptrs, events, exec_queue);
#endif
exec_queue.submit([&](sycl::handler &cgh) {
cgh.depends_on(events);
cgh.host_task([=] { free(host_a); });
});
#endif
}
} // namespace detail
inline oneapi::mkl::transpose get_transpose(int t) {
if (t == 0) {
return oneapi::mkl::transpose::nontrans;
} else if (t == 1) {
return oneapi::mkl::transpose::trans;
} else {
return oneapi::mkl::transpose::conjtrans;
}
}
/// Computes the LU factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in, out] a Array of pointers to matrices. These matrices are
/// overwritten by their LU factors: a lower triangular factor with unit
/// diagonal elements and an upper triangular factor.
/// \param [in] lda The leading dimension of the matrices.
/// \param [out] ipiv An array that stores the pivot indices. If \p ipiv is
/// nullptr, a non-pivoting LU factorization is computed.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrf_batch_wrapper(sycl::queue &exec_queue, int n, T *a[],
int lda, int *ipiv, int *info, int batch_size) {
if (ipiv == nullptr) {
detail::getrfnp_batch_wrapper(exec_queue, n, a, lda, info, batch_size);
return;
}
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, n, n, lda, stride_a, stride_ipiv, batch_size);
T *a_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
oneapi::mkl::lapack::getrf_batch(exec_queue, n, n, a_buffer, lda, stride_a,
ipiv_buf, stride_ipiv, batch_size, scratchpad,
scratchpad_size);
auto to_buffer = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = ipiv_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = to_buffer.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * n + id.get(1)] =
static_cast<int>(from_acc[id.get(0) * stride_ipiv + id.get(1)]);
});
});
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
std::vector<void *> ptrs{host_a};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t m_int64 = n;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
oneapi::mkl::lapack::getrf_batch(exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, 1, &group_sizes, scratchpad,
scratchpad_size);
sycl::event e = exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrf_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv[idx] = static_cast<int>(ipiv_int64[idx]);
});
});
std::vector<void *> ptrs{scratchpad, ipiv_int64, ipiv_int64_ptr, a_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
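// A minimal usage sketch for getrf_batch_wrapper (illustration only, not part
// of the original header). The queue `q`, the pointer array `a_dev`, and the
// matrix contents are assumptions of this example; `ipiv` and `info` are
// allocated as device USM because the USM path writes them on the device.
//
//   sycl::queue q;
//   constexpr int n = 4, lda = 4, batch = 8;
//   float **a_dev = sycl::malloc_shared<float *>(batch, q);
//   for (int i = 0; i < batch; ++i)
//     a_dev[i] = sycl::malloc_device<float>(lda * n, q);  // fill elsewhere
//   int *ipiv = sycl::malloc_device<int>(batch * n, q);
//   int *info = sycl::malloc_device<int>(batch, q);
//   dpct::getrf_batch_wrapper(q, n, a_dev, lda, ipiv, info, batch);
//   q.wait();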
/// Solves a system of linear equations with a batch of LU-factored square
/// coefficient matrices, with multiple right-hand sides.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] trans Indicates the form of the linear equations.
/// \param [in] n The order of the matrices.
/// \param [in] nrhs The number of right hand sides.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [in, out] b Array of pointers to matrices, whose columns are
/// the right-hand sides for the systems of equations.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getrs_batch_wrapper(sycl::queue &exec_queue,
oneapi::mkl::transpose trans, int n, int nrhs,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_b = nrhs * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, trans, n, nrhs, lda, stride_a, stride_ipiv, ldb, stride_b,
batch_size);
T *a_buffer_ptr, *b_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
dpct_memcpy(b_buffer_ptr + i * stride_b, host_b[i], nrhs * ldb * sizeof(T));
}
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getrs_batch(exec_queue, trans, n, nrhs, a_buffer, lda,
stride_a, ipiv_buf, stride_ipiv, b_buffer, ldb,
stride_b, batch_size, scratchpad, scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
nrhs * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t nrhs_int64 = nrhs;
std::int64_t lda_int64 = lda;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getrs_batch_scratchpad_size<Ty>(
exec_queue, &trans, &n_int64, &nrhs_int64, &lda_int64, &ldb_int64, 1,
&group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *));
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getrs_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
}).wait();
for (std::int64_t i = 0; i < batch_size; ++i)
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
sycl::event e = oneapi::mkl::lapack::getrs_batch(
exec_queue, &trans, &n_int64, &nrhs_int64, (Ty **)a_shared, &lda_int64,
ipiv_int64_ptr, (Ty **)b_shared, &ldb_int64, 1, &group_sizes, scratchpad,
scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
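// A hypothetical follow-up to the getrf sketch above (not part of the original
// header): solve A_i * X_i = B_i using the factors and pivots just computed.
// `b_dev`, `nrhs`, and `ldb` are assumptions; note that this wrapper writes
// `*info` on the host, so a host variable is passed for it.
//
//   constexpr int nrhs = 2, ldb = 4;
//   float **b_dev = sycl::malloc_shared<float *>(batch, q);
//   for (int i = 0; i < batch; ++i)
//     b_dev[i] = sycl::malloc_device<float>(ldb * nrhs, q);  // right-hand sides
//   int info_host = 0;
//   dpct::getrs_batch_wrapper(q, oneapi::mkl::transpose::nontrans, n, nrhs,
//                             (const float **)a_dev, lda, ipiv, b_dev, ldb,
//                             &info_host, batch);
//   q.wait();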
/// Computes the inverses of a batch of LU-factored matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] n The order of the matrices.
/// \param [in] a Array of pointers to matrices.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [in] ipiv An array that stores the pivots.
/// \param [out] b Array of pointers to inverse matrices.
/// \param [in] ldb The leading dimension of the matrices in \p b.
/// \param [out] info An array that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void getri_batch_wrapper(sycl::queue &exec_queue, int n,
const T *a[], int lda, int *ipiv, T *b[],
int ldb, int *info, int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info array value to 0
detail::dpct_memset(exec_queue, info, 0, sizeof(int) * batch_size);
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_b = n * ldb;
std::int64_t stride_ipiv = n;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, n, ldb, stride_b, stride_ipiv, batch_size);
T *b_buffer_ptr;
b_buffer_ptr = (T *)dpct_malloc(stride_b * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_b = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_b, b, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i) {
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_buffer_ptr + i * stride_b, host_a[i], ldb, lda, n, n,
dpct::device_to_device, exec_queue);
}
{
auto b_buffer = get_buffer<Ty>(b_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
sycl::buffer<std::int64_t, 1> ipiv_buf(
sycl::range<1>(batch_size * stride_ipiv));
auto from_buf = get_buffer<int>(ipiv);
exec_queue.submit([&](sycl::handler &cgh) {
auto from_acc = from_buf.get_access<sycl::access_mode::read>(cgh);
auto to_acc = ipiv_buf.get_access<sycl::access_mode::write>(cgh);
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<2>(batch_size, n), [=](sycl::id<2> id) {
to_acc[id.get(0) * stride_ipiv + id.get(1)] =
static_cast<std::int64_t>(from_acc[id.get(0) * n + id.get(1)]);
});
});
oneapi::mkl::lapack::getri_batch(exec_queue, n, b_buffer, ldb, stride_b, ipiv_buf,
stride_ipiv, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events;
for (std::int64_t i = 0; i < batch_size; ++i)
events.push_back(detail::dpct_memcpy(exec_queue, host_b[i],
b_buffer_ptr + i * stride_b,
n * ldb * sizeof(T), automatic));
std::vector<void *> ptrs{host_a, host_b};
std::thread mem_free_thread(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptrs, events);
mem_free_thread.detach();
#else
std::int64_t n_int64 = n;
std::int64_t ldb_int64 = ldb;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::getri_batch_scratchpad_size<Ty>(
exec_queue, &n_int64, &ldb_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
std::int64_t *ipiv_int64 =
sycl::malloc_device<std::int64_t>(batch_size * n, exec_queue);
std::int64_t **ipiv_int64_ptr =
sycl::malloc_shared<std::int64_t *>(batch_size, exec_queue);
exec_queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for<dpct_kernel_name<class getri_device_int64_to_int, T>>(
sycl::range<1>(batch_size * n), [=](sycl::id<1> idx) {
ipiv_int64[idx] = static_cast<std::int64_t>(ipiv[idx]);
});
});
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **b_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(b_shared, b, batch_size * sizeof(T *)).wait();
for (std::int64_t i = 0; i < batch_size; ++i) {
ipiv_int64_ptr[i] = ipiv_int64 + n * i;
// Need to create a copy of input matrices "a" to keep them unchanged.
// Matrices "b" (copy of matrices "a") will be used as input and output
// parameter in oneapi::mkl::lapack::getri_batch call.
matrix_mem_copy(b_shared[i], a_shared[i], ldb, lda, n, n, dpct::device_to_device,
exec_queue);
}
sycl::event e = oneapi::mkl::lapack::getri_batch(
exec_queue, &n_int64, (Ty **)b_shared, &ldb_int64, ipiv_int64_ptr, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, ipiv_int64_ptr, ipiv_int64, a_shared, b_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
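// A hypothetical sketch for getri_batch_wrapper (not part of the original
// header): invert the LU-factored matrices from the getrf sketch above into a
// separate set of output buffers. `inv_dev` is an assumption of this example;
// the wrapper itself copies `a` into `b`, so the factored input stays intact.
//
//   float **inv_dev = sycl::malloc_shared<float *>(batch, q);
//   for (int i = 0; i < batch; ++i)
//     inv_dev[i] = sycl::malloc_device<float>(lda * n, q);
//   dpct::getri_batch_wrapper(q, n, (const float **)a_dev, lda, ipiv,
//                             inv_dev, lda, info, batch);
//   q.wait();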
/// Computes the QR factorizations of a batch of general matrices.
/// \param [in] exec_queue The queue where the routine should be executed.
/// \param [in] m The number of rows in the matrices.
/// \param [in] n The number of columns in the matrices.
/// \param [in, out] a Array of pointers to matrices. These
/// matrices will be overwritten by the factorization data.
/// \param [in] lda The leading dimension of the matrices in \p a.
/// \param [out] tau An array that stores the scalar factors of the
/// elementary reflectors.
/// \param [out] info A value that stores the error information.
/// \param [in] batch_size The size of the batch.
template <typename T>
inline void geqrf_batch_wrapper(sycl::queue exec_queue, int m, int n,
T *a[], int lda, T *tau[], int *info,
int batch_size) {
using Ty = typename DataType<T>::T2;
// Set the info value to 0
*info = 0;
#ifdef DPCT_USM_LEVEL_NONE
std::int64_t stride_a = n * lda;
std::int64_t stride_tau = std::max(1, std::min(m, n));
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, m, n, lda, stride_a, stride_tau, batch_size);
T *a_buffer_ptr, *tau_buffer_ptr;
a_buffer_ptr = (T *)dpct_malloc(stride_a * batch_size * sizeof(T));
tau_buffer_ptr = (T *)dpct_malloc(stride_tau * batch_size * sizeof(T));
T **host_a = (T **)malloc(batch_size * sizeof(T *));
T **host_tau = (T **)malloc(batch_size * sizeof(T *));
dpct_memcpy(host_a, a, batch_size * sizeof(T *));
dpct_memcpy(host_tau, tau, batch_size * sizeof(T *));
for (std::int64_t i = 0; i < batch_size; ++i)
dpct_memcpy(a_buffer_ptr + i * stride_a, host_a[i], n * lda * sizeof(T));
{
auto a_buffer = get_buffer<Ty>(a_buffer_ptr);
auto tau_buffer = get_buffer<Ty>(tau_buffer_ptr);
sycl::buffer<Ty, 1> scratchpad{sycl::range<1>(scratchpad_size)};
oneapi::mkl::lapack::geqrf_batch(exec_queue, m, n, a_buffer, lda, stride_a,
tau_buffer, stride_tau, batch_size, scratchpad,
scratchpad_size);
}
// Copy back to the original buffers
std::vector<sycl::event> events_a;
std::vector<sycl::event> events_tau;
for (std::int64_t i = 0; i < batch_size; ++i) {
events_a.push_back(detail::dpct_memcpy(exec_queue, host_a[i],
a_buffer_ptr + i * stride_a,
n * lda * sizeof(T), automatic));
events_tau.push_back(detail::dpct_memcpy(
exec_queue, host_tau[i], tau_buffer_ptr + i * stride_tau,
std::max(1, std::min(m, n)) * sizeof(T), automatic));
}
std::vector<void *> ptr_a{host_a};
std::vector<void *> ptr_tau{host_tau};
std::thread mem_free_thread_a(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_a, events_a);
std::thread mem_free_thread_tau(
[=](std::vector<void *> pointers_array,
std::vector<sycl::event> events_array) {
sycl::event::wait(events_array);
for (auto p : pointers_array)
free(p);
},
ptr_tau, events_tau);
mem_free_thread_a.detach();
mem_free_thread_tau.detach();
#else
  std::int64_t m_int64 = m;
std::int64_t n_int64 = n;
std::int64_t lda_int64 = lda;
std::int64_t group_sizes = batch_size;
std::int64_t scratchpad_size = oneapi::mkl::lapack::geqrf_batch_scratchpad_size<Ty>(
exec_queue, &m_int64, &n_int64, &lda_int64, 1, &group_sizes);
Ty *scratchpad = sycl::malloc_device<Ty>(scratchpad_size, exec_queue);
T **a_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
T **tau_shared = sycl::malloc_shared<T *>(batch_size, exec_queue);
exec_queue.memcpy(a_shared, a, batch_size * sizeof(T *));
exec_queue.memcpy(tau_shared, tau, batch_size * sizeof(T *)).wait();
sycl::event e = oneapi::mkl::lapack::geqrf_batch(
exec_queue, &m_int64, &n_int64, (Ty **)a_shared, &lda_int64, (Ty **)tau_shared, 1,
&group_sizes, scratchpad, scratchpad_size);
std::vector<void *> ptrs{scratchpad, a_shared, tau_shared};
async_dpct_free(ptrs, {e}, exec_queue);
#endif
}
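// A hypothetical, self-contained sketch for geqrf_batch_wrapper (not part of
// the original header). `a_qr` and `tau_dev` are assumptions; each tau buffer
// needs min(m, n) entries, and `info` is written on the host by this wrapper.
//
//   constexpr int m = 6, n = 4, lda = 6, batch = 8;
//   float **a_qr = sycl::malloc_shared<float *>(batch, q);
//   float **tau_dev = sycl::malloc_shared<float *>(batch, q);
//   for (int i = 0; i < batch; ++i) {
//     a_qr[i] = sycl::malloc_device<float>(lda * n, q);        // fill elsewhere
//     tau_dev[i] = sycl::malloc_device<float>(std::min(m, n), q);
//   }
//   int info_host = 0;
//   dpct::geqrf_batch_wrapper(q, m, n, a_qr, lda, tau_dev, &info_host, batch);
//   q.wait();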
/// Computes the Euclidean norm of a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void nrm2(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, void *result, library_data_t result_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, result_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::nrm2_impl<float, float>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::nrm2_impl<double, double>(q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::nrm2_impl<std::complex<float>, float>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::nrm2_impl<std::complex<double>, double>(
q, n, x, incx, result);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::nrm2_impl<sycl::half, sycl::half>(
q, n, x, incx, result);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
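// A minimal usage sketch for nrm2 (illustration only, not part of the original
// header). `q`, `x`, and the vector length are assumptions; the result is
// placed in shared USM so the host can read it after the queue is synchronized.
//
//   int n = 1024;
//   float *x = sycl::malloc_device<float>(n, q);        // fill elsewhere
//   float *result = sycl::malloc_shared<float>(1, q);
//   dpct::nrm2(q, n, x, dpct::library_data_t::real_float, 1,
//              result, dpct::library_data_t::real_float);
//   q.wait();  // *result now holds the Euclidean norm of x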
/// Computes the dot product of two vectors.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dot(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<false>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
/// Computes the dot product of two vectors, conjugating the first vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in] y Input vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [out] result The result scalar.
/// \param [in] result_type Data type of the result.
inline void dotc(sycl::queue &q, int n, const void *x, library_data_t x_type,
int incx, const void *y, library_data_t y_type, int incy,
void *result, library_data_t result_type) {
detail::dotuc<true>(q, n, x, x_type, incx, y, y_type, incy, result,
result_type);
}
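// A hypothetical sketch for dot/dotc (not part of the original header),
// assuming complex single-precision vectors in device USM, a shared-USM
// `result`, and that this type combination is among the supported ones.
// dotc conjugates the first vector; dot does not.
//
//   using cplx = std::complex<float>;
//   cplx *xc = sycl::malloc_device<cplx>(n, q);
//   cplx *yc = sycl::malloc_device<cplx>(n, q);
//   cplx *resc = sycl::malloc_shared<cplx>(1, q);
//   dpct::dotc(q, n, xc, dpct::library_data_t::complex_float, 1,
//              yc, dpct::library_data_t::complex_float, 1,
//              resc, dpct::library_data_t::complex_float);
//   q.wait();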
/// Computes the product of a vector by a scalar.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
inline void scal(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, void *x, library_data_t x_type,
int incx) {
std::uint64_t key = detail::get_type_combination_id(x_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float): {
detail::scal_impl<float, float>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_double): {
detail::scal_impl<double, double>(q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float): {
detail::scal_impl<std::complex<float>, std::complex<float>>(q, n, alpha,
x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double): {
detail::scal_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx);
break;
}
case detail::get_type_combination_id(library_data_t::real_half): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::scal_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
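// A minimal usage sketch for scal (illustration only, not part of the original
// header): scale the single-precision vector `x` from the nrm2 sketch above
// in place. Using a host-side scalar here is an assumption of this example.
//
//   float alpha = 2.0f;
//   dpct::scal(q, n, &alpha, dpct::library_data_t::real_float,
//              x, dpct::library_data_t::real_float, 1);
//   q.wait();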
/// Computes a vector-scalar product and adds the result to a vector.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in] alpha The scale factor alpha.
/// \param [in] alpha_type The data type of alpha.
/// \param [in] x Input vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
inline void axpy(sycl::queue &q, int n, const void *alpha,
library_data_t alpha_type, const void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy) {
std::uint64_t key = detail::get_type_combination_id(x_type, alpha_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::axpy_impl<float, float>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::axpy_impl<double, double>(q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::axpy_impl<std::complex<float>, std::complex<float>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::axpy_impl<std::complex<double>, std::complex<double>>(
q, n, alpha, x, incx, y, incy);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
    sycl::half alpha_half(alpha_value);
    detail::axpy_impl<sycl::half, sycl::half>(q, n, &alpha_half, x, incx, y, incy);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
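// A hypothetical sketch for axpy (not part of the original header): compute
// y = alpha * x + y for single-precision vectors, reusing `x` and `n` from the
// sketches above. `y` and the host-side scalar are assumptions of this example.
//
//   float *y = sycl::malloc_device<float>(n, q);
//   float a = 0.5f;
//   dpct::axpy(q, n, &a, dpct::library_data_t::real_float,
//              x, dpct::library_data_t::real_float, 1,
//              y, dpct::library_data_t::real_float, 1);
//   q.wait();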
/// Performs rotation of points in the plane.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] n Number of elements in vector x.
/// \param [in, out] x Input/Output vector x.
/// \param [in] x_type Data type of the vector x.
/// \param [in] incx Stride of vector x.
/// \param [in, out] y Input/Output vector y.
/// \param [in] y_type Data type of the vector y.
/// \param [in] incy Stride of vector y.
/// \param [in] c Scaling factor.
/// \param [in] s Scaling factor.
/// \param [in] cs_type Data type of the scaling factors.
inline void rot(sycl::queue &q, int n, void *x, library_data_t x_type,
int incx, void *y, library_data_t y_type, int incy,
const void *c, const void *s, library_data_t cs_type) {
std::uint64_t key = detail::get_type_combination_id(x_type, cs_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float): {
detail::rot_impl<float, float, float>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double): {
detail::rot_impl<double, double, double>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::real_float): {
detail::rot_impl<std::complex<float>, float, float>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::real_double): {
detail::rot_impl<std::complex<double>, double, double>(q, n, x, incx, y, incy, c,
s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float): {
detail::rot_impl<std::complex<float>, float, std::complex<float>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double): {
detail::rot_impl<std::complex<double>, double, std::complex<double>>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_half,
library_data_t::real_half): {
detail::rot_impl<sycl::half, sycl::half, sycl::half>(q, n, x, incx, y, incy, c, s);
break;
}
case detail::get_type_combination_id(library_data_t::real_bfloat16,
library_data_t::real_bfloat16): {
detail::rot_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16>(q, n, x, incx, y, incy, c, s);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
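// A hypothetical sketch for rot (not part of the original header): apply a
// Givens rotation with host-side cosine/sine scalars to the vectors `x` and
// `y` from the sketches above. The values of `c` and `s` are assumptions.
//
//   float c = 0.8f, s = 0.6f;
//   dpct::rot(q, n, x, dpct::library_data_t::real_float, 1,
//             y, dpct::library_data_t::real_float, 1,
//             &c, &s, dpct::library_data_t::real_float);
//   q.wait();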
/// Computes matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, const void *b, library_data_t b_type, int ldb,
const void *beta, void *c, library_data_t c_type, int ldc,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
a, lda, b, ldb, &beta_half, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
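// A minimal usage sketch for gemm (illustration only, not part of the original
// header): single-precision C = alpha * A * B + beta * C with column-major
// device buffers. All sizes, leading dimensions, and buffers are assumptions.
//
//   int m = 64, n = 32, k = 16;
//   float *A = sycl::malloc_device<float>(m * k, q);
//   float *B = sycl::malloc_device<float>(k * n, q);
//   float *C = sycl::malloc_device<float>(m * n, q);
//   float alpha = 1.0f, beta = 0.0f;
//   dpct::gemm(q, oneapi::mkl::transpose::nontrans,
//              oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//              A, dpct::library_data_t::real_float, m,
//              B, dpct::library_data_t::real_float, k,
//              &beta, C, dpct::library_data_t::real_float, m,
//              dpct::library_data_t::real_float);
//   q.wait();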
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a[],
library_data_t a_type, int lda, const void *b[],
library_data_t b_type, int ldb, const void *beta,
void *c[], library_data_t c_type, int ldc,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
b, ldb, beta, c, ldc, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
float alpha_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
float beta_float =
dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
float>(q, a_trans, b_trans, m, n, k, &alpha_float,
a, lda, b, ldb, &beta_float, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc,
batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc,
batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
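// A hypothetical sketch for the pointer-array gemm_batch overload (not part of
// the original header), reusing the sizes and scalars from the gemm sketch
// above. This overload is USM-only (it throws under DPCT_USM_LEVEL_NONE), so
// the pointer arrays are placed in shared USM here; the per-matrix device
// buffers behind them are assumptions of this example.
//
//   int batch = 8;
//   const void **a_arr = sycl::malloc_shared<const void *>(batch, q);
//   const void **b_arr = sycl::malloc_shared<const void *>(batch, q);
//   void **c_arr = sycl::malloc_shared<void *>(batch, q);
//   // a_arr[i] / b_arr[i] / c_arr[i] point to per-matrix device buffers ...
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    a_arr, dpct::library_data_t::real_float, m,
//                    b_arr, dpct::library_data_t::real_float, k,
//                    &beta, c_arr, dpct::library_data_t::real_float, m,
//                    batch, dpct::library_data_t::real_float);
//   q.wait();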
/// Computes a batch of matrix-matrix product with general matrices.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] a_trans Specifies the operation applied to A.
/// \param [in] b_trans Specifies the operation applied to B.
/// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
/// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
/// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrix A.
/// \param [in] a_type Data type of the matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] stride_a Stride between the different A matrices.
/// \param [in] b Input matrix B.
/// \param [in] b_type Data type of the matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] stride_b Stride between the different B matrices.
/// \param [in] beta Scaling factor for matrix C.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] c_type Data type of the matrix C.
/// \param [in] ldc Leading dimension of C.
/// \param [in] stride_c Stride between the different C matrices.
/// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void gemm_batch(sycl::queue &q, oneapi::mkl::transpose a_trans,
oneapi::mkl::transpose b_trans, int m, int n, int k,
const void *alpha, const void *a, library_data_t a_type,
int lda, long long int stride_a, const void *b,
library_data_t b_type, int ldb, long long int stride_b,
const void *beta, void *c, library_data_t c_type,
int ldc, long long int stride_c, int batch_size,
library_data_t scaling_type) {
bool matched = false;
if (scaling_type == library_data_t::real_float &&
c_type == library_data_t::complex_float) {
scaling_type = library_data_t::complex_float;
} else if (scaling_type == library_data_t::real_double &&
c_type == library_data_t::complex_double) {
scaling_type = library_data_t::complex_double;
}
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(
library_data_t::real_float, library_data_t::real_float,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<float, float, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_double, library_data_t::real_double,
library_data_t::real_double, library_data_t::real_double): {
detail::gemm_batch_impl<double, double, double, double>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_float, library_data_t::complex_float,
library_data_t::complex_float, library_data_t::complex_float): {
detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>, std::complex<float>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::complex_double, library_data_t::complex_double,
library_data_t::complex_double, library_data_t::complex_double): {
detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>, std::complex<double>>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_half): {
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#ifdef __INTEL_MKL__
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_bfloat16, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16,
oneapi::mkl::bfloat16, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_bfloat16, library_data_t::real_bfloat16,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float,
float>(q, a_trans, b_trans, m, n, k, alpha, a, lda,
stride_a, b, ldb, stride_b, beta, c, ldc,
stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_int32, library_data_t::real_int32): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_int8, library_data_t::real_int8,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_float, library_data_t::real_float): {
detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
beta, c, ldc, stride_c, batch_size);
break;
}
#endif
case detail::get_type_combination_id(
library_data_t::real_half, library_data_t::real_half,
library_data_t::real_half, library_data_t::real_float): {
float alpha_value =
dpct::get_value(reinterpret_cast<const float *>(alpha), q);
float beta_value =
dpct::get_value(reinterpret_cast<const float *>(beta), q);
sycl::half alpha_half(alpha_value);
sycl::half beta_half(beta_value);
detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b,
&beta_half, c, ldc, stride_c, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
}
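// A hypothetical sketch for the strided gemm_batch overload (not part of the
// original header), reusing the sizes and scalars from the sketches above:
// each operand's matrices live in one contiguous device allocation, separated
// by a fixed stride. The buffers are assumptions of this example.
//
//   long long stride_a = (long long)m * k, stride_b = (long long)k * n,
//             stride_c = (long long)m * n;
//   float *A_s = sycl::malloc_device<float>(stride_a * batch, q);
//   float *B_s = sycl::malloc_device<float>(stride_b * batch, q);
//   float *C_s = sycl::malloc_device<float>(stride_c * batch, q);
//   dpct::gemm_batch(q, oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::transpose::nontrans, m, n, k, &alpha,
//                    A_s, dpct::library_data_t::real_float, m, stride_a,
//                    B_s, dpct::library_data_t::real_float, k, stride_b,
//                    &beta, C_s, dpct::library_data_t::real_float, m, stride_c,
//                    batch, dpct::library_data_t::real_float);
//   q.wait();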
/// This routine performs a special rank-k update of a symmetric matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T>
inline void syrk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const T *beta, T *c,
int ldc) {
detail::rk_impl<false, T, T>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
/// This routine performs a special rank-k update of a Hermitian matrix C by
/// general matrices A and B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] uplo Specifies whether C's data is stored in its upper or lower triangle.
/// \param [in] trans Specifies the operation to apply.
/// \param [in] n The number of rows and columns in C.
/// \param [in] k The inner dimension of matrix multiplications.
/// \param [in] alpha Scaling factor for the rank-k update.
/// \param [in] a Input matrix A.
/// \param [in] lda Leading dimension of A.
/// \param [in] b Input matrix B.
/// \param [in] ldb Leading dimension of B.
/// \param [in] beta Scaling factor for the rank-k update.
/// \param [in, out] c Input/Output matrix C.
/// \param [in] ldc Leading dimension of C.
template <class T, class Tbeta>
inline void herk(sycl::queue &q, oneapi::mkl::uplo uplo,
oneapi::mkl::transpose trans, int n, int k, const T *alpha,
const T *a, int lda, const T *b, int ldb, const Tbeta *beta,
T *c, int ldc) {
detail::rk_impl<true, T, Tbeta>(q, uplo, trans, n, k, alpha, a, lda, b,
ldb, beta, c, ldc);
}
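// A hypothetical, self-contained sketch for herk (not part of the original
// header); syrk is the symmetric counterpart and is called the same way with a
// single data type. std::complex<float> is assumed for T here, and the device
// buffers, sizes, and leading dimensions are assumptions of this example.
//
//   using cplx = std::complex<float>;
//   int nh = 32, kh = 16;
//   cplx alpha{1.0f, 0.0f};
//   float beta = 0.0f;
//   cplx *Ah = sycl::malloc_device<cplx>(nh * kh, q);
//   cplx *Bh = sycl::malloc_device<cplx>(nh * kh, q);
//   cplx *Ch = sycl::malloc_device<cplx>(nh * nh, q);
//   dpct::herk(q, oneapi::mkl::uplo::upper, oneapi::mkl::transpose::nontrans,
//              nh, kh, &alpha, Ah, nh, Bh, nh, &beta, Ch, nh);
//   q.wait();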
/// This routine performs a group of trsm operations. Each trsm solves an
/// equation of the form op(A) * X = alpha * B or X * op(A) = alpha * B.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies whether A multiplies X on the left or on
/// the right.
/// \param [in] upper_lower Specifies whether A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of the B matrices.
/// \param [in] n Number of columns of the B matrices.
/// \param [in] alpha Scaling factor for the solutions.
/// \param [in] a Input matrices A.
/// \param [in] a_type Data type of the matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in, out] b Input and output matrices B.
/// \param [in] b_type Data type of the matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [in] batch_size Specifies the number of trsm operations to perform.
/// \param [in] scaling_type Data type of the scaling factors.
inline void trsm_batch(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower,
oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n,
const void *alpha, const void **a, library_data_t a_type,
int lda, void **b, library_data_t b_type, int ldb,
int batch_size, library_data_t scaling_type) {
#ifdef DPCT_USM_LEVEL_NONE
throw std::runtime_error("this API is unsupported when USM level is none");
#else
std::uint64_t key =
detail::get_type_combination_id(a_type, b_type, scaling_type);
switch (key) {
case detail::get_type_combination_id(library_data_t::real_float,
library_data_t::real_float,
library_data_t::real_float): {
detail::trsm_batch_impl<float, float, float>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::real_double,
library_data_t::real_double,
library_data_t::real_double): {
detail::trsm_batch_impl<double, double, double>(
q, left_right, upper_lower, trans, unit_diag, m, n, alpha, a, lda, b,
ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_float,
library_data_t::complex_float,
library_data_t::complex_float): {
detail::trsm_batch_impl<std::complex<float>, std::complex<float>,
std::complex<float>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
case detail::get_type_combination_id(library_data_t::complex_double,
library_data_t::complex_double,
library_data_t::complex_double): {
detail::trsm_batch_impl<std::complex<double>, std::complex<double>,
std::complex<double>>(q, left_right, upper_lower,
trans, unit_diag, m, n, alpha,
a, lda, b, ldb, batch_size);
break;
}
default:
throw std::runtime_error("the combination of data type is unsupported");
}
#endif
}
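// Illustrative usage sketch (not part of the original header): solving a batch
// of lower-triangular systems op(A_i) * X_i = alpha * B_i in double precision.
// a_array and b_array are arrays of device pointers to the individual
// matrices; all names and sizes are hypothetical, and USM must be enabled
// (the API throws when DPCT_USM_LEVEL_NONE is defined).
//
//   double alpha = 1.0;
//   dpct::trsm_batch(q, oneapi::mkl::side::left, oneapi::mkl::uplo::lower,
//                    oneapi::mkl::transpose::nontrans,
//                    oneapi::mkl::diag::nonunit, m, n, &alpha,
//                    (const void **)a_array, dpct::library_data_t::real_double,
//                    lda, (void **)b_array, dpct::library_data_t::real_double,
//                    ldb, batch_size, dpct::library_data_t::real_double);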
/// Computes a triangular matrix-general matrix product.
/// \param [in] q The queue where the routine should be executed.
/// \param [in] left_right Specifies A is on the left or right side of the
/// multiplication.
/// \param [in] upper_lower Specifies A is upper or lower triangular.
/// \param [in] trans Specifies the operation applied to A.
/// \param [in] unit_diag Specifies whether A is unit triangular.
/// \param [in] m Number of rows of B.
/// \param [in] n Number of columns of B.
/// \param [in] alpha Scaling factor for the matrix-matrix product.
/// \param [in] a Input matrices A.
/// \param [in] lda Leading dimension of the matrices A.
/// \param [in] b Input matrices B.
/// \param [in] ldb Leading dimension of the matrices B.
/// \param [out] c Output matrices C.
/// \param [in] ldc Leading dimension of the matrices C.
template <class T>
inline void trmm(sycl::queue &q, oneapi::mkl::side left_right,
oneapi::mkl::uplo upper_lower, oneapi::mkl::transpose trans,
oneapi::mkl::diag unit_diag, int m, int n, const T *alpha,
const T *a, int lda, const T *b, int ldb, T *c, int ldc) {
using Ty = typename DataType<T>::T2;
auto alpha_val = dpct::get_value(alpha, q);
if (b != c) {
dpct::matrix_mem_copy(c, b, ldc, ldb, m, n, dpct::device_to_device, q);
}
auto data_a = detail::get_memory(reinterpret_cast<const Ty *>(a));
auto data_c = detail::get_memory(reinterpret_cast<Ty *>(c));
oneapi::mkl::blas::column_major::trmm(q, left_right, upper_lower, trans,
unit_diag, m, n, alpha_val, data_a, lda,
data_c, ldc);
}
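// Illustrative usage sketch (not part of the original header): computing
// C = alpha * A * B where A is an upper-triangular m x m matrix and B is an
// m x n general matrix, single precision, column-major layout. The queue,
// device pointers and sizes are hypothetical.
//
//   float alpha = 1.0f;
//   dpct::trmm(q, oneapi::mkl::side::left, oneapi::mkl::uplo::upper,
//              oneapi::mkl::transpose::nontrans, oneapi::mkl::diag::nonunit,
//              m, n, &alpha, a, m, b, m, c, m);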
} // namespace dpct
#endif // __DPCT_BLAS_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/atomic.hpp | //==---- atomic.hpp -------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ATOMIC_HPP__
#define __DPCT_ATOMIC_HPP__
#include <sycl/sycl.hpp>
namespace dpct {
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_add(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_add(operand);
}
/// Atomically add the value operand to the value at the addr and assign the
/// result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to add to the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_add(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_add(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
}
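// Illustrative usage sketch (not part of the original header): accumulating
// one contribution per work-item into a single device counter. The queue,
// allocation and kernel shape are hypothetical.
//
//   sycl::queue q;
//   int *sum = sycl::malloc_device<int>(1, q);
//   q.memset(sum, 0, sizeof(int)).wait();
//   q.parallel_for(sycl::range<1>(1024), [=](sycl::id<1>) {
//     dpct::atomic_fetch_add(sum, 1);  // relaxed order, device scope defaults
//   }).wait();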
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_sub(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_sub(operand);
}
/// Atomically subtract the value operand from the value at the addr and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to subtract from the value at \p addr
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_sub(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_sub<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_sub(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_sub<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_and(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_and(operand);
}
/// Atomically perform a bitwise AND between the value operand and the value at the addr
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise AND operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_and(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_and<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_and(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_and<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_or(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_or(operand);
}
/// Atomically or the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise OR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_or(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_or<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_or(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_or<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_xor(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_xor(operand);
}
/// Atomically xor the value at the addr with the value operand, and assign
/// the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to use in bitwise XOR operation with the value at the \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_xor(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_xor<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_xor(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_xor<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_min(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_min(operand);
}
/// Atomically calculate the minimum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_min(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_min<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_min(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_min<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_fetch_max(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.fetch_max(operand);
}
/// Atomically calculate the maximum of the value at addr and the value operand
/// and assign the result to the value at addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to compare with the value at \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_fetch_max(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_max<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_fetch_max(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_fetch_max<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically store \p operand into \p addr if the old value stored in \p addr
/// is equal to zero or greater than \p operand; otherwise decrement the value
/// stored in \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace = sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_dec(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old == 0 || old > operand) {
if (atm.compare_exchange_strong(old, operand))
break;
} else if (atm.compare_exchange_strong(old, old - 1))
break;
}
return old;
}
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise store 0 into \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline unsigned int atomic_fetch_compare_inc(unsigned int *addr,
unsigned int operand) {
auto atm = sycl::atomic_ref<unsigned int, memoryOrder, memoryScope,
addressSpace>(addr[0]);
unsigned int old;
while (true) {
old = atm.load();
if (old >= operand) {
if (atm.compare_exchange_strong(old, 0))
break;
} else if (atm.compare_exchange_strong(old, old + 1))
break;
}
return old;
}
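// Illustrative usage sketch (not part of the original header): a wrapping
// counter in the spirit of CUDA's atomicInc. Each call increments the value at
// `tickets` by one and wraps it back to 0 once it would exceed the threshold;
// the pointer name is hypothetical.
//
//   // Inside a kernel, with `tickets` a device pointer to an unsigned int:
//   unsigned int my_slot = dpct::atomic_fetch_compare_inc(tickets, 15u);
//   // my_slot takes values in 0..15 across successive calls.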
/// Atomically increment the value stored in \p addr if the old value stored in
/// \p addr is less than \p operand; otherwise store 0 into \p addr.
/// \param [in, out] addr The pointer to the data.
/// \param operand The threshold value.
/// \param memoryOrder The memory ordering used.
/// \returns The old value stored in \p addr.
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline unsigned int
atomic_fetch_compare_inc(unsigned int *addr, unsigned int operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::acq_rel:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr,
operand);
case sycl::memory_order::seq_cst:
return atomic_fetch_compare_inc<addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr,
operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
inline T atomic_exchange(T *addr, T operand) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
return atm.exchange(operand);
}
/// Atomically exchange the value at the address addr with the value operand.
/// \param [in, out] addr The pointer to the data.
/// \param operand The value to be exchanged with the value pointed by \p addr.
/// \param memoryOrder The memory ordering used.
/// \returns The value at the \p addr before the call.
template <typename T, sycl::access::address_space addressSpace =
sycl::access::address_space::global_space>
inline T atomic_exchange(T *addr, T operand,
sycl::memory_order memoryOrder) {
switch (memoryOrder) {
case sycl::memory_order::relaxed:
return atomic_exchange<T, addressSpace, sycl::memory_order::relaxed,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::acq_rel:
return atomic_exchange<T, addressSpace, sycl::memory_order::acq_rel,
sycl::memory_scope::device>(addr, operand);
case sycl::memory_order::seq_cst:
return atomic_exchange<T, addressSpace, sycl::memory_order::seq_cst,
sycl::memory_scope::device>(addr, operand);
default:
assert(false && "Invalid memory_order for atomics. Valid memory_order for "
"atomics are: sycl::memory_order::relaxed, "
"sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
}
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
typename T1, typename T2>
inline T1 atomic_exchange(T1 *addr, T2 operand,
sycl::memory_order memoryOrder) {
  return atomic_exchange<T1, addressSpace>(addr, operand, memoryOrder);
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in, out] addr Multi_ptr.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
sycl::multi_ptr<T, addressSpace> addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm = sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(*addr);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
sycl::multi_ptr<T1, addressSpace> addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(*addr);
T1 expected_value = expected;
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
/// Atomically compare the value at \p addr to the value expected and exchange
/// with the value desired if the value at \p addr is equal to the value expected.
/// Returns the value at the \p addr before the call.
/// \param [in] addr The pointer to the data.
/// \param expected The value to compare against the value at \p addr.
/// \param desired The value to assign to \p addr if the value at \p addr is expected.
/// \param success The memory ordering used when comparison succeeds.
/// \param fail The memory ordering used when comparison fails.
/// \returns The value at the \p addr before the call.
template <typename T,
sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device>
T atomic_compare_exchange_strong(
T *addr, T expected, T desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
auto atm =
sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected, desired, success, fail);
return expected;
}
template <sycl::access::address_space addressSpace =
sycl::access::address_space::global_space,
sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
sycl::memory_scope memoryScope = sycl::memory_scope::device,
typename T1, typename T2, typename T3>
T1 atomic_compare_exchange_strong(
T1 *addr, T2 expected, T3 desired,
sycl::memory_order success = sycl::memory_order::relaxed,
sycl::memory_order fail = sycl::memory_order::relaxed) {
T1 expected_value = expected;
auto atm =
sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
atm.compare_exchange_strong(expected_value, desired, success, fail);
return expected_value;
}
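// Illustrative usage sketch (not part of the original header): a minimal
// device-side lock built on the raw-pointer overload. The returned value is
// the one observed before the exchange, so the lock is taken when 0 comes
// back. Note that spin loops rely on forward-progress guarantees that not all
// devices provide; the `lock` pointer is hypothetical.
//
//   // Inside a kernel, with `lock` a device pointer to an int initialized to 0:
//   while (dpct::atomic_compare_exchange_strong(lock, 0, 1) != 0) {
//     // spin until the previous value was 0
//   }
//   // ... critical section ...
//   dpct::atomic_exchange(lock, 0);  // release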
/// Atomic extension to implement standard APIs in std::atomic
namespace detail{
template <typename T> struct IsValidAtomicType {
static constexpr bool value =
(std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
std::is_same<T, long long>::value ||
std::is_same<T, unsigned long long>::value ||
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_pointer<T>::value);
};
} // namespace detail
template <typename T,
sycl::memory_scope DefaultScope = sycl::memory_scope::system,
sycl::memory_order DefaultOrder = sycl::memory_order::seq_cst,
sycl::access::address_space Space =
sycl::access::address_space::generic_space>
class atomic{
static_assert(
detail::IsValidAtomicType<T>::value,
"Invalid atomic type. Valid types are int, unsigned int, long, "
"unsigned long, long long, unsigned long long, float, double "
"and pointer types");
T __d;
public:
/// default memory synchronization order
static constexpr sycl::memory_order default_read_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_read_order;
static constexpr sycl::memory_order default_write_order =
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space>::default_write_order;
static constexpr sycl::memory_scope default_scope = DefaultScope;
static constexpr sycl::memory_order default_read_modify_write_order =
DefaultOrder;
/// Default constructor.
constexpr atomic() noexcept = default;
  /// Constructor with an initial value.
constexpr atomic(T d) noexcept : __d(d){};
/// atomically replaces the value of the referenced object with a non-atomic argument
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
void store(T operand, sycl::memory_order memoryOrder = default_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
atm.store(operand, memoryOrder, memoryScope);
}
/// atomically obtains the value of the referenced object
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object
T load(sycl::memory_order memoryOrder = default_read_order,
sycl::memory_scope memoryScope = default_scope) const noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(
const_cast<T &>(__d));
return atm.load(memoryOrder, memoryScope);
}
/// atomically replaces the value of the referenced object and obtains the value held previously
/// \param operand The value to replace the pointed value.
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T exchange(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.exchange(operand, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_weak(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_weak(expected, desired, memoryOrder, memoryScope);
}
/// atomically compares the value of the referenced object with non-atomic argument
/// and performs atomic exchange if equal or atomic load if not
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param success The memory models for the read-modify-write
/// \param failure The memory models for load operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(
T &expected, T desired,
sycl::memory_order success, sycl::memory_order failure,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, success, failure, memoryScope);
}
/// \param expected The value expected to be found in the object referenced by the atomic_ref object
/// \param desired The value to store in the referenced object if it is as expected
/// \param memoryOrder The memory synchronization ordering for operations
/// \param memoryScope The memory scope used.
/// \returns true if the referenced object was successfully changed, false otherwise.
bool compare_exchange_strong(T &expected, T desired,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.compare_exchange_strong(expected, desired, memoryOrder, memoryScope);
}
/// atomically adds the argument to the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic addition
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_add(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_add(operand, memoryOrder, memoryScope);
}
/// atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously
/// \param operand The other argument of arithmetic subtraction
/// \param memoryOrder The memory ordering used.
/// \param memoryScope The memory scope used.
/// \returns The value of the referenced object before the call.
T fetch_sub(T operand,
sycl::memory_order memoryOrder = default_read_modify_write_order,
sycl::memory_scope memoryScope = default_scope) noexcept {
sycl::atomic_ref<T, DefaultOrder, DefaultScope, Space> atm(__d);
return atm.fetch_sub(operand, memoryOrder, memoryScope);
}
};
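// Illustrative API sketch (not part of the original header): the wrapper
// mirrors the std::atomic interface. In practice the object typically lives in
// device-accessible (e.g. USM) memory and is operated on from kernel code; the
// values below are arbitrary.
//
//   dpct::atomic<int> counter(0);
//   counter.fetch_add(5);                          // counter == 5, returns 0
//   int expected = 5;
//   counter.compare_exchange_strong(expected, 7);  // counter == 7
//   int observed = counter.load();                 // observed == 7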
} // namespace dpct
#endif // __DPCT_ATOMIC_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/rng_utils.hpp | //==---- rng_utils.hpp ----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_RNG_UTILS_HPP__
#define __DPCT_RNG_UTILS_HPP__
#include <sycl/sycl.hpp>
#include <oneapi/mkl.hpp>
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
#include <oneapi/mkl/rng/device.hpp>
#endif
#include "device.hpp"
#include "lib_common_utils.hpp"
namespace dpct {
namespace rng {
#ifdef __INTEL_MKL__ // The oneMKL Interfaces Project does not support this.
namespace device {
/// The random number generator on device.
/// \tparam engine_t The device random number generator engine. It can only be
/// oneapi::mkl::rng::device::mrg32k3a<1> or
/// oneapi::mkl::rng::device::mrg32k3a<4> or
/// oneapi::mkl::rng::device::philox4x32x10<1> or
/// oneapi::mkl::rng::device::philox4x32x10<4> or
/// oneapi::mkl::rng::device::mcg59<1>.
template <typename engine_t> class rng_generator {
static_assert(
std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<4>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>,
"engine_t can only be oneapi::mkl::rng::device::mrg32k3a<1> or "
"oneapi::mkl::rng::device::mrg32k3a<4> or "
"oneapi::mkl::rng::device::philox4x32x10<1> or "
"oneapi::mkl::rng::device::philox4x32x10<4> or "
"oneapi::mkl::rng::device::mcg59<1>.");
static constexpr bool _is_engine_vec_size_one = std::disjunction_v<
std::is_same<engine_t, oneapi::mkl::rng::device::mrg32k3a<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::philox4x32x10<1>>,
std::is_same<engine_t, oneapi::mkl::rng::device::mcg59<1>>>;
static constexpr std::uint64_t default_seed = 0;
oneapi::mkl::rng::device::bits<std::uint32_t> _distr_bits;
oneapi::mkl::rng::device::uniform_bits<std::uint32_t> _distr_uniform_bits;
oneapi::mkl::rng::device::gaussian<float> _distr_gaussian_float;
oneapi::mkl::rng::device::gaussian<double> _distr_gaussian_double;
oneapi::mkl::rng::device::lognormal<float> _distr_lognormal_float;
oneapi::mkl::rng::device::lognormal<double> _distr_lognormal_double;
oneapi::mkl::rng::device::poisson<std::uint32_t> _distr_poisson;
oneapi::mkl::rng::device::uniform<float> _distr_uniform_float;
oneapi::mkl::rng::device::uniform<double> _distr_uniform_double;
engine_t _engine;
public:
/// Default constructor of rng_generator
rng_generator() { _engine = engine_t(default_seed); }
/// Constructor of rng_generator if engine type is not mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
/// The number is calculated as: num_to_skip[0] + num_to_skip[1] * 2^64 +
/// num_to_skip[2] * 2^128 + ... + num_to_skip[n-1] * 2^(64*(n-1))
template <typename T = engine_t,
typename std::enable_if<!std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed,
std::initializer_list<std::uint64_t> num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
/// Constructor of rng_generator if engine type is mcg59
/// \param [in] seed The seed to initialize the engine state.
  /// \param [in] num_to_skip The number of elements to be skipped.
template <typename T = engine_t,
typename std::enable_if<std::is_same_v<
T, oneapi::mkl::rng::device::mcg59<1>>>::type * = nullptr>
rng_generator(std::uint64_t seed, std::uint64_t num_to_skip) {
_engine = engine_t(seed, num_to_skip);
}
  /// Generate random number(s) that obey the distribution \tparam distr_t.
  /// \tparam distr_t The distribution of the random number. It can only be
/// oneapi::mkl::rng::device::bits<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform_bits<std::uint32_t>,
/// oneapi::mkl::rng::device::gaussian<float>,
/// oneapi::mkl::rng::device::gaussian<double>,
/// oneapi::mkl::rng::device::lognormal<float>,
/// oneapi::mkl::rng::device::lognormal<double>,
/// oneapi::mkl::rng::device::poisson<std::uint32_t>,
/// oneapi::mkl::rng::device::uniform<float> or
/// oneapi::mkl::rng::device::uniform<double>
/// \tparam vec_size The length of the return vector. It can only be 1, 2
/// or 4.
/// \param distr_params The parameter(s) for lognormal or poisson
/// distribution.
/// \return The vector of the random number(s).
template <typename distr_t, int vec_size, class... distr_params_t>
auto generate(distr_params_t... distr_params) {
static_assert(vec_size == 1 || vec_size == 2 || vec_size == 4,
"vec_size is not supported.");
static_assert(
std::disjunction_v<
std::is_same<distr_t,
oneapi::mkl::rng::device::bits<std::uint32_t>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::gaussian<double>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::lognormal<double>>,
std::is_same<distr_t,
oneapi::mkl::rng::device::poisson<std::uint32_t>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<float>>,
std::is_same<distr_t, oneapi::mkl::rng::device::uniform<double>>>,
"distribution is not supported.");
if constexpr (std::is_same_v<
distr_t, oneapi::mkl::rng::device::bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_bits);
}
if constexpr (std::is_same_v<
distr_t,
oneapi::mkl::rng::device::uniform_bits<std::uint32_t>>) {
return generate_vec<vec_size>(_distr_uniform_bits);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<float>>) {
return generate_vec<vec_size>(_distr_gaussian_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::gaussian<double>>) {
return generate_vec<vec_size>(_distr_gaussian_double);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<float>>) {
return generate_vec<vec_size>(_distr_lognormal_float, distr_params...,
0.0f, 1.0f);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::lognormal<double>>) {
return generate_vec<vec_size>(_distr_lognormal_double, distr_params...,
0.0, 1.0);
}
if constexpr (std::is_same_v<distr_t, oneapi::mkl::rng::device::poisson<
std::uint32_t>>) {
return generate_vec<vec_size>(_distr_poisson, distr_params...);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<float>>) {
return generate_vec<vec_size>(_distr_uniform_float);
}
if constexpr (std::is_same_v<distr_t,
oneapi::mkl::rng::device::uniform<double>>) {
return generate_vec<vec_size>(_distr_uniform_double);
}
}
/// Get the random number generator engine.
/// \return The reference of the internal random number generator engine.
engine_t &get_engine() { return _engine; }
private:
template <int vec_size, typename distr_t, class... distr_params_t>
auto generate_vec(distr_t &distr, distr_params_t... distr_params) {
if constexpr (sizeof...(distr_params_t)) {
typename distr_t::param_type pt(distr_params...);
distr.param(pt);
}
if constexpr (vec_size == 4) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 4> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
res.z() = oneapi::mkl::rng::device::generate(distr, _engine);
res.w() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
return oneapi::mkl::rng::device::generate(distr, _engine);
}
} else if constexpr (vec_size == 1) {
if constexpr (_is_engine_vec_size_one) {
return oneapi::mkl::rng::device::generate(distr, _engine);
} else {
return oneapi::mkl::rng::device::generate_single(distr, _engine);
}
} else if constexpr (vec_size == 2) {
if constexpr (_is_engine_vec_size_one) {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate(distr, _engine);
return res;
} else {
sycl::vec<typename distr_t::result_type, 2> res;
res.x() = oneapi::mkl::rng::device::generate_single(distr, _engine);
res.y() = oneapi::mkl::rng::device::generate_single(distr, _engine);
return res;
}
}
}
};
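// Illustrative usage sketch (not part of the original header): drawing four
// uniformly distributed floats per work-item inside a SYCL kernel with a
// philox4x32x10<4> engine. The seed, per-item offset and kernel shape are
// hypothetical.
//
//   q.parallel_for(sycl::range<1>(n), [=](sycl::id<1> idx) {
//     std::uint64_t offset = idx[0] * 4;
//     dpct::rng::device::rng_generator<
//         oneapi::mkl::rng::device::philox4x32x10<4>>
//         gen(1234ULL, {offset});
//     sycl::float4 r =
//         gen.generate<oneapi::mkl::rng::device::uniform<float>, 4>();
//     // use r.x(), r.y(), r.z(), r.w() ...
//   });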
} // namespace device
#endif
namespace host {
namespace detail {
class rng_generator_base {
public:
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
virtual void set_seed(const std::uint64_t seed) = 0;
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
virtual void set_dimensions(const std::uint32_t dimensions) = 0;
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
virtual void set_queue(sycl::queue *queue) = 0;
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned int *output,
std::int64_t n) = 0;
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) = 0;
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) = 0;
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
virtual inline void generate_lognormal(double *output, std::int64_t n,
double m, double s) = 0;
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(float *output, std::int64_t n,
float mean, float stddev) = 0;
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
virtual inline void generate_gaussian(double *output, std::int64_t n,
double mean, double stddev) = 0;
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
virtual inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) = 0;
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(float *output, std::int64_t n) = 0;
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
virtual inline void generate_uniform(double *output, std::int64_t n) = 0;
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
virtual void skip_ahead(const std::uint64_t num_to_skip) = 0;
protected:
sycl::queue *_queue{&dpct::get_default_queue()};
std::uint64_t _seed{0};
std::uint32_t _dimensions{1};
};
/// The random number generator on host.
template <typename engine_t = oneapi::mkl::rng::philox4x32x10>
class rng_generator : public rng_generator_base {
public:
/// Constructor of rng_generator.
rng_generator() : _engine(create_engine(_queue, _seed, _dimensions)) {}
/// Set the seed of host rng_generator.
/// \param seed The engine seed.
void set_seed(const std::uint64_t seed) {
if (seed == _seed) {
return;
}
_seed = seed;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the dimensions of host rng_generator.
/// \param dimensions The engine dimensions.
void set_dimensions(const std::uint32_t dimensions) {
if (dimensions == _dimensions) {
return;
}
_dimensions = dimensions;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Set the queue of host rng_generator.
/// \param queue The engine queue.
void set_queue(sycl::queue *queue) {
if (queue == _queue) {
return;
}
_queue = queue;
_engine = create_engine(_queue, _seed, _dimensions);
}
/// Generate unsigned int random number(s) with 'uniform_bits' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned int *output, std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned int) == sizeof(std::uint32_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint32_t>>(
(std::uint32_t *)output, n);
#endif
}
/// Generate unsigned long long random number(s) with 'uniform_bits'
/// distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform_bits(unsigned long long *output,
std::int64_t n) {
#ifndef __INTEL_MKL__
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
generate<oneapi::mkl::rng::uniform_bits<std::uint64_t>>(
(std::uint64_t *)output, n);
#endif
}
/// Generate float random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(float *output, std::int64_t n, float m,
float s) {
generate<oneapi::mkl::rng::lognormal<float>>(output, n, m, s);
}
/// Generate double random number(s) with 'lognormal' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param m Mean of associated normal distribution
/// \param s Standard deviation of associated normal distribution.
inline void generate_lognormal(double *output, std::int64_t n, double m,
double s) {
generate<oneapi::mkl::rng::lognormal<double>>(output, n, m, s);
}
/// Generate float random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(float *output, std::int64_t n, float mean,
float stddev) {
generate<oneapi::mkl::rng::gaussian<float>>(output, n, mean, stddev);
}
/// Generate double random number(s) with 'gaussian' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param mean Mean of normal distribution
/// \param stddev Standard deviation of normal distribution.
inline void generate_gaussian(double *output, std::int64_t n, double mean,
double stddev) {
generate<oneapi::mkl::rng::gaussian<double>>(output, n, mean, stddev);
}
/// Generate unsigned int random number(s) with 'poisson' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
/// \param lambda Lambda for the Poisson distribution.
inline void generate_poisson(unsigned int *output, std::int64_t n,
double lambda) {
generate<oneapi::mkl::rng::poisson<unsigned int>>(output, n, lambda);
}
/// Generate float random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(float *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<float>>(output, n);
}
/// Generate double random number(s) with 'uniform' distribution.
/// \param output The pointer of the first random number.
/// \param n The number of random numbers.
inline void generate_uniform(double *output, std::int64_t n) {
generate<oneapi::mkl::rng::uniform<double>>(output, n);
}
/// Skip ahead several random number(s).
/// \param num_to_skip The number of random numbers to be skipped.
void skip_ahead(const std::uint64_t num_to_skip) {
#ifndef __INTEL_MKL__
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#else
if constexpr (std::is_same_v<engine_t, oneapi::mkl::rng::mt2203>)
throw std::runtime_error("no skip_ahead method of mt2203 engine.");
else
oneapi::mkl::rng::skip_ahead(_engine, num_to_skip);
#endif
}
private:
static inline engine_t create_engine(sycl::queue *queue,
const std::uint64_t seed,
const std::uint32_t dimensions) {
#ifdef __INTEL_MKL__
return std::is_same_v<engine_t, oneapi::mkl::rng::sobol>
? engine_t(*queue, dimensions)
: engine_t(*queue, seed);
#else
return engine_t(*queue, seed);
#endif
}
template <typename distr_t, typename buffer_t, class... distr_params_t>
void generate(buffer_t *output, const std::int64_t n,
const distr_params_t... distr_params) {
auto output_buf = dpct::detail::get_memory(output);
oneapi::mkl::rng::generate(distr_t(distr_params...), _engine, n,
output_buf);
}
engine_t _engine{};
};
} // namespace detail
} // namespace host
enum class random_engine_type {
philox4x32x10,
mrg32k3a,
mt2203,
mt19937,
sobol,
mcg59
};
typedef std::shared_ptr<rng::host::detail::rng_generator_base> host_rng_ptr;
/// Create a host random number generator.
/// \param type The random engine type.
/// \return A pointer to the created random number generator.
inline host_rng_ptr create_host_rng(const random_engine_type type) {
switch (type) {
case random_engine_type::philox4x32x10:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::philox4x32x10>>();
case random_engine_type::mrg32k3a:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mrg32k3a>>();
#ifndef __INTEL_MKL__
  default:
throw std::runtime_error("The oneAPI Math Kernel Library (oneMKL) "
"Interfaces Project does not support this API.");
#else
case random_engine_type::mt2203:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt2203>>();
case random_engine_type::mt19937:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mt19937>>();
case random_engine_type::sobol:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::sobol>>();
case random_engine_type::mcg59:
return std::make_shared<
rng::host::detail::rng_generator<oneapi::mkl::rng::mcg59>>();
#endif
}
}
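// Example (illustrative sketch): creating a host generator and filling a
// shared USM allocation with uniform floats. The set_queue()/set_seed()
// calls are assumed from the rng_generator_base interface declared earlier
// in this file; the rest uses only standard SYCL APIs.
//   sycl::queue q;
//   dpct::rng::host_rng_ptr gen = dpct::rng::create_host_rng(
//       dpct::rng::random_engine_type::philox4x32x10);
//   gen->set_queue(&q);
//   gen->set_seed(1234);
//   float *data = sycl::malloc_shared<float>(1000, q);
//   gen->generate_uniform(data, 1000); // 1000 samples from U(0, 1)
//   q.wait();
//   sycl::free(data, q);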
} // namespace rng
} // namespace dpct
#endif // __DPCT_RNG_UTILS_HPP__
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/numeric.h | //==---- numeric.h --------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_NUMERIC_H__
#define __DPCT_NUMERIC_H__
namespace dpct {
template <typename Policy, typename InputIt1, typename InputIt2, typename T>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init);
}
template <typename Policy, typename InputIt1, typename InputIt2, typename T,
typename BinaryOperation1, typename BinaryOperation2>
T inner_product(Policy &&policy, InputIt1 first1, InputIt1 last1,
InputIt2 first2, T init, BinaryOperation1 op1,
BinaryOperation2 op2) {
return std::transform_reduce(std::forward<Policy>(policy), first1, last1,
first2, init, op1, op2);
}
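// Example (illustrative): a dot product over host vectors using the standard
// C++17 parallel execution policy; any policy accepted by
// std::transform_reduce works the same way.
//   std::vector<float> a(1000, 1.0f), b(1000, 2.0f);
//   float dot = dpct::inner_product(std::execution::par, a.begin(), a.end(),
//                                   b.begin(), 0.0f); // dot == 2000.0f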
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/iterators.h | //==---- iterators.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ITERATORS_H__
#define __DPCT_ITERATORS_H__
#include <oneapi/dpl/iterator>
#include "functional.h"
namespace dpct {
namespace internal {
// Wrapper class returned from a dereferenced transform_iterator which was
// created using
// make_transform_output_iterator(). Used to apply the supplied transform
// function when writing into an object of this class.
//
// Example:
// int a[] = {0, 1, 2, 3, 4};
// int* p = a;
// auto f = [](auto v) {return v*v;};
// auto tr_out = dpct::make_transform_output_iterator(p+1, f);
// auto wrap = *tr_out; // wrap is a transform_output_ref_wrapper
// std::cout<<*(p+1)<<std::endl; // '1'
// wrap = 2; // apply function, store 2*2=4
// std::cout<<*(p+1)<<std::endl; // '4'
template <typename T, typename _UnaryFunc> class transform_output_ref_wrapper {
private:
T __my_reference_;
_UnaryFunc __my_unary_func_;
public:
template <typename U>
transform_output_ref_wrapper(U &&__reference, _UnaryFunc __unary_func)
: __my_reference_(std::forward<U>(__reference)),
__my_unary_func_(__unary_func) {}
// When writing to an object of this type, apply the supplied unary function,
// then write to the wrapped reference
template <typename UnaryInputType>
transform_output_ref_wrapper &operator=(const UnaryInputType &e) {
__my_reference_ = __my_unary_func_(e);
return *this;
}
};
// Unary functor to create a transform_output_ref_wrapper when a
// transform_iterator is dereferenced, so that the supplied unary function
// may be applied on write, resulting in a transform_output_iterator.
template <typename _UnaryFunc> struct _Unary_Out {
_Unary_Out(_UnaryFunc __f_) : __f(__f_) {}
_UnaryFunc __f;
template <typename T> auto operator()(T &&val) const {
return transform_output_ref_wrapper<T, _UnaryFunc>(std::forward<T>(val),
__f);
}
};
} // end namespace internal
using std::advance;
using std::distance;
template <typename T>
oneapi::dpl::counting_iterator<T> make_counting_iterator(const T &input) {
return oneapi::dpl::counting_iterator<T>(input);
}
template <typename _Tp> class constant_iterator {
public:
typedef std::false_type is_hetero;
typedef std::true_type is_passed_directly;
typedef std::ptrdiff_t difference_type;
typedef _Tp value_type;
typedef _Tp *pointer;
  // There is no storage behind the iterator, so we return a value instead of
  // a reference.
typedef const _Tp reference;
typedef const _Tp const_reference;
typedef std::random_access_iterator_tag iterator_category;
explicit constant_iterator(_Tp __init)
: __my_value_(__init), __my_counter_(0) {}
private:
// used to construct iterator instances with different counter values required
// by arithmetic operators
constant_iterator(const _Tp &__value, const difference_type &__offset)
: __my_value_(__value), __my_counter_(__offset) {}
public:
// non-const variants of access operators are not provided so unintended
// writes are caught at compile time.
const_reference operator*() const { return __my_value_; }
const_reference operator[](difference_type) const { return __my_value_; }
difference_type operator-(const constant_iterator &__it) const {
return __my_counter_ - __it.__my_counter_;
}
constant_iterator &operator+=(difference_type __forward) {
__my_counter_ += __forward;
return *this;
}
constant_iterator &operator-=(difference_type __backward) {
return *this += -__backward;
}
constant_iterator &operator++() { return *this += 1; }
constant_iterator &operator--() { return *this -= 1; }
constant_iterator operator++(int) {
constant_iterator __it(*this);
++(*this);
return __it;
}
constant_iterator operator--(int) {
constant_iterator __it(*this);
--(*this);
return __it;
}
constant_iterator operator-(difference_type __backward) const {
return constant_iterator(__my_value_, __my_counter_ - __backward);
}
constant_iterator operator+(difference_type __forward) const {
return constant_iterator(__my_value_, __my_counter_ + __forward);
}
friend constant_iterator operator+(difference_type __forward,
const constant_iterator __it) {
return __it + __forward;
}
bool operator==(const constant_iterator &__it) const {
return __my_value_ == __it.__my_value_ &&
this->__my_counter_ == __it.__my_counter_;
}
bool operator!=(const constant_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const constant_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const constant_iterator &__it) const { return __it < *this; }
bool operator<=(const constant_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const constant_iterator &__it) const {
return !(*this < __it);
}
private:
_Tp __my_value_;
uint64_t __my_counter_;
};
template <typename _Tp>
constant_iterator<_Tp> make_constant_iterator(_Tp __value) {
return constant_iterator<_Tp>(__value);
}
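// Example (illustrative): pairing a constant_iterator with another range to
// add a fixed value to every element.
//   std::vector<int> v{1, 2, 3};
//   std::transform(v.begin(), v.end(), dpct::make_constant_iterator(10),
//                  v.begin(), std::plus<int>());
//   // v == {11, 12, 13}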
// key_value_pair class to represent a key and value, specifically the result
// of dereferencing an arg_index_input_iterator
template <typename _KeyTp, typename _ValueTp> class key_value_pair {
public:
key_value_pair() = default;
key_value_pair(const _KeyTp &_key, const _ValueTp &_value)
: key(_key), value(_value) {}
bool operator==(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key == _kvp.key) && (value == _kvp.value);
}
bool operator!=(const key_value_pair<_KeyTp, _ValueTp> &_kvp) const {
return (key != _kvp.key) || (value != _kvp.value);
}
_KeyTp key;
_ValueTp value;
};
namespace detail {
template <typename KeyTp, typename _ValueTp> struct make_key_value_pair {
template <typename ValRefTp>
key_value_pair<KeyTp, _ValueTp>
operator()(const oneapi::dpl::__internal::tuple<KeyTp, ValRefTp> &tup) const {
return ::dpct::key_value_pair<KeyTp, _ValueTp>(::std::get<0>(tup),
::std::get<1>(tup));
}
};
template <class T> struct __zip_iterator_impl;
template <class... Ts> struct __zip_iterator_impl<std::tuple<Ts...>> {
using type = oneapi::dpl::zip_iterator<Ts...>;
};
} // end namespace detail
// dpct::zip_iterator can only accept a std::tuple type as its template
// argument, for compatibility purposes. Please use oneapi::dpl::zip_iterator
// if you want to pass the iterator types directly.
template <typename... Ts>
using zip_iterator = typename detail::__zip_iterator_impl<Ts...>::type;
// arg_index_input_iterator is an iterator over an input iterator, with an
// index. When dereferenced, it returns a key_value_pair, which can be
// interrogated for the index key or the value from the input iterator.
template <typename InputIteratorT, typename OffsetT = ptrdiff_t,
typename OutputValueT =
typename ::std::iterator_traits<InputIteratorT>::value_type>
class arg_index_input_iterator
: public oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>> {
using arg_index_input_iterator_wrap = oneapi::dpl::transform_iterator<
oneapi::dpl::zip_iterator<oneapi::dpl::counting_iterator<OffsetT>,
InputIteratorT>,
detail::make_key_value_pair<OffsetT, OutputValueT>>;
public:
typedef OffsetT difference_type;
  // signals to __get_sycl_range that this iterator is a direct pass iterator
using is_zip = ::std::true_type;
arg_index_input_iterator(const arg_index_input_iterator_wrap &__arg_wrap)
: arg_index_input_iterator_wrap(__arg_wrap) {}
arg_index_input_iterator(InputIteratorT __iter)
: arg_index_input_iterator_wrap(
oneapi::dpl::make_zip_iterator(
oneapi::dpl::counting_iterator(OffsetT(0)), __iter),
detail::make_key_value_pair<OffsetT, OutputValueT>()) {}
arg_index_input_iterator &operator=(const arg_index_input_iterator &__input) {
arg_index_input_iterator_wrap::operator=(__input);
return *this;
}
arg_index_input_iterator &operator++() {
arg_index_input_iterator_wrap::operator++();
return *this;
}
arg_index_input_iterator &operator--() {
arg_index_input_iterator_wrap::operator--();
return *this;
}
arg_index_input_iterator operator++(int) {
arg_index_input_iterator __it(*this);
++(*this);
return __it;
}
arg_index_input_iterator operator--(int) {
arg_index_input_iterator __it(*this);
--(*this);
return __it;
}
arg_index_input_iterator operator+(difference_type __forward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator+(__forward));
}
arg_index_input_iterator operator-(difference_type __backward) const {
return arg_index_input_iterator(
arg_index_input_iterator_wrap::operator-(__backward));
}
arg_index_input_iterator &operator+=(difference_type __forward) {
arg_index_input_iterator_wrap::operator+=(__forward);
return *this;
}
arg_index_input_iterator &operator-=(difference_type __backward) {
arg_index_input_iterator_wrap::operator-=(__backward);
return *this;
}
friend arg_index_input_iterator
operator+(difference_type __forward, const arg_index_input_iterator &__it) {
return __it + __forward;
}
difference_type operator-(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator-(__it);
}
bool operator==(const arg_index_input_iterator &__it) const {
return arg_index_input_iterator_wrap::operator==(__it);
}
bool operator!=(const arg_index_input_iterator &__it) const {
return !(*this == __it);
}
bool operator<(const arg_index_input_iterator &__it) const {
return *this - __it < 0;
}
bool operator>(const arg_index_input_iterator &__it) const {
return __it < *this;
}
bool operator<=(const arg_index_input_iterator &__it) const {
return !(*this > __it);
}
bool operator>=(const arg_index_input_iterator &__it) const {
return !(*this < __it);
}
// returns an arg_index_input_iterator with the same iter position, but a
// count reset to 0
arg_index_input_iterator create_normalized() {
return arg_index_input_iterator(
::std::get<1>(arg_index_input_iterator_wrap::base().base()));
}
};
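// Example (illustrative): each dereference yields a key_value_pair carrying
// the element's index (key) and the element itself (value).
//   int data[] = {7, 8, 9};
//   dpct::arg_index_input_iterator<int *> it(data);
//   auto kv = *(it + 2); // kv.key == 2, kv.value == 9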
template <typename IterT> struct io_iterator_pair {
inline io_iterator_pair() : selector(false) {}
inline io_iterator_pair(const IterT &first, const IterT &second)
: selector(false) {
iter[0] = first;
iter[1] = second;
}
inline IterT first() const { return selector ? iter[1] : iter[0]; }
inline IterT second() const { return selector ? iter[0] : iter[1]; }
inline void swap() { selector = !selector; }
bool selector;
IterT iter[2];
};
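// Example (illustrative): ping-pong buffering between an input and an output
// range; swap() flips which buffer first()/second() refer to, so the output
// of one pass becomes the input of the next. d_in/d_out stand for existing
// device pointers and are only placeholders here.
//   dpct::io_iterator_pair<int *> bufs(d_in, d_out);
//   // ... run a pass reading bufs.first() and writing bufs.second() ...
//   bufs.swap(); // results become the next pass's input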
template <typename _Iter, typename _UnaryFunc>
auto make_transform_output_iterator(_Iter __it, _UnaryFunc __unary_func) {
return oneapi::dpl::transform_iterator(
__it, internal::_Unary_Out<_UnaryFunc>(__unary_func));
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/algorithm.h | //==---- algorithm.h ------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_ALGORITHM_H__
#define __DPCT_ALGORITHM_H__
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include "functional.h"
#include "iterators.h"
#include "vector.h"
namespace dpct {
template <typename Policy, typename Iter1, typename Iter2, typename Pred,
typename T>
void replace_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p,
const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::transform(std::forward<Policy>(policy), first, last, mask, first,
internal::replace_if_fun<typename std::iterator_traits<Iter1>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred, typename T>
Iter3 replace_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p, const T &new_value) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::transform(std::forward<Policy>(policy), first, last, mask, result,
internal::replace_if_fun<typename std::iterator_traits<Iter3>::value_type, Pred>(p, new_value));
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using internal::__buffer;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
__buffer<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.get(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(std::forward<Policy>(policy), _tmp.get(),
std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
remove_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
using policy_type = typename std::decay<Policy>::type;
using ValueType = typename std::iterator_traits<Iter1>::value_type;
std::vector<ValueType> _tmp(std::distance(first, last));
auto end = std::copy_if(
policy, make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(_tmp.begin(), oneapi::dpl::discard_iterator()),
internal::negate_predicate_key_fun<Pred>(p));
return std::copy(policy, _tmp.begin(), std::get<0>(end.base()), first);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 remove_copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using oneapi::dpl::make_zip_iterator;
auto ret_val = std::remove_copy_if(
std::forward<Policy>(policy), make_zip_iterator(first, mask),
make_zip_iterator(last, mask + std::distance(first, last)),
make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <class Policy, class Iter1, class Iter2, class BinaryPred>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_first, values_first), ret_val);
return std::make_pair(keys_first + n1, values_first + n1);
}
template <class Policy, class Iter1, class Iter2>
std::pair<Iter1, Iter2> unique(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
return unique(std::forward<Policy>(policy), keys_first, keys_last,
values_first, std::equal_to<T>());
}
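// Example (illustrative): removing consecutive duplicate keys together with
// their paired values, using host vectors and the oneDPL 'seq' policy.
//   std::vector<int> keys{1, 1, 2, 2, 3};
//   std::vector<int> vals{10, 11, 20, 21, 30};
//   auto ends = dpct::unique(oneapi::dpl::execution::seq, keys.begin(),
//                            keys.end(), vals.begin());
//   // keys now begin with {1, 2, 3} and vals with {10, 20, 30};
//   // ends.first/ends.second point one past the last kept key/value.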
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryPred>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result,
BinaryPred binary_pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::unique_copy(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::unique_fun<BinaryPred>(binary_pred));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4>
std::pair<Iter3, Iter4> unique_copy(Policy &&policy, Iter1 keys_first,
Iter1 keys_last, Iter2 values_first,
Iter3 keys_result, Iter4 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
auto comp = std::equal_to<T>();
return unique_copy(std::forward<Policy>(policy), keys_first, keys_last,
values_first, keys_result, values_result, comp);
}
template <typename Policy, typename Iter, typename Pred>
Iter partition_point(Policy &&policy, Iter first, Iter last, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
if (std::is_partitioned(std::forward<Policy>(policy), first, last, p))
return std::find_if_not(std::forward<Policy>(policy), first, last, p);
else
return first;
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Pred>
Iter3 copy_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::copy_if(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(result, oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(pred));
return std::get<0>(ret_val.base());
}
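// Example (illustrative): stencil-based copy; an element of 'in' is copied
// only where the predicate holds for the corresponding mask element.
//   std::vector<int> in{1, 2, 3, 4}, mask{0, 1, 0, 1}, out(2);
//   dpct::copy_if(oneapi::dpl::execution::seq, in.begin(), in.end(),
//                 mask.begin(), out.begin(), [](int m) { return m == 1; });
//   // out == {2, 4}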
template <class Policy, class Iter1, class Iter2, class UnaryOperation,
class Pred>
Iter2 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 result,
UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, result),
oneapi::dpl::make_zip_iterator(first, result) + n,
internal::transform_if_fun<T, Pred, UnaryOperation>(pred, unary_op));
return result + n;
}
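// Example (illustrative): apply the unary op only where the predicate holds
// for the input element; other output positions are left unchanged.
//   std::vector<int> in{1, -2, 3}, out{0, 0, 0};
//   dpct::transform_if(oneapi::dpl::execution::seq, in.begin(), in.end(),
//                      out.begin(), [](int x) { return x * x; },
//                      [](int x) { return x > 0; });
//   // out == {1, 0, 9}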
template <class Policy, class Iter1, class Iter2, class Iter3,
class UnaryOperation, class Pred>
Iter3 transform_if(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 result, UnaryOperation unary_op, Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using T = typename std::iterator_traits<Iter1>::value_type;
using Ref1 = typename std::iterator_traits<Iter1>::reference;
using Ref2 = typename std::iterator_traits<Iter2>::reference;
const auto n = std::distance(first, last);
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, mask, result),
oneapi::dpl::make_zip_iterator(first, mask, result) + n,
internal::transform_if_unary_zip_mask_fun<T, Pred, UnaryOperation>(
pred, unary_op));
return result + n;
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class BinaryOperation, class Pred>
Iter4 transform_if(Policy &&policy, Iter1 first1, Iter1 last1, Iter2 first2,
Iter3 mask, Iter4 result, BinaryOperation binary_op,
Pred pred) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
const auto n = std::distance(first1, last1);
using ZipIterator =
typename oneapi::dpl::zip_iterator<Iter1, Iter2, Iter3, Iter4>;
using T = typename std::iterator_traits<ZipIterator>::value_type;
std::for_each(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first1, first2, mask, result),
oneapi::dpl::make_zip_iterator(last1, first2 + n, mask + n, result + n),
internal::transform_if_zip_mask_fun<T, Pred, BinaryOperation>(pred,
binary_op));
return result + n;
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
void scatter(Policy &&policy, InputIter1 first, InputIter1 last, InputIter2 map,
OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
oneapi::dpl::copy(policy, first, last,
oneapi::dpl::make_permutation_iterator(result, map));
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename OutputIter>
OutputIter gather(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 input_first, OutputIter result) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = ::std::distance(map_first, map_last);
return oneapi::dpl::copy(policy, perm_begin, perm_begin + n, result);
}
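// Example (illustrative): gathering values through an index map, so that
// dst[i] = src[map[i]].
//   std::vector<int> map{2, 0, 1}, src{10, 20, 30}, dst(3);
//   dpct::gather(oneapi::dpl::execution::seq, map.begin(), map.end(),
//                src.begin(), dst.begin());
//   // dst == {30, 10, 20}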
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
void scatter_if(Policy &&policy, InputIter1 first, InputIter1 last,
InputIter2 map, InputIter3 mask, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
transform_if(policy, first, last, mask,
oneapi::dpl::make_permutation_iterator(result, map),
[=](auto &&v) { return v; }, [=](auto &&m) { return pred(m); });
}
template <typename Policy, typename InputIter1, typename InputIter2,
typename InputIter3, typename OutputIter, typename Predicate>
OutputIter gather_if(Policy &&policy, InputIter1 map_first, InputIter1 map_last,
InputIter2 mask, InputIter3 input_first, OutputIter result,
Predicate pred) {
static_assert(
std::is_same<typename std::iterator_traits<InputIter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<InputIter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<
typename std::iterator_traits<OutputIter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto perm_begin =
oneapi::dpl::make_permutation_iterator(input_first, map_first);
const int n = std::distance(map_first, map_last);
return transform_if(policy, perm_begin, perm_begin + n, mask, result,
[=](auto &&v) { return v; },
[=](auto &&m) { return pred(m); });
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Iter5, typename Iter6, typename Comp>
std::pair<Iter5, Iter6>
merge(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto n1 = std::distance(keys_first1, keys_last1);
auto n2 = std::distance(keys_first2, keys_last2);
std::merge(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(keys_last1, values_first1 + n1),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(keys_last2, values_first2 + n2),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
return std::make_pair(keys_result + n1 + n2, values_result + n1 + n2);
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init, T step) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, internal::sequence_fun<T>(init, step));
}
template <class Policy, class Iter, class T>
void iota(Policy &&policy, Iter first, Iter last, T init) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
iota(std::forward<Policy>(policy), first, last, init, T(1));
}
template <class Policy, class Iter>
void iota(Policy &&policy, Iter first, Iter last) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
iota(std::forward<Policy>(policy), first, last, DiffSize(0), DiffSize(1));
}
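// Example (illustrative): filling a range with an arithmetic sequence,
// first[i] = init + i * step.
//   std::vector<float> v(4);
//   dpct::iota(oneapi::dpl::execution::seq, v.begin(), v.end(), 1.0f, 0.5f);
//   // v == {1.0, 1.5, 2.0, 2.5}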
template <class Policy, class Iter1, class Iter2, class Comp>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto first = oneapi::dpl::make_zip_iterator(keys_first, values_first);
auto last = first + std::distance(keys_first, keys_last);
std::sort(std::forward<Policy>(policy), first, last,
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
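// Example (illustrative): sorting values along with their keys, ascending by
// key with the default comparator.
//   std::vector<int> keys{3, 1, 2};
//   std::vector<char> vals{'c', 'a', 'b'};
//   dpct::sort(oneapi::dpl::execution::seq, keys.begin(), keys.end(),
//              vals.begin());
//   // keys == {1, 2, 3}, vals == {'a', 'b', 'c'}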
template <class Policy, class Iter1, class Iter2, class Comp>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
std::stable_sort(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first, values_first),
oneapi::dpl::make_zip_iterator(
keys_last, values_first + std::distance(keys_first, keys_last)),
internal::compare_key_fun<Comp>(comp));
}
template <class Policy, class Iter1, class Iter2>
void stable_sort(Policy &&policy, Iter1 keys_first, Iter1 keys_last,
Iter2 values_first) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
stable_sort(std::forward<Policy>(policy), keys_first, keys_last, values_first,
internal::__less());
}
template <class Policy, class Iter, class Operator>
void for_each_index(Policy &&policy, Iter first, Iter last, Operator unary_op) {
static_assert(
std::is_same<typename std::iterator_traits<Iter>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
using DiffSize = typename std::iterator_traits<Iter>::difference_type;
std::transform(
std::forward<Policy>(policy), oneapi::dpl::counting_iterator<DiffSize>(0),
oneapi::dpl::counting_iterator<DiffSize>(std::distance(first, last)),
first, unary_op);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Comp>
std::pair<Iter4, Iter5>
set_intersection(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 keys_result, Iter5 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_intersection(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_last2,
oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6>
set_symmetric_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2,
Iter3 values_first1, Iter4 values_first2,
Iter5 keys_result, Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_symmetric_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
std::pair<Iter5, Iter6>
set_difference(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
std::pair<Iter5, Iter6> set_difference(Policy &&policy, Iter1 keys_first1,
Iter1 keys_last1, Iter2 keys_first2,
Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result,
Iter6 values_result, Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_difference(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<>());
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <class Policy, class Iter1, class Iter2, class Iter3, class Iter4,
class Iter5, class Iter6, class Comp>
internal::enable_if_execution_policy<Policy, std::pair<Iter5, Iter6>>
set_union(Policy &&policy, Iter1 keys_first1, Iter1 keys_last1,
Iter2 keys_first2, Iter2 keys_last2, Iter3 values_first1,
Iter4 values_first2, Iter5 keys_result, Iter6 values_result,
Comp comp) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter5>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter6>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::set_union(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(keys_first1, values_first1),
oneapi::dpl::make_zip_iterator(
keys_last1, values_first1 + std::distance(keys_first1, keys_last1)),
oneapi::dpl::make_zip_iterator(keys_first2, values_first2),
oneapi::dpl::make_zip_iterator(
keys_last2, values_first2 + std::distance(keys_first2, keys_last2)),
oneapi::dpl::make_zip_iterator(keys_result, values_result),
internal::compare_key_fun<Comp>(comp));
auto n1 = std::distance(
oneapi::dpl::make_zip_iterator(keys_result, values_result), ret_val);
return std::make_pair(keys_result + n1, values_result + n1);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
auto ret_val = std::partition_copy(
std::forward<Policy>(policy), oneapi::dpl::make_zip_iterator(first, mask),
oneapi::dpl::make_zip_iterator(last, mask + std::distance(first, last)),
oneapi::dpl::make_zip_iterator(out_true, oneapi::dpl::discard_iterator()),
oneapi::dpl::make_zip_iterator(out_false,
oneapi::dpl::discard_iterator()),
internal::predicate_key_fun<Pred>(p));
return std::make_pair(std::get<0>(ret_val.first.base()),
std::get<0>(ret_val.second.base()));
}
template <typename Policy, typename Iter1, typename Iter3, typename Iter4,
typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
stable_partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter3 out_true,
Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return std::partition_copy(std::forward<Policy>(policy), first, last,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3,
typename Iter4, typename Pred>
internal::enable_if_execution_policy<Policy, std::pair<Iter3, Iter4>>
partition_copy(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask,
Iter3 out_true, Iter4 out_false, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter3>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter4>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition_copy(std::forward<Policy>(policy), first, last, mask,
out_true, out_false, p);
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_hetero_execution_policy<Policy, Iter1>
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
internal::__buffer<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.get());
auto ret_val =
std::stable_partition(std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.get()),
oneapi::dpl::make_zip_iterator(
last, _tmp.get() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
typename std::enable_if<!internal::is_hetero_execution_policy<
typename std::decay<Policy>::type>::value,
Iter1>::type
stable_partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
typedef typename std::decay<Policy>::type policy_type;
std::vector<typename std::iterator_traits<Iter1>::value_type> _tmp(
std::distance(first, last));
std::copy(std::forward<Policy>(policy), mask,
mask + std::distance(first, last), _tmp.begin());
auto ret_val = std::stable_partition(
std::forward<Policy>(policy),
oneapi::dpl::make_zip_iterator(first, _tmp.begin()),
oneapi::dpl::make_zip_iterator(last,
_tmp.begin() + std::distance(first, last)),
internal::predicate_key_fun<Pred>(p));
return std::get<0>(ret_val.base());
}
template <typename Policy, typename Iter1, typename Iter2, typename Pred>
internal::enable_if_execution_policy<Policy, Iter1>
partition(Policy &&policy, Iter1 first, Iter1 last, Iter2 mask, Pred p) {
static_assert(
std::is_same<typename std::iterator_traits<Iter1>::iterator_category,
std::random_access_iterator_tag>::value &&
std::is_same<typename std::iterator_traits<Iter2>::iterator_category,
std::random_access_iterator_tag>::value,
"Iterators passed to algorithms must be random-access iterators.");
return stable_partition(std::forward<Policy>(policy), first, last, mask, p);
}
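// Hedged usage sketch for the mask-based partition above (illustrative
// names): elements whose mask value satisfies the predicate are moved in
// front of those whose mask value does not, and the returned iterator marks
// the partition point.
//   auto mid = dpct::partition(policy, data, data + n, mask,
//                              [](int m) { return m > 0; });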
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending = false, int begin_bit = 0,
int end_bit =
sizeof(typename ::std::iterator_traits<key_t>::value_type) * 8);
namespace internal {
// Transforms key to a specific bit range and sorts the transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transformed_key_t>
inline void transform_and_sort(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending,
int begin_bit, int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto trans_key =
translate_key<key_t_value_t, transformed_key_t>(begin_bit, end_bit);
  // Using a comparator other than plain std::greater() or std::less()
  // prevents radix sort from being selected, which costs some performance.
  // It is necessary here, however, to apply the transformation that limits
  // the key comparison to the requested bit range.
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n, keys_out,
keys_out + n, [=](const auto a, const auto b) {
return comp(trans_key(a), trans_key(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<transformed_key_t>());
else
partial_sort_with_comp(::std::less<transformed_key_t>());
}
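// Worked example for the bit-range sort above (assuming translate_key keeps
// only bits [begin_bit, end_bit) of the key, as the surrounding code
// implies): with uint32_t keys, begin_bit = 8 and end_bit = 16, the keys
// 0x0000AB00 and 0x0000AB01 compare as equal under the transformed ordering
// because only byte 1 (0xAB) participates in the comparison.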
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline void sort_only(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, int64_t n, bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
if constexpr (::std::is_floating_point<key_t_value_t>::value) {
if (descending) {
      // A comparator other than std::greater() keeps -0.0 and 0.0 stable, at
      // the cost of some performance because radix sort will not be used.
auto comp_descending = [=](const auto a, const auto b) { return a > b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_descending);
} else {
      // A comparator other than std::less() keeps -0.0 and 0.0 stable, at
      // the cost of some performance because radix sort will not be used.
auto comp_ascending = [=](const auto a, const auto b) { return a < b; };
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n, comp_ascending);
}
} else {
if (descending) {
oneapi::dpl::partial_sort_copy(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_in + n,
keys_out, keys_out + n, ::std::greater<key_t_value_t>());
} else {
oneapi::dpl::partial_sort_copy(::std::forward<_ExecutionPolicy>(policy),
keys_in, keys_in + n, keys_out,
keys_out + n);
}
}
}
// Transforms key from a pair to a specific bit range and sorts the pairs by the
// transformed key
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename transform_key_t, typename value_t, typename value_out_t>
inline void transform_and_sort_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending, int begin_bit,
int end_bit) {
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
auto trans_key =
translate_key<key_t_value_t, transform_key_t>(begin_bit, end_bit);
  // Using a comparator other than plain std::greater() or std::less()
  // prevents radix sort from being selected, which costs some performance.
  // It is necessary here, however, to limit the key comparison to the
  // requested bit range and to select the key from the zipped pair.
auto load_val = [=](const auto a) { return trans_key(std::get<0>(a)); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline void sort_only_pairs(_ExecutionPolicy &&policy, key_t keys_in,
key_out_t keys_out, value_t values_in,
value_out_t values_out, int64_t n,
bool descending) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
auto zip_input = oneapi::dpl::zip_iterator(keys_in, values_in);
auto zip_output = oneapi::dpl::zip_iterator(keys_out, values_out);
  // Using a comparator other than plain std::greater() or std::less()
  // prevents radix sort from being selected, which costs some performance.
  // It is necessary here, however, to select the key from the zipped pair.
auto load_val = [=](const auto a) { return std::get<0>(a); };
auto partial_sort_with_comp = [&](const auto &comp) {
return oneapi::dpl::partial_sort_copy(
std::forward<_ExecutionPolicy>(policy), zip_input, zip_input + n,
zip_output, zip_output + n, [=](const auto a, const auto b) {
return comp(load_val(a), load_val(b));
});
};
if (descending)
partial_sort_with_comp(::std::greater<key_t_value_t>());
else
partial_sort_with_comp(::std::less<key_t_value_t>());
}
// overload for key_out_t != std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<!::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_pairs_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort_pairs<decltype(policy), key_t, key_out_t, T,
value_t, value_out_t>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_pairs_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_pairs_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_pairs_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_pairs_f.template operator()<uint64_t>(0);
}
}
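// Worked example of the dispatch above: for uint32_t keys with begin_bit = 0
// and end_bit = 12, clipped_end_bit - clipped_begin_bit = 12, so
// num_bytes = (12 - 1) / 8 + 1 = 2 and a uint16_t transformed key is used;
// only a full 32-bit range takes the sort_only_pairs path.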
// overload for key_out_t == std::nullptr_t
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
typename ::std::enable_if<::std::is_null_pointer<key_out_t>::value>::type
sort_pairs_impl(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
  // Create a temporary keys_out buffer whose contents are discarded. The
  // memory footprint could be reduced by a specialized output iterator that
  // reuses a single dummy key_t element.
using key_t_value_t = typename std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> temp_keys_out{sycl::range<1>(n)};
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(temp_keys_out), values_in,
values_out, n, descending, begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
values_in + segment_begin, values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
using offset_type =
typename ::std::iterator_traits<OffsetIteratorT>::value_type;
::std::vector<offset_type> host_accessible_offset_starts(nsegments);
::std::vector<offset_type> host_accessible_offset_ends(nsegments);
// make offsets accessible on host
::std::copy(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
begin_offsets + nsegments, host_accessible_offset_starts.begin());
::std::copy(::std::forward<_ExecutionPolicy>(policy), end_offsets,
end_offsets + nsegments, host_accessible_offset_ends.begin());
for (::std::uint64_t i = 0; i < nsegments; i++) {
uint64_t segment_begin = host_accessible_offset_starts[i];
uint64_t segment_end =
::std::min(n, (int64_t)host_accessible_offset_ends[i]);
if (segment_begin < segment_end) {
::dpct::sort_keys(::std::forward<_ExecutionPolicy>(policy),
keys_in + segment_begin, keys_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
}
}
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_pairs(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, values_in + segment_begin,
values_out + segment_begin,
segment_end - segment_begin, descending, begin_bit,
end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_parallel_for_of_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
uint64_t segment_begin = begin_offsets[i];
uint64_t segment_end = ::std::min(n, (int64_t)end_offsets[i]);
if (segment_begin == segment_end) {
return;
}
::dpct::sort_keys(::std::execution::seq, keys_in + segment_begin,
keys_out + segment_begin, segment_end - segment_begin,
descending, begin_bit, end_bit);
});
});
policy.queue().wait();
}
template <typename _ExecutionPolicy, typename OffsetIteratorT>
inline void
mark_segments(_ExecutionPolicy &&policy, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, int64_t n, int64_t nsegments,
sycl::buffer<::std::size_t, 1> segments) {
::std::size_t work_group_size =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
::std::size_t sub_group_size = sg_sizes.empty() ? 0 : sg_sizes.back();
float avg_seg_size = (float)n / (float)nsegments;
if (avg_seg_size > work_group_size) {
    // If the average segment size exceeds the work-group size, have all
    // launched work-items cooperatively mark each segment in turn
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(work_group_size, ([=](sycl::id<1> id) {
for (::std::size_t seg = 0; seg < nsegments; seg++) {
::std::size_t i = begin_offsets[seg];
::std::size_t end = end_offsets[seg];
while (i + id < end) {
segments_acc[i + id] = seg;
i += work_group_size;
}
}
}));
})
.wait();
} else if (sub_group_size > 0 && avg_seg_size > sub_group_size / 2) {
    // If the average segment size exceeds half a sub-group, assign one
    // sub-group per segment to cooperatively mark it
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(
sycl::nd_range<1>{work_group_size, work_group_size},
([=](sycl::nd_item<1> item) {
auto sub_group = item.get_sub_group();
::std::size_t num_subgroups =
sub_group.get_group_range().size();
::std::size_t local_size = sub_group.get_local_range().size();
::std::size_t sub_group_id = sub_group.get_group_id();
while (sub_group_id < nsegments) {
::std::size_t subgroup_local_id = sub_group.get_local_id();
::std::size_t i = begin_offsets[sub_group_id];
::std::size_t end = end_offsets[sub_group_id];
while (i + subgroup_local_id < end) {
segments_acc[i + subgroup_local_id] = sub_group_id;
i += local_size;
}
sub_group_id += num_subgroups;
}
}));
})
.wait();
} else {
    // If the average segment size is small compared to a sub-group, use a
    // single work-item to mark each segment
policy.queue()
.submit([&](sycl::handler &h) {
auto segments_acc = segments.get_access<sycl::access_mode::write>(h);
h.parallel_for(nsegments, ([=](sycl::id<1> seg) {
for (::std::size_t i = begin_offsets[seg];
i < end_offsets[seg]; i++) {
segments_acc[i] = seg;
}
}));
})
.wait();
}
}
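// Worked example for mark_segments: with n = 7, begin_offsets = {0, 3} and
// end_offsets = {3, 7}, the segments buffer becomes {0, 0, 0, 1, 1, 1, 1};
// positions not covered by any segment are left unwritten.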
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline void segmented_sort_keys_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
  // Part 1: Sort by key while keeping track of which segment each element
  // was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(segments),
oneapi::dpl::begin(segments_sorted), n, descending);
  // Part 2: Stable sort by segment id to regroup the segments while
  // preserving the key order established in Part 1.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), oneapi::dpl::begin(keys_temp),
keys_out, n, false);
}
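// Worked example of the two-pass scheme above: keys {5, 1, 4, 2} in segments
// {0, 0, 1, 1} are first sorted by key together with their segment ids,
// giving keys {1, 2, 4, 5} and segments {0, 1, 1, 0}; the second sort by
// segment id then restores segment grouping while keeping keys ordered
// inside each segment, producing keys_out = {1, 5, 2, 4}.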
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline void segmented_sort_pairs_by_two_pair_sorts(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
    value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sycl::buffer<::std::size_t, 1> segments{sycl::range<1>(n)};
sycl::buffer<::std::size_t, 1> segments_sorted{sycl::range<1>(n)};
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
sycl::buffer<key_t_value_t, 1> keys_temp{sycl::range<1>(n)};
using value_t_value_t = typename ::std::iterator_traits<value_t>::value_type;
sycl::buffer<value_t_value_t, 1> values_temp{sycl::range<1>(n)};
mark_segments(::std::forward<_ExecutionPolicy>(policy), begin_offsets,
end_offsets, n, nsegments, segments);
auto zip_seg_vals =
oneapi::dpl::make_zip_iterator(oneapi::dpl::begin(segments), values_in);
auto zip_seg_vals_out = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(segments_sorted), oneapi::dpl::begin(values_temp));
  // Part 1: Sort by key while keeping track of which segment each element
  // was in.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys_in,
oneapi::dpl::begin(keys_temp), zip_seg_vals,
zip_seg_vals_out, n, descending);
auto zip_keys_vals = oneapi::dpl::make_zip_iterator(
oneapi::dpl::begin(keys_temp), oneapi::dpl::begin(values_temp));
auto zip_keys_vals_out = oneapi::dpl::make_zip_iterator(keys_out, values_out);
  // Part 2: Stable sort by segment id to regroup the segments while
  // preserving the key order established in Part 1.
dpct::sort_pairs(::std::forward<_ExecutionPolicy>(policy),
oneapi::dpl::begin(segments_sorted),
oneapi::dpl::begin(segments), zip_keys_vals,
zip_keys_vals_out, n, false);
}
} // end namespace internal
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
sort_pairs(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n,
bool descending, int begin_bit, int end_bit) {
internal::sort_pairs_impl(std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, values_in, values_out, n, descending,
begin_bit, end_bit);
}
template <typename _ExecutionPolicy, typename key_t, typename value_t>
inline void sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_pairs(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
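// Hedged usage sketch for the io_iterator_pair overload above (double-buffer
// idiom; construction from two device buffers is assumed): with
// do_swap_iters = true, keys.first() and values.first() refer to the sorted
// data after the call.
//   dpct::io_iterator_pair<int *> keys(d_keys_a, d_keys_b);
//   dpct::io_iterator_pair<float *> vals(d_vals_a, d_vals_b);
//   dpct::sort_pairs(policy, keys, vals, n, /*descending=*/false,
//                    /*do_swap_iters=*/true);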
template <typename _ExecutionPolicy, typename key_t, typename key_out_t>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
sort_keys(_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
int64_t n, bool descending, int begin_bit, int end_bit) {
using key_t_value_t = typename ::std::iterator_traits<key_t>::value_type;
int clipped_begin_bit = ::std::max(begin_bit, 0);
int clipped_end_bit =
::std::min((::std::uint64_t)end_bit, sizeof(key_t_value_t) * 8);
int num_bytes = (clipped_end_bit - clipped_begin_bit - 1) / 8 + 1;
auto transform_and_sort_f = [&](auto x) {
using T = typename ::std::decay_t<decltype(x)>;
internal::transform_and_sort<decltype(policy), key_t, key_out_t, T>(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
descending, clipped_begin_bit, clipped_end_bit);
};
if (clipped_end_bit - clipped_begin_bit == sizeof(key_t_value_t) * 8) {
internal::sort_only(::std::forward<_ExecutionPolicy>(policy), keys_in,
keys_out, n, descending);
} else if (num_bytes == 1) {
transform_and_sort_f.template operator()<uint8_t>(0);
} else if (num_bytes == 2) {
transform_and_sort_f.template operator()<uint16_t>(0);
} else if (num_bytes <= 4) {
transform_and_sort_f.template operator()<uint32_t>(0);
} else // if (num_bytes <= 8)
{
transform_and_sort_f.template operator()<uint64_t>(0);
}
}
template <typename _ExecutionPolicy, typename key_t>
inline void sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
bool descending = false,
bool do_swap_iters = false,
int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
sort_keys(std::forward<_ExecutionPolicy>(policy), keys.first(), keys.second(),
n, descending, begin_bit, end_bit);
if (do_swap_iters)
keys.swap();
}
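// Hedged usage sketch for sort_keys with plain USM pointers (queue, policy,
// and allocations are illustrative, not part of this header):
//   sycl::queue q = dpct::get_default_queue();
//   auto policy = oneapi::dpl::execution::make_device_policy(q);
//   int *keys_in = sycl::malloc_shared<int>(n, q);
//   int *keys_out = sycl::malloc_shared<int>(n, q);
//   // ... fill keys_in ...
//   dpct::sort_keys(policy, keys_in, keys_out, n);  // ascending, full key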
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value>
segmented_sort_keys(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance the device, i.e. when the segment count is large relative
  // to the available compute units (scaled by sub-group size on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_keys_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel device sorts when the
                              // total number of sorts is small, limiting
                              // launch overhead
{
dpct::internal::segmented_sort_keys_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
  } else // general fallback using two full device-wide sorts
{
dpct::internal::segmented_sort_keys_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, n,
nsegments, begin_offsets, end_offsets, descending, begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename OffsetIteratorT>
inline void segmented_sort_keys(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys, int64_t n,
int64_t nsegments, OffsetIteratorT begin_offsets,
OffsetIteratorT end_offsets, bool descending = false,
bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_keys(::std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), n, nsegments, begin_offsets, end_offsets,
descending, begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
}
}
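// Hedged usage sketch for segmented_sort_keys (segment i spans
// [begin_offsets[i], end_offsets[i]), as used by the helpers above; data
// values are illustrative):
//   // keys_in:       {9, 7, 8, 3, 1, 2}
//   // begin_offsets: {0, 3}   end_offsets: {3, 6}
//   dpct::segmented_sort_keys(policy, keys_in, keys_out, 6, 2,
//                             begin_offsets, end_offsets);
//   // keys_out: {7, 8, 9, 1, 2, 3}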
template <typename _ExecutionPolicy, typename key_t, typename key_out_t,
typename value_t, typename value_out_t, typename OffsetIteratorT>
inline ::std::enable_if_t<dpct::internal::is_iterator<key_t>::value &&
dpct::internal::is_iterator<key_out_t>::value &&
dpct::internal::is_iterator<value_t>::value &&
dpct::internal::is_iterator<value_out_t>::value>
segmented_sort_pairs(
_ExecutionPolicy &&policy, key_t keys_in, key_out_t keys_out,
value_t values_in, value_out_t values_out, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
int compute_units =
policy.queue()
.get_device()
.template get_info<sycl::info::device::max_compute_units>();
auto sg_sizes = policy.queue()
.get_device()
.template get_info<sycl::info::device::sub_group_sizes>();
int subgroup_size = sg_sizes.empty() ? 1 : sg_sizes.back();
  // Use a parallel_for of serial sorts when there are enough segments to
  // load-balance the device, i.e. when the segment count is large relative
  // to the available compute units (scaled by sub-group size on GPUs).
if (nsegments >
compute_units *
(policy.queue().get_device().is_gpu() ? subgroup_size : 1)) {
dpct::internal::segmented_sort_pairs_by_parallel_for_of_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else if (nsegments < 512) // host loop of parallel device sorts when the
                              // total number of sorts is small, limiting
                              // launch overhead
{
dpct::internal::segmented_sort_pairs_by_parallel_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
  } else // general fallback using two full device-wide sorts
{
dpct::internal::segmented_sort_pairs_by_two_pair_sorts(
::std::forward<_ExecutionPolicy>(policy), keys_in, keys_out, values_in,
values_out, n, nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
}
}
template <typename _ExecutionPolicy, typename key_t, typename value_t,
typename OffsetIteratorT>
inline void segmented_sort_pairs(
_ExecutionPolicy &&policy, io_iterator_pair<key_t> &keys,
io_iterator_pair<value_t> &values, int64_t n, int64_t nsegments,
OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets,
bool descending = false, bool do_swap_iters = false, int begin_bit = 0,
int end_bit = sizeof(typename ::std::iterator_traits<key_t>::value_type) *
8) {
segmented_sort_pairs(std::forward<_ExecutionPolicy>(policy), keys.first(),
keys.second(), values.first(), values.second(), n,
nsegments, begin_offsets, end_offsets, descending,
begin_bit, end_bit);
if (do_swap_iters) {
keys.swap();
values.swap();
}
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmax(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::max_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
template <typename _ExecutionPolicy, typename Iter1, typename Iter2>
inline void reduce_argmin(_ExecutionPolicy &&policy, Iter1 input, Iter2 output,
::std::size_t n) {
dpct::arg_index_input_iterator<decltype(input), int> input_arg_idx(input);
auto ret = ::std::min_element(
::std::forward<_ExecutionPolicy>(policy), input_arg_idx,
input_arg_idx + n,
[](const auto &a, const auto &b) { return (a.value < b.value); });
::std::copy(::std::forward<_ExecutionPolicy>(policy), ret, ret + 1, output);
}
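// Hedged usage sketch for reduce_argmax/reduce_argmin. The output element
// type is the arg-index iterator's value type, assumed here to be
// dpct::key_value_pair<int, float> with key/value members (mirroring CUB's
// KeyValuePair); the allocation is illustrative.
//   auto *result =
//       sycl::malloc_shared<dpct::key_value_pair<int, float>>(1, q);
//   dpct::reduce_argmax(policy, data, result, n);
//   // result->value == maximum element, result->key == its index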
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable, typename StrictWeakOrdering>
inline ::std::pair<Iter1, Iter1>
equal_range(_ExecutionPolicy &&policy, Iter1 start, Iter1 end,
const ValueLessComparable &value, StrictWeakOrdering comp) {
::std::vector<::std::int64_t> res_lower(1);
::std::vector<::std::int64_t> res_upper(1);
::std::vector<ValueLessComparable> value_vec(1, value);
::oneapi::dpl::lower_bound(policy, start, end, value_vec.begin(),
value_vec.end(), res_lower.begin(), comp);
::oneapi::dpl::upper_bound(::std::forward<_ExecutionPolicy>(policy), start,
end, value_vec.begin(), value_vec.end(),
res_upper.begin(), comp);
auto result = ::std::make_pair(start + res_lower[0], start + res_upper[0]);
return result;
}
template <typename _ExecutionPolicy, typename Iter1,
typename ValueLessComparable>
inline ::std::pair<Iter1, Iter1> equal_range(_ExecutionPolicy &&policy,
Iter1 start, Iter1 end,
const ValueLessComparable &value) {
return equal_range(::std::forward<_ExecutionPolicy>(policy), start, end,
value, internal::__less());
}
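// Hedged usage sketch for equal_range over sorted device data (values are
// illustrative):
//   // data: {1, 2, 2, 2, 5}
//   auto r = dpct::equal_range(policy, data, data + 5, 2);
//   // r.first  points at index 1 (the first 2)
//   // r.second points at index 4 (one past the last 2)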
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmin(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1, ::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::max());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::min_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
template <typename Policy, typename Iter1, typename Iter2, typename Iter3>
inline ::std::enable_if_t<
dpct::internal::is_iterator<Iter1>::value &&
dpct::internal::is_iterator<Iter2>::value &&
internal::is_hetero_execution_policy<::std::decay_t<Policy>>::value>
segmented_reduce_argmax(Policy &&policy, Iter1 keys_in, Iter2 keys_out,
::std::int64_t nsegments, Iter3 begin_offsets,
Iter3 end_offsets) {
policy.queue().submit([&](sycl::handler &cgh) {
cgh.parallel_for(nsegments, [=](sycl::id<1> i) {
if (end_offsets[i] <= begin_offsets[i]) {
keys_out[i] = dpct::key_value_pair(
1,
::std::numeric_limits<
typename ::std::iterator_traits<Iter1>::value_type>::lowest());
} else {
dpct::arg_index_input_iterator<Iter1, int> arg_index(keys_in +
begin_offsets[i]);
keys_out[i] = *::std::max_element(
arg_index, arg_index + (end_offsets[i] - begin_offsets[i]),
[](const auto &a, const auto &b) { return a.value < b.value; });
}
});
});
policy.queue().wait();
}
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/memory.h | //==---- memory.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_MEMORY_H__
#define __DPCT_MEMORY_H__
#include <sycl/sycl.hpp>
// Memory management section:
// device_pointer, device_reference, swap, device_iterator, malloc_device,
// device_new, free_device, device_delete
namespace dpct {
namespace detail {
template <typename T>
struct make_allocatable
{
using type = T;
};
template <>
struct make_allocatable<void>
{
using type = dpct::byte_t;
};
#if defined(__LIBSYCL_MAJOR_VERSION) && defined(__LIBSYCL_MINOR_VERSION) && \
defined(__LIBSYCL_PATCH_VERSION)
#define _DPCT_LIBSYCL_VERSION \
(__LIBSYCL_MAJOR_VERSION * 10000 + __LIBSYCL_MINOR_VERSION * 100 + \
__LIBSYCL_PATCH_VERSION)
#else
#define _DPCT_LIBSYCL_VERSION 0
#endif
template <typename _DataT>
using __buffer_allocator =
#if _DPCT_LIBSYCL_VERSION >= 60000
sycl::buffer_allocator<typename make_allocatable<_DataT>::type>;
#else
sycl::buffer_allocator;
#endif
} // namespace detail
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_pointer;
#else
template <typename T> class device_pointer;
#endif
template <typename T> struct device_reference {
using pointer = device_pointer<T>;
using value_type = T;
template <typename OtherT>
device_reference(const device_reference<OtherT> &input)
: value(input.value) {}
  device_reference(const pointer &input) : value(*input) {}
device_reference(value_type &input) : value(input) {}
template <typename OtherT>
device_reference &operator=(const device_reference<OtherT> &input) {
value = input;
return *this;
};
device_reference &operator=(const device_reference &input) {
T val = input.value;
value = val;
return *this;
};
device_reference &operator=(const value_type &x) {
value = x;
return *this;
};
pointer operator&() const { return pointer(&value); };
operator value_type() const { return T(value); }
device_reference &operator++() {
++value;
return *this;
};
device_reference &operator--() {
--value;
return *this;
};
device_reference operator++(int) {
device_reference ref(*this);
++(*this);
return ref;
};
device_reference operator--(int) {
device_reference ref(*this);
--(*this);
return ref;
};
device_reference &operator+=(const T &input) {
value += input;
return *this;
};
device_reference &operator-=(const T &input) {
value -= input;
return *this;
};
device_reference &operator*=(const T &input) {
value *= input;
return *this;
};
device_reference &operator/=(const T &input) {
value /= input;
return *this;
};
device_reference &operator%=(const T &input) {
value %= input;
return *this;
};
device_reference &operator&=(const T &input) {
value &= input;
return *this;
};
device_reference &operator|=(const T &input) {
value |= input;
return *this;
};
device_reference &operator^=(const T &input) {
value ^= input;
return *this;
};
device_reference &operator<<=(const T &input) {
value <<= input;
return *this;
};
device_reference &operator>>=(const T &input) {
value >>= input;
return *this;
};
void swap(device_reference &input) {
T tmp = (*this);
*this = (input);
input = (tmp);
}
T &value;
};
template <typename T>
void swap(device_reference<T> &x, device_reference<T> &y) {
x.swap(y);
}
template <typename T> void swap(T &x, T &y) {
T tmp = x;
x = y;
y = tmp;
}
namespace internal {
// struct for checking if iterator is heterogeneous or not
template <typename Iter,
typename Void = void> // for non-heterogeneous iterators
struct is_hetero_iterator : std::false_type {};
template <typename Iter> // for heterogeneous iterators
struct is_hetero_iterator<
Iter, typename std::enable_if<Iter::is_hetero::value, void>::type>
: std::true_type {};
} // namespace internal
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_iterator;
template <typename ValueType, typename Allocator, typename Derived>
class device_pointer_base {
protected:
sycl::buffer<ValueType, 1, Allocator> buffer;
std::size_t idx;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(sycl::buffer<ValueType, 1> in, std::size_t i = 0)
: buffer(in), idx(i) {}
#ifdef __USE_DPCT
template <typename OtherT>
device_pointer_base(OtherT *ptr)
: buffer(
dpct::detail::mem_mgr::instance()
.translate_ptr(ptr)
.buffer.template reinterpret<ValueType, 1>(sycl::range<1>(
dpct::detail::mem_mgr::instance().translate_ptr(ptr).size /
sizeof(ValueType)))),
idx(ptr - (ValueType*)dpct::detail::mem_mgr::instance()
.translate_ptr(ptr).alloc_ptr) {}
#endif
device_pointer_base(const std::size_t count)
: buffer(sycl::range<1>(count / sizeof(ValueType))), idx() {}
  // sycl::buffer has no default constructor, so we pass a zero range to
  // create an empty buffer
device_pointer_base() : buffer(sycl::range<1>(0)) {}
device_pointer_base(const device_pointer_base &in)
: buffer(in.buffer), idx(in.idx) {}
pointer get() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() {
auto res = (buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
operator ValueType *() const {
auto res =
(const_cast<device_pointer_base *>(this)
->buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
return res + idx;
}
Derived operator+(difference_type forward) const {
return Derived{buffer, idx + forward};
}
Derived operator-(difference_type backward) const {
return Derived{buffer, idx - backward};
}
Derived operator++(int) {
Derived p(buffer, idx);
idx += 1;
return p;
}
Derived operator--(int) {
Derived p(buffer, idx);
idx -= 1;
return p;
}
difference_type operator-(const Derived &it) const { return idx - it.idx; }
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - std::distance(oneapi::dpl::begin(buffer), it);
}
std::size_t get_idx() const { return idx; } // required
sycl::buffer<ValueType, 1, Allocator> get_buffer() {
return buffer;
} // required
};
template <typename T, sycl::access_mode Mode, typename Allocator>
class device_pointer
: public device_pointer_base<T, Allocator,
device_pointer<T, Mode, Allocator>> {
private:
using base_type = device_pointer_base<T, Allocator, device_pointer>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<T, 1> in, std::size_t i = 0) : base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
template <sycl::access_mode Mode, typename Allocator>
class device_pointer<void, Mode, Allocator>
: public device_pointer_base<dpct::byte_t, Allocator,
device_pointer<void, Mode, Allocator>> {
private:
using base_type =
device_pointer_base<dpct::byte_t, Allocator, device_pointer>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type;
static constexpr sycl::access_mode mode = Mode; // required
device_pointer(sycl::buffer<value_type, 1> in, std::size_t i = 0)
: base_type(in, i) {}
#ifdef __USE_DPCT
template <typename OtherT> device_pointer(OtherT *ptr) : base_type(ptr) {}
#endif
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer(const device_pointer &in) : base_type(in) {}
device_pointer &operator+=(difference_type forward) {
this->idx += forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->idx -= backward;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
this->idx += 1;
return *this;
}
device_pointer &operator--() {
this->idx -= 1;
return *this;
}
};
#else
template <typename T> class device_iterator;
template <typename ValueType, typename Derived> class device_pointer_base {
protected:
ValueType *ptr;
public:
using pointer = ValueType *;
using difference_type = std::make_signed<std::size_t>::type;
device_pointer_base(ValueType *p) : ptr(p) {}
device_pointer_base(const std::size_t count) {
sycl::queue default_queue = dpct::get_default_queue();
ptr = static_cast<ValueType *>(sycl::malloc_shared(
count, default_queue.get_device(), default_queue.get_context()));
}
device_pointer_base() {}
pointer get() const { return ptr; }
operator ValueType *() { return ptr; }
operator ValueType *() const { return ptr; }
ValueType &operator[](difference_type idx) { return ptr[idx]; }
ValueType &operator[](difference_type idx) const { return ptr[idx]; }
Derived operator+(difference_type forward) const {
return Derived{ptr + forward};
}
Derived operator-(difference_type backward) const {
return Derived{ptr - backward};
}
Derived operator++(int) {
Derived p(ptr);
++ptr;
return p;
}
Derived operator--(int) {
Derived p(ptr);
--ptr;
return p;
}
difference_type operator-(const Derived &it) const { return ptr - it.ptr; }
};
template <typename T>
class device_pointer : public device_pointer_base<T, device_pointer<T>> {
private:
using base_type = device_pointer_base<T, device_pointer<T>>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using const_reference = const T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(T *p) : base_type(p) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
device_pointer &operator=(const device_iterator<T> &in) {
this->ptr = static_cast<device_pointer<T>>(in).ptr;
return *this;
}
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
template <>
class device_pointer<void>
: public device_pointer_base<dpct::byte_t, device_pointer<void>> {
private:
using base_type = device_pointer_base<dpct::byte_t, device_pointer<void>>;
public:
using value_type = dpct::byte_t;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = void *;
using reference = value_type &;
using const_reference = const value_type &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
device_pointer(void *p) : base_type(static_cast<value_type *>(p)) {}
// needed for malloc_device, count is number of bytes to allocate
device_pointer(const std::size_t count) : base_type(count) {}
device_pointer() : base_type() {}
pointer get() const { return static_cast<pointer>(this->ptr); }
operator void *() { return this->ptr; }
operator void *() const { return this->ptr; }
// include operators from base class
using base_type::operator++;
using base_type::operator--;
device_pointer &operator++() {
++(this->ptr);
return *this;
}
device_pointer &operator--() {
--(this->ptr);
return *this;
}
device_pointer &operator+=(difference_type forward) {
this->ptr = this->ptr + forward;
return *this;
}
device_pointer &operator-=(difference_type backward) {
this->ptr = this->ptr - backward;
return *this;
}
};
#endif
#ifdef DPCT_USM_LEVEL_NONE
template <typename T, sycl::access_mode Mode = sycl::access_mode::read_write,
typename Allocator = detail::__buffer_allocator<T>>
class device_iterator : public device_pointer<T, Mode, Allocator> {
using Base = device_pointer<T, Mode, Allocator>;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = T *;
using reference = T &;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::true_type; // required
using is_passed_directly = std::false_type; // required
static constexpr sycl::access_mode mode = Mode; // required
device_iterator() : Base() {}
device_iterator(sycl::buffer<T, 1, Allocator> vec, std::size_t index)
: Base(vec, index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T, inMode, Allocator> &in)
: Base(in.buffer, in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::buffer = in.buffer;
Base::idx = in.idx;
return *this;
}
reference operator*() const {
return const_cast<device_iterator *>(this)
->buffer.template get_access<mode>()[Base::idx];
}
reference operator[](difference_type i) const { return *(*this + i); }
device_iterator &operator++() {
++Base::idx;
return *this;
}
device_iterator &operator--() {
--Base::idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = Base::idx + forward;
return {Base::buffer, new_idx};
}
device_iterator &operator+=(difference_type forward) {
Base::idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::buffer, Base::idx - backward};
}
device_iterator &operator-=(difference_type backward) {
Base::idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return Base::idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return Base::idx - std::distance(oneapi::dpl::begin(Base::buffer), it);
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return Base::idx; } // required
sycl::buffer<T, 1, Allocator> get_buffer() {
return Base::buffer;
} // required
};
#else
template <typename T> class device_iterator : public device_pointer<T> {
using Base = device_pointer<T>;
protected:
std::size_t idx;
public:
using value_type = T;
using difference_type = std::make_signed<std::size_t>::type;
using pointer = typename Base::pointer;
using reference = typename Base::reference;
using iterator_category = std::random_access_iterator_tag;
using is_hetero = std::false_type; // required
using is_passed_directly = std::true_type; // required
static constexpr sycl::access_mode mode =
sycl::access_mode::read_write; // required
device_iterator() : Base(nullptr), idx(0) {}
device_iterator(T *vec, std::size_t index) : Base(vec), idx(index) {}
template <sycl::access_mode inMode>
device_iterator(const device_iterator<T> &in)
: Base(in.ptr), idx(in.idx) {} // required for iter_mode
device_iterator &operator=(const device_iterator &in) {
Base::operator=(in);
idx = in.idx;
return *this;
}
reference operator*() const { return *(Base::ptr + idx); }
reference operator[](difference_type i) { return Base::ptr[idx + i]; }
reference operator[](difference_type i) const { return Base::ptr[idx + i]; }
device_iterator &operator++() {
++idx;
return *this;
}
device_iterator &operator--() {
--idx;
return *this;
}
device_iterator operator++(int) {
device_iterator it(*this);
++(*this);
return it;
}
device_iterator operator--(int) {
device_iterator it(*this);
--(*this);
return it;
}
device_iterator operator+(difference_type forward) const {
const auto new_idx = idx + forward;
return {Base::ptr, new_idx};
}
device_iterator &operator+=(difference_type forward) {
idx += forward;
return *this;
}
device_iterator operator-(difference_type backward) const {
return {Base::ptr, idx - backward};
}
device_iterator &operator-=(difference_type backward) {
idx -= backward;
return *this;
}
friend device_iterator operator+(difference_type forward,
const device_iterator &it) {
return it + forward;
}
difference_type operator-(const device_iterator &it) const {
return idx - it.idx;
}
template <typename OtherIterator>
typename std::enable_if<internal::is_hetero_iterator<OtherIterator>::value,
difference_type>::type
operator-(const OtherIterator &it) const {
return idx - it.get_idx();
}
bool operator==(const device_iterator &it) const { return *this - it == 0; }
bool operator!=(const device_iterator &it) const { return !(*this == it); }
bool operator<(const device_iterator &it) const { return *this - it < 0; }
bool operator>(const device_iterator &it) const { return it < *this; }
bool operator<=(const device_iterator &it) const { return !(*this > it); }
bool operator>=(const device_iterator &it) const { return !(*this < it); }
std::size_t get_idx() const { return idx; } // required
device_iterator &get_buffer() { return *this; } // required
std::size_t size() const { return idx; }
};
#endif
template <typename T>
device_pointer<T> malloc_device(const std::size_t num_elements) {
return device_pointer<T>(num_elements * sizeof(T));
}
static inline device_pointer<void> malloc_device(const std::size_t num_bytes) {
return device_pointer<void>(num_bytes);
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const T &value,
const std::size_t count = 1) {
std::vector<T> result(count, value);
p.buffer = sycl::buffer<T, 1>(result.begin(), result.end());
return p + count;
}
template <typename T>
device_pointer<T> device_new(device_pointer<T> p, const std::size_t count = 1) {
return device_new(p, T{}, count);
}
template <typename T>
device_pointer<T> device_new(const std::size_t count = 1) {
return device_pointer<T>(count);
}
template <typename T> void free_device(device_pointer<T> ptr) {}
template <typename T>
typename std::enable_if<!std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T> p, const std::size_t count = 1) {
for (std::size_t i = 0; i < count; ++i) {
p[i].~T();
}
}
template <typename T>
typename std::enable_if<std::is_trivially_destructible<T>::value, void>::type
device_delete(device_pointer<T>, const std::size_t count = 1) {}
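// Hedged usage sketch for the allocation helpers above (element type and
// count are illustrative): malloc_device<T> sizes the allocation in
// elements, and free_device is currently a no-op placeholder.
//   auto p = dpct::malloc_device<float>(1024);   // device_pointer<float>
//   float *raw = dpct::get_raw_pointer(p);       // underlying raw pointer
//   dpct::free_device(p);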
template <typename T> device_pointer<T> get_device_pointer(T *ptr) {
return device_pointer<T>(ptr);
}
template <typename T>
device_pointer<T> get_device_pointer(const device_pointer<T> &ptr) {
return device_pointer<T>(ptr);
}
template <typename T> T *get_raw_pointer(const device_pointer<T> &ptr) {
return ptr.get();
}
template <typename Pointer> Pointer get_raw_pointer(const Pointer &ptr) {
return ptr;
}
template <typename T> const T &get_raw_reference(const device_reference<T> &ref) {
return ref.value;
}
template <typename T> T &get_raw_reference(device_reference<T> &ref) {
return ref.value;
}
template <typename T> const T &get_raw_reference(const T &ref) {
return ref;
}
template <typename T> T &get_raw_reference(T &ref) {
return ref;
}
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/vector.h | //==---- vector.h ---------------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_VECTOR_H__
#define __DPCT_VECTOR_H__
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <sycl/sycl.hpp>
#include "memory.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include "../device.hpp"
namespace dpct {
namespace internal {
template <typename Iter, typename Void = void> // for non-iterators
struct is_iterator : std::false_type {};
template <typename Iter> // For iterators
struct is_iterator<
Iter,
typename std::enable_if<
!std::is_void<typename Iter::iterator_category>::value, void>::type>
: std::true_type {};
template <typename T> // For pointers
struct is_iterator<T *> : std::true_type {};
} // end namespace internal
#ifndef DPCT_USM_LEVEL_NONE
template <typename T,
typename Allocator = sycl::usm_allocator<T, sycl::usm::alloc::shared>>
class device_vector {
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename ::std::iterator_traits<iterator>::difference_type;
using size_type = ::std::size_t;
private:
Allocator _alloc;
size_type _size;
size_type _capacity;
pointer _storage;
size_type _min_capacity() const { return size_type(1); }
void _set_capacity_and_alloc() {
_capacity = ::std::max(_size * 2, _min_capacity());
_storage = _alloc.allocate(_capacity);
}
public:
template <typename OtherA> operator ::std::vector<T, OtherA>() const {
auto __tmp = ::std::vector<T, OtherA>(this->size());
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
this->begin(), this->end(), __tmp.begin());
return __tmp;
}
device_vector()
: _alloc(get_default_queue()), _size(0), _capacity(_min_capacity()) {
_set_capacity_and_alloc();
}
~device_vector() /*= default*/ { _alloc.deallocate(_storage, _capacity); };
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _alloc(get_default_queue()), _size(n) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), T(value));
}
}
device_vector(const device_vector &other) : _alloc(get_default_queue()) {
_size = other.size();
_capacity = other.capacity();
_storage = _alloc.allocate(_capacity);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
device_vector(device_vector &&other)
: _alloc(get_default_queue()), _size(other.size()),
_capacity(other.capacity()), _storage(other._storage) {
other._size = 0;
other._capacity = 0;
other._storage = nullptr;
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<::std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _alloc(get_default_queue()) {
_size = ::std::distance(first, last);
_set_capacity_and_alloc();
if (_size > 0) {
auto ptr_type = sycl::get_pointer_type(first, get_default_context());
if (ptr_type != sycl::usm::alloc::host &&
ptr_type != sycl::usm::alloc::unknown) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
} else {
sycl::buffer<T, 1> buf(first, last);
auto buf_first = oneapi::dpl::begin(buf);
auto buf_last = oneapi::dpl::end(buf);
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
buf_first, buf_last, begin());
}
}
}
template <typename InputIterator>
device_vector(InputIterator first,
typename ::std::enable_if<
internal::is_iterator<InputIterator>::value &&
!::std::is_pointer<InputIterator>::value &&
!::std::is_same<typename ::std::iterator_traits<
InputIterator>::iterator_category,
::std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _alloc(get_default_queue()), _size(::std::distance(first, last)) {
_set_capacity_and_alloc();
::std::vector<T> _tmp(first, last);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
_tmp.begin(), _tmp.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _storage(v.real_begin()), _size(v.size()),
_capacity(v.capacity()) {}
template <typename OtherAllocator>
device_vector(::std::vector<T, OtherAllocator> &v)
: _alloc(get_default_queue()), _size(v.size()) {
_set_capacity_and_alloc();
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), this->begin());
}
}
template <typename OtherAllocator>
device_vector &operator=(const ::std::vector<T, OtherAllocator> &v) {
resize(v.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.begin(), v.end(), begin());
}
return *this;
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
resize(other.size());
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
other.begin(), other.end(), begin());
}
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
device_vector dummy(::std::move(other));
this->swap(dummy);
return *this;
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(_storage, 0); }
iterator end() { return device_iterator<T>(_storage, size()); }
const_iterator begin() const noexcept {
return device_iterator<T>(_storage, 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(_storage, size()); }
const_iterator cend() const { return end(); }
T *real_begin() { return _storage; }
const T *real_begin() const { return _storage; }
void swap(device_vector &v) {
::std::swap(_size, v._size);
::std::swap(_capacity, v._capacity);
::std::swap(_storage, v._storage);
::std::swap(_alloc, v._alloc);
}
reference operator[](size_type n) { return _storage[n]; }
const_reference operator[](size_type n) const { return _storage[n]; }
void reserve(size_type n) {
if (n > capacity()) {
// allocate buffer for new size
auto tmp = _alloc.allocate(2 * n);
// copy content (old buffer to new buffer)
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
// deallocate old memory
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = 2 * n;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin() + _size, begin() + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return ::std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const { return _capacity; }
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return _storage; }
const_pointer data(void) const { return _storage; }
void shrink_to_fit(void) {
if (_size != capacity()) {
size_type tmp_capacity = ::std::max(_size, _min_capacity());
auto tmp = _alloc.allocate(tmp_capacity);
if (_size > 0) {
::std::copy(
oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), end(), tmp);
}
_alloc.deallocate(_storage, _capacity);
_storage = tmp;
_capacity = tmp_capacity;
}
}
void assign(size_type n, const T &x) {
resize(n);
if (_size > 0) {
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
begin(), begin() + n, x);
}
}
template <typename InputIterator>
void
assign(InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
resize(n);
if (_size > 0) {
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, begin());
}
}
void clear(void) { _size = 0; }
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = ::std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
auto m = ::std::distance(last, end());
if (m <= 0) {
return end();
}
auto tmp = _alloc.allocate(m);
// copy remainder to temporary buffer.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
last, end(), tmp);
    // overwrite (erase) subsequence in storage.
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, first);
_alloc.deallocate(tmp, m);
_size -= n;
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = ::std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
end() - n, end(), x);
} else {
auto i_n = ::std::distance(begin(), position);
// allocate temporary storage
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
// copy remainder
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::fill(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, position + n, x);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename ::std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = ::std::distance(first, last);
if (position == end()) {
resize(size() + n);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, end());
} else {
auto m = ::std::distance(position, end());
// will throw if position is not inside active vector
auto tmp = _alloc.allocate(m);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
position, end(), tmp);
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, position);
::std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
tmp, tmp + m, position + n);
_alloc.deallocate(tmp, m);
}
}
Allocator get_allocator() const { return _alloc; }
};
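// Illustrative usage sketch (assumes the USM-based configuration above and a
// valid default queue; all names are hypothetical):
//
//   std::vector<int> host{1, 2, 3, 4};
//   dpct::device_vector<int> dvec(host);   // copies host data to the device
//   dvec.push_back(5);
//   dvec.resize(8, 0);                     // grow, filling new elements with 0
//   std::vector<int> round_trip = dvec;    // implicit copy back to the host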
#else
template <typename T, typename Allocator = detail::__buffer_allocator<T>>
class device_vector {
static_assert(
std::is_same<Allocator, detail::__buffer_allocator<T>>::value,
"device_vector doesn't support custom allocator when USM is not used.");
public:
using iterator = device_iterator<T>;
using const_iterator = const iterator;
using reference = device_reference<T>;
using const_reference = const reference;
using value_type = T;
using pointer = T *;
using const_pointer = const T *;
using difference_type =
typename std::iterator_traits<iterator>::difference_type;
using size_type = std::size_t;
private:
using Buffer = sycl::buffer<T, 1>;
using Range = sycl::range<1>;
// Using mem_mgr to handle memory allocation
void *_storage;
size_type _size;
size_type _min_capacity() const { return size_type(1); }
void *alloc_store(size_type num_bytes) {
return detail::mem_mgr::instance().mem_alloc(num_bytes);
}
public:
template <typename OtherA> operator std::vector<T, OtherA>() const {
auto __tmp = std::vector<T, OtherA>(this->size());
std::copy(oneapi::dpl::execution::dpcpp_default, this->begin(), this->end(),
__tmp.begin());
return __tmp;
}
device_vector()
: _storage(alloc_store(_min_capacity() * sizeof(T))), _size(0) {}
~device_vector() = default;
explicit device_vector(size_type n) : device_vector(n, T()) {}
explicit device_vector(size_type n, const T &value)
: _storage(alloc_store(std::max(n, _min_capacity()) * sizeof(T))),
_size(n) {
auto buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(buf),
oneapi::dpl::begin(buf) + n, T(value));
}
device_vector(const device_vector &other)
: _storage(other._storage), _size(other.size()) {}
device_vector(device_vector &&other)
: _storage(std::move(other._storage)), _size(other.size()) {}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value &&
std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
first, last, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<std::is_pointer<InputIterator>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
Buffer tmp_buf(first, last);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename InputIterator>
device_vector(InputIterator first,
typename std::enable_if<
internal::is_iterator<InputIterator>::value &&
!std::is_same<typename std::iterator_traits<
InputIterator>::iterator_category,
std::random_access_iterator_tag>::value,
InputIterator>::type last)
: _storage(alloc_store(std::distance(first, last) * sizeof(T))),
_size(std::distance(first, last)) {
auto buf = get_buffer();
std::vector<T> tmp(first, last);
Buffer tmp_buf(tmp);
auto start = oneapi::dpl::begin(tmp_buf);
auto end = oneapi::dpl::end(tmp_buf);
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
start, end, dst);
}
template <typename OtherAllocator>
device_vector(const device_vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
auto buf = get_buffer();
auto dst = oneapi::dpl::begin(buf);
std::copy(oneapi::dpl::execution::make_device_policy(get_default_queue()),
v.real_begin(), v.real_begin() + v.size(), dst);
}
template <typename OtherAllocator>
device_vector(std::vector<T, OtherAllocator> &v)
: _storage(alloc_store(v.size() * sizeof(T))), _size(v.size()) {
std::copy(oneapi::dpl::execution::dpcpp_default, v.begin(), v.end(),
oneapi::dpl::begin(get_buffer()));
}
device_vector &operator=(const device_vector &other) {
// Copy assignment operator:
_size = other.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(other.get_buffer()),
oneapi::dpl::end(other.get_buffer()),
oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
device_vector &operator=(device_vector &&other) {
// Move assignment operator:
_size = other.size();
this->_storage = std::move(other._storage);
return *this;
}
template <typename OtherAllocator>
device_vector &operator=(const std::vector<T, OtherAllocator> &v) {
Buffer data(v.begin(), v.end());
_size = v.size();
void *tmp = alloc_store(_size * sizeof(T));
auto tmp_buf =
detail::mem_mgr::instance()
.translate_ptr(tmp)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(data),
oneapi::dpl::end(data), oneapi::dpl::begin(tmp_buf));
detail::mem_mgr::instance().mem_free(_storage);
_storage = tmp;
return *this;
}
Buffer get_buffer() const {
return detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template reinterpret<T, 1>(sycl::range<1>(capacity()));
}
size_type size() const { return _size; }
iterator begin() noexcept { return device_iterator<T>(get_buffer(), 0); }
iterator end() { return device_iterator<T>(get_buffer(), _size); }
const_iterator begin() const noexcept {
return device_iterator<T>(get_buffer(), 0);
}
const_iterator cbegin() const noexcept { return begin(); }
const_iterator end() const { return device_iterator<T>(get_buffer(), _size); }
const_iterator cend() const { return end(); }
T *real_begin() {
return (detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>())
.get_pointer();
}
const T *real_begin() const {
return const_cast<device_vector *>(this)
->detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.template get_access<sycl::access_mode::read_write>()
.get_pointer();
}
void swap(device_vector &v) {
void *temp = v._storage;
v._storage = this->_storage;
this->_storage = temp;
std::swap(_size, v._size);
}
reference operator[](size_type n) { return *(begin() + n); }
const_reference operator[](size_type n) const { return *(begin() + n); }
void reserve(size_type n) {
if (n > capacity()) {
// create new buffer (allocate for new size)
void *a = alloc_store(n * sizeof(T));
// copy content (old buffer to new buffer)
if (_storage != nullptr) {
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(n));
auto src_buf = get_buffer();
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf), oneapi::dpl::end(src_buf),
oneapi::dpl::begin(tmp));
// deallocate old memory
detail::mem_mgr::instance().mem_free(_storage);
}
_storage = a;
}
}
void resize(size_type new_size, const T &x = T()) {
reserve(new_size);
if (_size < new_size) {
auto src_buf = get_buffer();
std::fill(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(src_buf) + _size,
oneapi::dpl::begin(src_buf) + new_size, x);
}
_size = new_size;
}
size_type max_size(void) const {
return std::numeric_limits<size_type>::max() / sizeof(T);
}
size_type capacity() const {
return _storage != nullptr ? detail::mem_mgr::instance()
.translate_ptr(_storage)
.buffer.size() /
sizeof(T)
: 0;
}
const_reference front() const { return *begin(); }
reference front() { return *begin(); }
const_reference back(void) const { return *(end() - 1); }
reference back(void) { return *(end() - 1); }
pointer data(void) { return reinterpret_cast<pointer>(_storage); }
const_pointer data(void) const {
return reinterpret_cast<const_pointer>(_storage);
}
void shrink_to_fit(void) {
if (_size != capacity()) {
void *a = alloc_store(_size * sizeof(T));
auto tmp = detail::mem_mgr::instance()
.translate_ptr(a)
.buffer.template reinterpret<T, 1>(sycl::range<1>(_size));
std::copy(oneapi::dpl::execution::dpcpp_default,
oneapi::dpl::begin(get_buffer()),
oneapi::dpl::begin(get_buffer()) + _size,
oneapi::dpl::begin(tmp));
detail::mem_mgr::instance().mem_free(_storage);
_storage = a;
}
}
void assign(size_type n, const T &x) {
resize(n);
std::fill(oneapi::dpl::execution::dpcpp_default, begin(), begin() + n, x);
}
template <typename InputIterator>
void
assign(InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
resize(n);
if (internal::is_iterator<InputIterator>::value &&
!std::is_pointer<InputIterator>::value)
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, begin());
else {
Buffer tmp(first, last);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), begin());
}
}
void clear(void) {
_size = 0;
detail::mem_mgr::instance().mem_free(_storage);
_storage = nullptr;
}
bool empty(void) const { return (size() == 0); }
void push_back(const T &x) { insert(end(), size_type(1), x); }
void pop_back(void) {
if (_size > 0)
--_size;
}
iterator erase(iterator first, iterator last) {
auto n = std::distance(first, last);
if (last == end()) {
_size = _size - n;
return end();
}
Buffer tmp{Range(std::distance(last, end()))};
// copy remainder to temporary buffer.
std::copy(oneapi::dpl::execution::dpcpp_default, last, end(),
oneapi::dpl::begin(tmp));
    // overwrite (erase) subsequence in storage.
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), first);
resize(_size - n);
return begin() + first.get_idx() + n;
}
iterator erase(iterator pos) { return erase(pos, pos + 1); }
iterator insert(iterator position, const T &x) {
auto n = std::distance(begin(), position);
insert(position, size_type(1), x);
return begin() + n;
}
void insert(iterator position, size_type n, const T &x) {
if (position == end()) {
resize(size() + n);
std::fill(oneapi::dpl::execution::dpcpp_default, end() - n, end(), x);
} else {
auto i_n = std::distance(begin(), position);
// allocate temporary storage
Buffer tmp{Range(std::distance(position, end()))};
// copy remainder
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::fill(oneapi::dpl::execution::dpcpp_default, position, position + n,
x);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
template <typename InputIterator>
void
insert(iterator position, InputIterator first,
typename std::enable_if<internal::is_iterator<InputIterator>::value,
InputIterator>::type last) {
auto n = std::distance(first, last);
if (position == end()) {
resize(size() + n);
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, end());
} else {
Buffer tmp{Range(std::distance(position, end()))};
std::copy(oneapi::dpl::execution::dpcpp_default, position, end(),
oneapi::dpl::begin(tmp));
resize(size() + n);
// resizing might invalidate position
position = begin() + position.get_idx();
std::copy(oneapi::dpl::execution::dpcpp_default, first, last, position);
std::copy(oneapi::dpl::execution::dpcpp_default, oneapi::dpl::begin(tmp),
oneapi::dpl::end(tmp), position + n);
}
}
};
#endif
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/dpcpp_extensions.h | //==---- dpcpp_extensions.h ------------------*- C++ -*---------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------===//
#ifndef __DPCT_DPCPP_EXTENSIONS_H__
#define __DPCT_DPCPP_EXTENSIONS_H__
#include <sycl/sycl.hpp>
#include <stdexcept>
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
#include <sycl/ext/oneapi/experimental/user_defined_reductions.hpp>
#endif
#include "../dpct.hpp"
namespace dpct {
namespace group {
namespace detail {
template <typename... _Args>
constexpr auto __reduce_over_group(_Args... __args) {
return sycl::reduce_over_group(__args...);
}
template <typename... _Args> constexpr auto __group_broadcast(_Args... __args) {
return sycl::group_broadcast(__args...);
}
template <typename... _Args>
constexpr auto __exclusive_scan_over_group(_Args... __args) {
return sycl::exclusive_scan_over_group(__args...);
}
template <typename... _Args>
constexpr auto __inclusive_scan_over_group(_Args... __args) {
return sycl::inclusive_scan_over_group(__args...);
}
} // end namespace detail
/// Perform an exclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
exclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], T init,
BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
T input = inputs[0];
if (item.get_local_linear_id() == 0) {
outputs[0] = init;
} else {
outputs[0] = exclusive_result;
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
T output = binary_op(input, outputs[i - 1]);
input = inputs[i];
outputs[i] = output;
}
}
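// Illustrative usage sketch (inside a SYCL kernel; `item` and the value count
// are hypothetical):
//
//   constexpr int VPT = 4;
//   int in[VPT] = {1, 2, 3, 4}; // values owned by this work-item
//   int out[VPT];
//   dpct::group::exclusive_scan(item, in, out, /*init=*/0, sycl::plus<int>());
//   // `out` now holds this work-item's portion of the group-wide exclusive
//   // prefix scan over the flattened (work-item, element) sequence.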
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param init initial value of the scan result.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. \param group_aggregate group-wide aggregate of all inputs
/// in the work-items of the group. \returns exclusive scan of the first i
/// work-items where item is the i-th work item.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, T init, BinaryOperation binary_op,
T &group_aggregate) {
T output = detail::__exclusive_scan_over_group(item.get_group(), input, init,
binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an exclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. \param prefix_callback_op functor invoked by the first
/// work-item in the group that returns the
/// initial value in the resulting scan of the work-items in the group.
/// \returns exclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
exclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output =
detail::__exclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = binary_op(output, input);
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
T group_prefix = prefix_callback_op(group_aggregate);
if (item.get_local_linear_id() == 0) {
output = group_prefix;
} else {
output = binary_op(group_prefix, output);
}
return output;
}
namespace detail {
typedef uint16_t digit_counter_type;
typedef uint32_t packed_counter_type;
template <int N, int CURRENT_VAL = N, int COUNT = 0> struct log2 {
enum { VALUE = log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE };
};
template <int N, int COUNT> struct log2<N, 0, COUNT> {
enum { VALUE = (1 << (COUNT - 1) < N) ? COUNT : COUNT - 1 };
};
__dpct_inline__ uint32_t bfe(uint32_t source, uint32_t bit_start,
uint32_t num_bits) {
const uint32_t MASK = (1 << num_bits) - 1;
return (source >> bit_start) & MASK;
}
template <int RADIX_BITS, bool DESCENDING = false> class radix_rank {
public:
static size_t get_local_memory_size(size_t group_threads) {
return group_threads * PADDED_COUNTER_LANES * sizeof(packed_counter_type);
}
radix_rank(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item, int VALUES_PER_THREAD>
__dpct_inline__ void
rank_keys(const Item &item, uint32_t (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD], int current_bit, int num_bits) {
digit_counter_type thread_prefixes[VALUES_PER_THREAD];
digit_counter_type *digit_counters[VALUES_PER_THREAD];
digit_counter_type *buffer =
reinterpret_cast<digit_counter_type *>(_local_memory);
reset_local_memory(item);
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
uint32_t digit = bfe(keys[i], current_bit, num_bits);
uint32_t sub_counter = digit >> LOG_COUNTER_LANES;
uint32_t counter_lane = digit & (COUNTER_LANES - 1);
if (DESCENDING) {
sub_counter = PACKING_RATIO - 1 - sub_counter;
counter_lane = COUNTER_LANES - 1 - counter_lane;
}
digit_counters[i] =
&buffer[counter_lane * item.get_local_range().size() * PACKING_RATIO +
item.get_local_linear_id() * PACKING_RATIO + sub_counter];
thread_prefixes[i] = *digit_counters[i];
*digit_counters[i] = thread_prefixes[i] + 1;
}
item.barrier(sycl::access::fence_space::local_space);
scan_counters(item);
item.barrier(sycl::access::fence_space::local_space);
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
ranks[i] = thread_prefixes[i] + *digit_counters[i];
}
}
private:
template <typename Item>
__dpct_inline__ void reset_local_memory(const Item &item) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[i * item.get_local_range().size() + item.get_local_linear_id()] = 0;
}
}
template <typename Item>
__dpct_inline__ packed_counter_type upsweep(const Item &item) {
packed_counter_type sum = 0;
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; i++) {
cached_segment[i] =
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i];
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
sum += cached_segment[i];
}
return sum;
}
template <typename Item>
__dpct_inline__ void
exclusive_downsweep(const Item &item, packed_counter_type raking_partial) {
packed_counter_type *ptr =
reinterpret_cast<packed_counter_type *>(_local_memory);
packed_counter_type sum = raking_partial;
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
packed_counter_type value = cached_segment[i];
cached_segment[i] = sum;
sum += value;
}
#pragma unroll
for (int i = 0; i < PADDED_COUNTER_LANES; ++i) {
ptr[item.get_local_linear_id() * PADDED_COUNTER_LANES + i] =
cached_segment[i];
}
}
struct prefix_callback {
__dpct_inline__ packed_counter_type
operator()(packed_counter_type block_aggregate) {
packed_counter_type block_prefix = 0;
#pragma unroll
for (int packed = 1; packed < PACKING_RATIO; packed++) {
block_prefix += block_aggregate
<< (sizeof(digit_counter_type) * 8 * packed);
}
return block_prefix;
}
};
template <typename Item>
__dpct_inline__ void scan_counters(const Item &item) {
packed_counter_type raking_partial = upsweep(item);
prefix_callback callback;
packed_counter_type exclusive_partial = exclusive_scan(
item, raking_partial, sycl::ext::oneapi::plus<packed_counter_type>(),
callback);
exclusive_downsweep(item, exclusive_partial);
}
private:
static constexpr int PACKING_RATIO =
sizeof(packed_counter_type) / sizeof(digit_counter_type);
static constexpr int LOG_PACKING_RATIO = log2<PACKING_RATIO>::VALUE;
static constexpr int LOG_COUNTER_LANES = RADIX_BITS - LOG_PACKING_RATIO;
static constexpr int COUNTER_LANES = 1 << LOG_COUNTER_LANES;
static constexpr int PADDED_COUNTER_LANES = COUNTER_LANES + 1;
packed_counter_type cached_segment[PADDED_COUNTER_LANES];
uint8_t *_local_memory;
};
template <typename T, typename U> struct base_traits {
static __dpct_inline__ U twiddle_in(U key) {
throw std::runtime_error("Not implemented");
}
static __dpct_inline__ U twiddle_out(U key) {
throw std::runtime_error("Not implemented");
}
};
template <typename U> struct base_traits<uint32_t, U> {
static __dpct_inline__ U twiddle_in(U key) { return key; }
static __dpct_inline__ U twiddle_out(U key) { return key; }
};
template <typename U> struct base_traits<int, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) { return key ^ HIGH_BIT; }
static __dpct_inline__ U twiddle_out(U key) { return key ^ HIGH_BIT; }
};
template <typename U> struct base_traits<float, U> {
static constexpr U HIGH_BIT = U(1) << ((sizeof(U) * 8) - 1);
static __dpct_inline__ U twiddle_in(U key) {
U mask = (key & HIGH_BIT) ? U(-1) : HIGH_BIT;
return key ^ mask;
}
static __dpct_inline__ U twiddle_out(U key) {
U mask = (key & HIGH_BIT) ? HIGH_BIT : U(-1);
return key ^ mask;
}
};
template <typename T> struct traits : base_traits<T, T> {};
template <> struct traits<uint32_t> : base_traits<uint32_t, uint32_t> {};
template <> struct traits<int> : base_traits<int, uint32_t> {};
template <> struct traits<float> : base_traits<float, uint32_t> {};
} // namespace detail
namespace detail {
template <int N> struct power_of_two {
enum { VALUE = ((N & (N - 1)) == 0) };
};
__dpct_inline__ uint32_t shr_add(uint32_t x, uint32_t shift, uint32_t addend) {
return (x >> shift) + addend;
}
} // namespace detail
/// Implements the scatter-to-blocked exchange pattern used in the radix sort
/// algorithm.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
template <typename T, int VALUES_PER_THREAD> class exchange {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t padding_values =
(INSERT_PADDING)
? ((group_threads * VALUES_PER_THREAD) >> LOG_LOCAL_MEMORY_BANKS)
: 0;
return (group_threads * VALUES_PER_THREAD + padding_values) * sizeof(T);
}
exchange(uint8_t *local_memory) : _local_memory(local_memory) {}
/// Rearrange elements from rank order to blocked order
template <typename Item>
__dpct_inline__ void
scatter_to_blocked(Item item, T (&keys)[VALUES_PER_THREAD],
int (&ranks)[VALUES_PER_THREAD]) {
T *buffer = reinterpret_cast<T *>(_local_memory);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = ranks[i];
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
buffer[offset] = keys[i];
}
item.barrier(sycl::access::fence_space::local_space);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; i++) {
int offset = (item.get_local_id(0) * VALUES_PER_THREAD) + i;
if (INSERT_PADDING)
offset = detail::shr_add(offset, LOG_LOCAL_MEMORY_BANKS, offset);
keys[i] = buffer[offset];
}
}
private:
static constexpr int LOG_LOCAL_MEMORY_BANKS = 5;
static constexpr bool INSERT_PADDING =
(VALUES_PER_THREAD > 4) &&
(detail::power_of_two<VALUES_PER_THREAD>::VALUE);
uint8_t *_local_memory;
};
/// Implements radix sort to sort integer data elements assigned to all threads
/// in the group.
///
/// \tparam T type of the data elements exchanged
/// \tparam VALUES_PER_THREAD number of data elements assigned to a thread
/// \tparam DESCENDING boolean value indicating if data elements are sorted in
/// descending order.
template <typename T, int VALUES_PER_THREAD, bool DESCENDING = false>
class radix_sort {
public:
static size_t get_local_memory_size(size_t group_threads) {
size_t ranks_size =
detail::radix_rank<RADIX_BITS>::get_local_memory_size(group_threads);
size_t exchange_size =
exchange<T, VALUES_PER_THREAD>::get_local_memory_size(group_threads);
return sycl::max(ranks_size, exchange_size);
}
radix_sort(uint8_t *local_memory) : _local_memory(local_memory) {}
template <typename Item>
__dpct_inline__ void
sort(const Item &item, T (&keys)[VALUES_PER_THREAD], int begin_bit = 0,
int end_bit = 8 * sizeof(T)) {
uint32_t(&unsigned_keys)[VALUES_PER_THREAD] =
reinterpret_cast<uint32_t(&)[VALUES_PER_THREAD]>(keys);
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_in(unsigned_keys[i]);
}
while (true) {
int pass_bits = sycl::min(RADIX_BITS, end_bit - begin_bit);
int ranks[VALUES_PER_THREAD];
detail::radix_rank<RADIX_BITS, DESCENDING>(_local_memory)
.template rank_keys(item, unsigned_keys, ranks, begin_bit, pass_bits);
begin_bit += RADIX_BITS;
item.barrier(sycl::access::fence_space::local_space);
exchange<T, VALUES_PER_THREAD>(_local_memory)
.scatter_to_blocked(item, keys, ranks);
item.barrier(sycl::access::fence_space::local_space);
if (begin_bit >= end_bit)
break;
}
#pragma unroll
for (int i = 0; i < VALUES_PER_THREAD; ++i) {
unsigned_keys[i] = detail::traits<T>::twiddle_out(unsigned_keys[i]);
}
}
private:
static constexpr int RADIX_BITS = 4;
uint8_t *_local_memory;
};
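// Illustrative usage sketch (host + kernel code; `q`, GROUP_SIZE, and the
// accessor name are hypothetical):
//
//   constexpr int GROUP_SIZE = 128, VPT = 4;
//   size_t slm =
//       dpct::group::radix_sort<int, VPT>::get_local_memory_size(GROUP_SIZE);
//   q.submit([&](sycl::handler &cgh) {
//     sycl::local_accessor<uint8_t, 1> scratch(sycl::range<1>(slm), cgh);
//     cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(GROUP_SIZE),
//                                        sycl::range<1>(GROUP_SIZE)),
//                      [=](sycl::nd_item<1> item) {
//       int keys[VPT] = {3, 1, 4, 1}; // per-work-item keys
//       dpct::group::radix_sort<int, VPT>(&scratch[0]).sort(item, keys);
//     });
//   });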
/// Perform a reduction of the data elements assigned to all threads in the
/// group.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the reduce operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction. \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ T
reduce(Item item, T (&inputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; i++) {
result = binary_op(result, inputs[i]);
}
return detail::__reduce_over_group(item.get_group(), result, binary_op);
}
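// Illustrative usage sketch (inside a SYCL kernel; `item` is hypothetical):
//
//   float vals[4] = {1.f, 2.f, 3.f, 4.f};
//   float group_total = dpct::group::reduce(item, vals, sycl::plus<float>());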
/// Perform a reduction on a limited number of the work items in a subgroup
///
/// \param item A work-item in a group.
/// \param value value per work item which is to be reduced
/// \param items_to_reduce num work items at the start of the subgroup to reduce
/// \param binary_op functor that implements the binary operation used to
/// perform the reduction. \returns value of the reduction using binary_op
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__
typename ::std::enable_if_t<sycl::has_known_identity_v<BinaryOperation, T>, T>
reduce_over_partial_group(const Item &item, const T &value,
const ::std::uint16_t &items_to_reduce,
BinaryOperation binary_op) {
T value_temp = (item.get_local_linear_id() < items_to_reduce)
? value
: sycl::known_identity_v<BinaryOperation, T>;
return detail::__reduce_over_group(item.get_sub_group(), value_temp,
binary_op);
}
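// Illustrative usage sketch (inside a SYCL kernel; `item` and `my_value` are
// hypothetical):
//
//   // Reduce only the first 8 lanes of this work-item's sub-group; the
//   // remaining lanes contribute the operation's known identity.
//   float partial = dpct::group::reduce_over_partial_group(
//       item, my_value, /*items_to_reduce=*/8, sycl::plus<float>());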
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param inputs Pointer to the input data for the scan operation.
/// \param outputs Pointer to the location where scan results will be stored.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. \returns inclusive scan of the input elements assigned to
/// work-items in the group.
template <typename Item, typename T, class BinaryOperation,
int VALUES_PER_THREAD>
__dpct_inline__ void
inclusive_scan(const Item &item, T (&inputs)[VALUES_PER_THREAD],
T (&outputs)[VALUES_PER_THREAD], BinaryOperation binary_op) {
T result = inputs[0];
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
result = binary_op(result, inputs[i]);
}
T exclusive_result =
detail::__exclusive_scan_over_group(item.get_group(), result, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[0] = inputs[0];
} else {
outputs[0] = binary_op(inputs[0], exclusive_result);
}
#pragma unroll
for (int i = 1; i < VALUES_PER_THREAD; ++i) {
outputs[i] = binary_op(inputs[i], outputs[i - 1]);
}
}
/// Perform an inclusive scan over the values of inputs from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. \param group_aggregate group-wide aggregate of all inputs
/// in the work-items of the group. \returns inclusive scan of the input
/// elements assigned to work-items in the group.
template <typename Item, typename T, class BinaryOperation>
__dpct_inline__ T inclusive_scan(const Item &item, T input,
BinaryOperation binary_op,
T &group_aggregate) {
T output =
detail::__inclusive_scan_over_group(item.get_group(), input, binary_op);
if (item.get_local_linear_id() == item.get_local_range().size() - 1) {
group_aggregate = output;
}
group_aggregate = detail::__group_broadcast(
item.get_group(), group_aggregate, item.get_local_range().size() - 1);
return output;
}
/// Perform an inclusive scan over the values of input from all work-items in
/// the group using the operator binary_op, which must be one of the SYCL 2020
/// group algorithms library function objects.
///
/// \param item A work-item in a group.
/// \param input Input data for the scan operation.
/// \param binary_op functor that implements the binary operation used to
/// perform the scan. \param prefix_callback_op functor invoked by the first
/// work-item in the group that returns the
/// initial value in the resulting scan of the work-items in the group.
/// \returns inclusive scan of the input elements assigned to work-items in the
/// group.
template <typename Item, typename T, class BinaryOperation,
class GroupPrefixCallbackOperation>
__dpct_inline__ T
inclusive_scan(const Item &item, T input, BinaryOperation binary_op,
GroupPrefixCallbackOperation &prefix_callback_op) {
T group_aggregate;
T output = inclusive_scan(item, input, binary_op, group_aggregate);
T group_prefix = prefix_callback_op(group_aggregate);
return binary_op(group_prefix, output);
}
} // namespace group
namespace device {
namespace detail {
template <typename... _Args> constexpr auto __joint_reduce(_Args... __args) {
return sycl::joint_reduce(__args...);
}
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment \param end_offsets Pointer to the set of indices
/// that are one past the last element in each segment \param binary_op functor
/// that implements the binary operation used to perform the reduction.
/// \param init
/// initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
queue.submit([&](sycl::handler &cgh) {
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size), [=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
sycl::multi_ptr<T, sycl::access::address_space::global_space>
input_ptr = inputs;
T group_aggregate = detail::__joint_reduce(
item.get_group(), input_ptr + segment_begin,
input_ptr + segment_end, init, binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
}
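// Illustrative usage sketch (host-side call; the queue, data, and offsets are
// hypothetical and must be initialized before the call):
//
//   sycl::queue q;
//   float *data = sycl::malloc_shared<float>(8, q); // two segments of four
//   float *out = sycl::malloc_shared<float>(2, q);
//   int *begins = sycl::malloc_shared<int>(2, q);   // e.g. {0, 4}
//   int *ends = sycl::malloc_shared<int>(2, q);     // e.g. {4, 8}
//   // fill data, begins, and ends here, then launch the segmented reduce:
//   dpct::device::segmented_reduce<128>(q, data, out, 2, begins, ends,
//                                       sycl::plus<float>(), 0.0f);
//   q.wait();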
#ifdef SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
namespace experimental {
namespace detail {
template <typename _Tp, typename... _Ts> struct __is_any {
constexpr static bool value = std::disjunction_v<
std::is_same<std::remove_cv_t<_Tp>, std::remove_cv_t<_Ts>>...>;
};
template <typename _Tp, typename _Bp> struct __in_native_op_list {
constexpr static bool value =
__is_any<_Bp, sycl::plus<_Tp>, sycl::bit_or<_Tp>, sycl::bit_xor<_Tp>,
sycl::bit_and<_Tp>, sycl::maximum<_Tp>, sycl::minimum<_Tp>,
sycl::multiplies<_Tp>>::value;
};
template <typename _Tp, typename _Bp> struct __is_native_op {
constexpr static bool value = __in_native_op_list<_Tp, _Bp>::value ||
__in_native_op_list<void, _Bp>::value;
};
} // namespace detail
/// Perform a reduce on each of the segments specified within data stored on
/// the device. Compared with dpct::device::segmented_reduce, this experimental
/// feature supports user-defined reductions.
///
/// \param queue Command queue used to access device used for reduction
/// \param inputs Pointer to the data elements on the device to be reduced
/// \param outputs Pointer to the storage where the reduced value for each
/// segment will be stored \param segment_count number of segments to be reduced
/// \param begin_offsets Pointer to the set of indices that are the first
/// element in each segment \param end_offsets Pointer to the set of indices
/// that are one past the last element in each segment \param binary_op functor
/// that implements the binary operation used to perform the reduction.
/// \param init
/// initial value of the reduction for each segment.
template <int GROUP_SIZE, typename T, typename OffsetT, class BinaryOperation>
void segmented_reduce(sycl::queue queue, T *inputs, T *outputs,
size_t segment_count, OffsetT *begin_offsets,
OffsetT *end_offsets, BinaryOperation binary_op, T init) {
sycl::range<1> global_size(segment_count * GROUP_SIZE);
sycl::range<1> local_size(GROUP_SIZE);
if constexpr (!detail::__is_native_op<T, BinaryOperation>::value) {
queue.submit([&](sycl::handler &cgh) {
size_t temp_memory_size = GROUP_SIZE * sizeof(T);
auto scratch = sycl::local_accessor<std::byte, 1>(temp_memory_size, cgh);
cgh.parallel_for(
sycl::nd_range<1>(global_size, local_size),
[=](sycl::nd_item<1> item) {
OffsetT segment_begin = begin_offsets[item.get_group_linear_id()];
OffsetT segment_end = end_offsets[item.get_group_linear_id()];
if (segment_begin == segment_end) {
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = init;
}
return;
}
// Create a handle that associates the group with an allocation it
// can use
auto handle =
sycl::ext::oneapi::experimental::group_with_scratchpad(
item.get_group(),
sycl::span(&scratch[0], temp_memory_size));
T group_aggregate = sycl::ext::oneapi::experimental::joint_reduce(
handle, inputs + segment_begin, inputs + segment_end, init,
binary_op);
if (item.get_local_linear_id() == 0) {
outputs[item.get_group_linear_id()] = group_aggregate;
}
});
});
} else {
dpct::device::segmented_reduce<GROUP_SIZE>(queue, inputs, outputs,
segment_count, begin_offsets,
end_offsets, binary_op, init);
}
}
} // namespace experimental
#endif // SYCL_EXT_ONEAPI_USER_DEFINED_REDUCTIONS
} // namespace device
} // namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/02_sycl_migrated/include/dpct/dpl_extras/functional.h | //==---- functional.h -----------------------------*- C++ -*----------------==//
//
// Copyright (C) Intel Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
#ifndef __DPCT_FUNCTIONAL_H__
#define __DPCT_FUNCTIONAL_H__
#include <functional>
#include <oneapi/dpl/functional>
#include <oneapi/dpl/iterator>
#if ONEDPL_USE_DPCPP_BACKEND
#include <oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_utils.h>
#endif
#include <tuple>
#include <utility>
namespace dpct {
struct null_type {};
namespace internal {
template <class _ExecPolicy, class _T>
using enable_if_execution_policy =
typename std::enable_if<oneapi::dpl::execution::is_execution_policy<
typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
template <typename _T> struct is_hetero_execution_policy : ::std::false_type {};
template <typename... PolicyParams>
struct is_hetero_execution_policy<
oneapi::dpl::execution::device_policy<PolicyParams...>> : ::std::true_type {
};
template <typename _T> struct is_fpga_execution_policy : ::std::false_type {};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int unroll_factor, typename... PolicyParams>
struct is_hetero_execution_policy<
execution::fpga_policy<unroll_factor, PolicyParams...>> : ::std::true_type {
};
#endif
template <class _ExecPolicy, class _T>
using enable_if_hetero_execution_policy = typename std::enable_if<
is_hetero_execution_policy<typename std::decay<_ExecPolicy>::type>::value,
_T>::type;
#if _ONEDPL_CPP14_INTEGER_SEQUENCE_PRESENT
template <std::size_t... _Sp>
using index_sequence = std::index_sequence<_Sp...>;
template <std::size_t _Np>
using make_index_sequence = std::make_index_sequence<_Np>;
#else
template <std::size_t... _Sp> class index_sequence {};
template <std::size_t _Np, std::size_t... _Sp>
struct make_index_sequence_impl
: make_index_sequence_impl<_Np - 1, _Np - 1, _Sp...> {};
template <std::size_t... _Sp> struct make_index_sequence_impl<0, _Sp...> {
using type = index_sequence<_Sp...>;
};
template <std::size_t _Np>
using make_index_sequence = typename make_index_sequence_impl<_Np>::type;
#endif
// Minimal buffer implementations for temporary storage in mapping rules
// Some of our algorithms need to start with raw memory buffer,
// not an initialized array, because initialization/destruction
// would make the span be at least O(N).
#if ONEDPL_USE_DPCPP_BACKEND
template <typename _Tp> class __buffer {
sycl::buffer<_Tp, 1> __buf;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(std::size_t __n) : __buf(sycl::range<1>(__n)) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
auto get() -> decltype(oneapi::dpl::begin(__buf)) const {
return oneapi::dpl::begin(__buf);
}
};
#else
template <typename _Tp> class __buffer {
std::unique_ptr<_Tp> _M_ptr;
__buffer(const __buffer &) = delete;
void operator=(const __buffer &) = delete;
public:
// Try to obtain buffer of given size to store objects of _Tp type
__buffer(const std::size_t __n) : _M_ptr(new _Tp[__n]) {}
// Return pointer to buffer, or NULL if buffer could not be obtained.
_Tp *get() const { return _M_ptr.get(); }
};
#endif
// Implements C++14 std::less<void> specialization to allow parameter type
// deduction.
class __less {
public:
template <typename _Xp, typename _Yp>
bool operator()(_Xp &&__x, _Yp &&__y) const {
return std::forward<_Xp>(__x) < std::forward<_Yp>(__y);
}
};
template <typename Policy, typename NewName> struct rebind_policy {
using type = Policy;
};
template <typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::device_policy<KernelName>,
NewName> {
using type = oneapi::dpl::execution::device_policy<NewName>;
};
#if _ONEDPL_FPGA_DEVICE
template <unsigned int factor, typename KernelName, typename NewName>
struct rebind_policy<oneapi::dpl::execution::fpga_policy<factor, KernelName>,
NewName> {
using type = oneapi::dpl::execution::fpga_policy<factor, NewName>;
};
#endif
template <typename T1, typename T2,
typename R1 = typename std::iterator_traits<T1>::reference,
typename R2 = typename std::iterator_traits<T2>::reference>
struct perm_fun {
typedef R2 result_of;
perm_fun(T1 input) : source(input) {}
R2 operator()(R1 x) const { return *(source + x); }
private:
T1 source;
};
// Functor compares first element (key) from tied sequence.
template <typename Compare = class internal::__less> struct compare_key_fun {
typedef bool result_of;
compare_key_fun(Compare _comp = internal::__less()) : comp(_comp) {}
template <typename _T1, typename _T2>
result_of operator()(_T1 &&a, _T2 &&b) const {
using std::get;
return comp(get<0>(a), get<0>(b));
}
private:
mutable Compare comp;
};
// Functor evaluates second element of tied sequence with predicate.
// Used by: copy_if, remove_copy_if, stable_partition_copy
// Lambda:
template <typename Predicate> struct predicate_key_fun {
typedef bool result_of;
predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return pred(get<1>(a));
}
private:
mutable Predicate pred;
};
// Used by: remove_if
template <typename Predicate> struct negate_predicate_key_fun {
typedef bool result_of;
negate_predicate_key_fun(Predicate _pred) : pred(_pred) {}
template <typename _T1> result_of operator()(_T1 &&a) const {
using std::get;
return !pred(get<1>(a));
}
private:
mutable Predicate pred;
};
template <typename T> struct sequence_fun {
using result_type = T;
sequence_fun(T _init, T _step) : init(_init), step(_step) {}
template <typename _T> result_type operator()(_T &&i) const {
return static_cast<T>(init + step * i);
}
private:
const T init;
const T step;
};
//[binary_pred](Ref a, Ref b){ return(binary_pred(get<0>(a),get<0>(b)));
template <typename Predicate> struct unique_fun {
typedef bool result_of;
unique_fun(Predicate _pred) : pred(_pred) {}
template <typename _T> result_of operator()(_T &&a, _T &&b) const {
using std::get;
return pred(get<0>(a), get<0>(b));
}
private:
mutable Predicate pred;
};
// Lambda: [pred, &new_value](Ref1 a, Ref2 s) {return pred(s) ? new_value : a;
// });
template <typename T, typename Predicate> struct replace_if_fun {
public:
typedef T result_of;
replace_if_fun(Predicate _pred, T _new_value)
: pred(_pred), new_value(_new_value) {}
template <typename _T1, typename _T2> T operator()(_T1 &&a, _T2 &&s) const {
return pred(s) ? new_value : a;
}
private:
mutable Predicate pred;
const T new_value;
};
//[pred,op](Ref a){return pred(a) ? op(a) : a; }
template <typename T, typename Predicate, typename Operator>
struct transform_if_fun {
transform_if_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<0>(t)))
get<1>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
//[pred, op](Ref1 a, Ref2 s) { return pred(s) ? op(a) : a; });
template <typename T, typename Predicate, typename Operator>
struct transform_if_unary_zip_mask_fun {
transform_if_unary_zip_mask_fun(Predicate _pred, Operator _op) : pred(_pred), op(_op) {}
template <typename _T>
void operator()(_T&& t) const {
using std::get;
if (pred(get<1>(t)))
get<2>(t) = op(get<0>(t));
}
private:
mutable Predicate pred;
mutable Operator op;
};
template <typename T, typename Predicate, typename BinaryOperation>
class transform_if_zip_mask_fun {
public:
transform_if_zip_mask_fun(Predicate _pred = oneapi::dpl::identity(),
BinaryOperation _op = oneapi::dpl::identity())
: pred(_pred), op(_op) {}
template <typename _T> void operator()(_T &&t) const {
using std::get;
if (pred(get<2>(t)))
get<3>(t) = op(get<0>(t), get<1>(t));
}
private:
mutable Predicate pred;
mutable BinaryOperation op;
};
// The following code is similar to a section of code in
// oneDPL/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl_radix_sort.h
// It has a similar approach, and could be consolidated.
// Outside of some differences in approach, there are two significant
// differences in function.
//
// 1) This code allows the output type of the bit range translation to fit
// into the minimal type required to provide that many bits. The code in
// oneDPL to calculate the bucket for the radix is similar but its output is
// always std::uint32_t. The assumption that the bit range desired will fit in
// 32 bits is not true for this code.
//
// 2) This code ensures that for floating point type, -0.0f and 0.0f map to the
// same value. This allows the output of this translation to be used to provide
// a sort which ensures the stability of these values for floating point types.
template <int N> struct uint_byte_map {};
template <> struct uint_byte_map<1> { using type = uint8_t; };
template <> struct uint_byte_map<2> { using type = uint16_t; };
template <> struct uint_byte_map<4> { using type = uint32_t; };
template <> struct uint_byte_map<8> { using type = uint64_t; };
template <typename T> struct uint_map {
using type = typename uint_byte_map<sizeof(T)>::type;
};
template <typename T, typename OutKeyT> class translate_key {
using uint_type_t = typename uint_map<T>::type;
public:
translate_key(int begin_bit, int end_bit) {
shift = begin_bit;
mask = ~OutKeyT(0); // all ones
mask = mask >> (sizeof(OutKeyT) * 8 -
(end_bit - begin_bit)); // setup appropriate mask
flip_sign = uint_type_t(1) << (sizeof(uint_type_t) * 8 - 1); // sign bit
flip_key = ~uint_type_t(0); // 0xF...F
}
inline OutKeyT operator()(const T &key) const {
uint_type_t intermediate;
if constexpr (std::is_floating_point<T>::value) {
// normal case (both -0.0f and 0.0f equal -0.0f)
if (key != T(-0.0f)) {
uint_type_t is_negative = reinterpret_cast<const uint_type_t &>(key) >>
(sizeof(uint_type_t) * 8 - 1);
intermediate = reinterpret_cast<const uint_type_t &>(key) ^
((is_negative * flip_key) | flip_sign);
} else // special case for -0.0f to keep stability with 0.0f
{
T negzero = T(-0.0f);
intermediate = reinterpret_cast<const uint_type_t &>(negzero);
}
} else if constexpr (std::is_signed<T>::value) {
intermediate = reinterpret_cast<const uint_type_t &>(key) ^ flip_sign;
} else {
intermediate = key;
}
return static_cast<OutKeyT>(intermediate >> shift) &
mask; // shift, cast, and mask
}
private:
uint8_t shift;
OutKeyT mask;
uint_type_t flip_sign;
uint_type_t flip_key;
};
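// Illustrative usage sketch (the bit range and key value are hypothetical):
//
//   // Map the top 8 bits of a float key to an order-preserving bucket id.
//   dpct::internal::translate_key<float, uint8_t> to_bucket(/*begin_bit=*/24,
//                                                           /*end_bit=*/32);
//   uint8_t bucket = to_bucket(3.5f);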
} // end namespace internal
} // end namespace dpct
#endif
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/helper_timer.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Helper Timing Functions
#ifndef COMMON_HELPER_TIMER_H_
#define COMMON_HELPER_TIMER_H_
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// includes, system
#include <vector>
// includes, project
#include <exception.h>
// Definition of the StopWatch Interface; this is used if we don't want to use
// the CUT functions but rather a self-contained class interface
class StopWatchInterface {
public:
StopWatchInterface() {}
virtual ~StopWatchInterface() {}
public:
//! Start time measurement
virtual void start() = 0;
//! Stop time measurement
virtual void stop() = 0;
//! Reset time counters to zero
virtual void reset() = 0;
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
virtual float getTime() = 0;
//! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (i.e. finished sessions) and the current total time
virtual float getAverageTime() = 0;
};
//////////////////////////////////////////////////////////////////
// Begin Stopwatch timer class definitions for all OS platforms //
//////////////////////////////////////////////////////////////////
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// includes, system
#define WINDOWS_LEAN_AND_MEAN
#include <windows.h>
#undef min
#undef max
//! Windows specific implementation of StopWatch
class StopWatchWin : public StopWatchInterface {
public:
//! Constructor, default
StopWatchWin()
: start_time(),
end_time(),
diff_time(0.0f),
total_time(0.0f),
running(false),
clock_sessions(0),
freq(0),
freq_set(false) {
if (!freq_set) {
// helper variable
LARGE_INTEGER temp;
// get the tick frequency from the OS
QueryPerformanceFrequency(reinterpret_cast<LARGE_INTEGER *>(&temp));
// convert to type in which it is needed
freq = (static_cast<double>(temp.QuadPart)) / 1000.0;
      // remember that the frequency has been queried
freq_set = true;
}
}
// Destructor
~StopWatchWin() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (i.e. finished sessions) and the current total time
inline float getAverageTime();
private:
// member variables
//! Start of measurement
LARGE_INTEGER start_time;
//! End of measurement
LARGE_INTEGER end_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
//! tick frequency
double freq;
//! flag if the frequency has been set
bool freq_set;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::start() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed interval to the current diff_time
//! summation variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::stop() {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&end_time));
diff_time = static_cast<float>(((static_cast<double>(end_time.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
total_time += diff_time;
clock_sessions++;
running = false;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchWin::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&start_time));
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
LARGE_INTEGER temp;
QueryPerformanceCounter(reinterpret_cast<LARGE_INTEGER *>(&temp));
retval += static_cast<float>(((static_cast<double>(temp.QuadPart) -
static_cast<double>(start_time.QuadPart)) /
freq));
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchWin::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
#else
// Declarations for Stopwatch on Linux and Mac OSX
// includes, system
#include <sys/time.h>
#include <ctime>
//! Linux/Mac OS X specific implementation of StopWatch
class StopWatchLinux : public StopWatchInterface {
public:
//! Constructor, default
StopWatchLinux()
: start_time(),
diff_time(0.0),
total_time(0.0),
running(false),
clock_sessions(0) {}
// Destructor
virtual ~StopWatchLinux() {}
public:
//! Start time measurement
inline void start();
//! Stop time measurement
inline void stop();
//! Reset time counters to zero
inline void reset();
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned, otherwise the
//! time between the last start() and stop call is returned
inline float getTime();
//! Mean time to date based on the number of times the stopwatch has been
  //! _stopped_ (i.e. finished sessions) and the current total time
inline float getAverageTime();
private:
// helper functions
//! Get difference between start time and current time
inline float getDiffTime();
private:
// member variables
//! Start of measurement
struct timeval start_time;
//! Time difference between the last start and stop
float diff_time;
//! TOTAL time difference between starts and stops
float total_time;
//! flag if the stop watch is running
bool running;
//! Number of times clock has been started
//! and stopped to allow averaging
int clock_sessions;
};
// functions, inlined
////////////////////////////////////////////////////////////////////////////////
//! Start time measurement
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::start() {
gettimeofday(&start_time, 0);
running = true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop time measurement and add the elapsed interval to the current diff_time
//! summation variable. Also increment the number of times this clock has been run.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::stop() {
diff_time = getDiffTime();
total_time += diff_time;
running = false;
clock_sessions++;
}
////////////////////////////////////////////////////////////////////////////////
//! Reset the timer to 0. Does not change the timer running state but does
//! recapture this point in time as the current start time if it is running.
////////////////////////////////////////////////////////////////////////////////
inline void StopWatchLinux::reset() {
diff_time = 0;
total_time = 0;
clock_sessions = 0;
if (running) {
gettimeofday(&start_time, 0);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. after start. If the stop watch is still running (i.e. there
//! was no call to stop()) then the elapsed time is returned added to the
//! current diff_time sum, otherwise the current summed time difference alone
//! is returned.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getTime() {
// Return the TOTAL time to date
float retval = total_time;
if (running) {
retval += getDiffTime();
}
return retval;
}
////////////////////////////////////////////////////////////////////////////////
//! Time in msec. for a single run based on the total number of COMPLETED runs
//! and the total time.
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getAverageTime() {
return (clock_sessions > 0) ? (total_time / clock_sessions) : 0.0f;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
inline float StopWatchLinux::getDiffTime() {
struct timeval t_time;
gettimeofday(&t_time, 0);
// time difference in milli-seconds
return static_cast<float>(1000.0 * (t_time.tv_sec - start_time.tv_sec) +
(0.001 * (t_time.tv_usec - start_time.tv_usec)));
}
#endif // WIN32
////////////////////////////////////////////////////////////////////////////////
//! Timer functionality exported
////////////////////////////////////////////////////////////////////////////////
//! Create a new timer
//! @return true if a timer has been created, otherwise false
//! @param timer_interface pointer that receives the new timer; 0 if the creation failed
////////////////////////////////////////////////////////////////////////////////
inline bool sdkCreateTimer(StopWatchInterface **timer_interface) {
// printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
*timer_interface = reinterpret_cast<StopWatchInterface *>(new StopWatchWin());
#else
*timer_interface =
reinterpret_cast<StopWatchInterface *>(new StopWatchLinux());
#endif
return (*timer_interface != NULL) ? true : false;
}
////////////////////////////////////////////////////////////////////////////////
//! Delete a timer
//! @return true if a timer has been deleted, otherwise false
//! @param timer_interface pointer to the timer to delete
////////////////////////////////////////////////////////////////////////////////
inline bool sdkDeleteTimer(StopWatchInterface **timer_interface) {
// printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
delete *timer_interface;
*timer_interface = NULL;
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Start the given timer
//! @param timer_interface pointer to the timer to start
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStartTimer(StopWatchInterface **timer_interface) {
// printf("sdkStartTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->start();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Stop the given timer. Does not reset.
//! @param timer_interface pointer to the timer to stop
////////////////////////////////////////////////////////////////////////////////
inline bool sdkStopTimer(StopWatchInterface **timer_interface) {
// printf("sdkStopTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->stop();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Resets the timer's counter.
//! @param timer_interface pointer to the timer to reset.
////////////////////////////////////////////////////////////////////////////////
inline bool sdkResetTimer(StopWatchInterface **timer_interface) {
// printf("sdkResetTimer called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
(*timer_interface)->reset();
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
//! Return the average time for timer execution as the total time
//! for the timer divided by the number of completed (stopped) runs the timer
//! has made.
//! Excludes the current running time if the timer is currently running.
//! @param timer_interface pointer to the timer to return the average time of
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetAverageTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetAverageTimerValue called object %08x\n", (void
// *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getAverageTime();
} else {
return 0.0f;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Total execution time for the timer over all runs since the last reset
//! or timer creation.
//! @param timer_interface pointer to the timer to obtain the value of.
////////////////////////////////////////////////////////////////////////////////
inline float sdkGetTimerValue(StopWatchInterface **timer_interface) {
// printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface);
if (*timer_interface) {
return (*timer_interface)->getTime();
} else {
return 0.0f;
}
}
#endif // COMMON_HELPER_TIMER_H_
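// Illustrative usage sketch (not part of the original header). It shows the
// typical sdkCreateTimer / sdkStartTimer / sdkStopTimer / sdkGetTimerValue
// sequence around a timed region; the dummy workload is an assumption made
// only for this example.
#if 0
#include <cstdio>
#include "helper_timer.h"
int main() {
  StopWatchInterface *timer = NULL;
  sdkCreateTimer(&timer);
  sdkStartTimer(&timer);
  volatile double sink = 0.0;  // dummy workload so there is something to time
  for (int i = 0; i < 1000000; ++i) {
    sink += i * 0.5;
  }
  sdkStopTimer(&timer);
  std::printf("elapsed: %.3f ms, average per run: %.3f ms\n",
              sdkGetTimerValue(&timer), sdkGetAverageTimerValue(&timer));
  sdkDeleteTimer(&timer);
  return 0;
}
#endif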
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/helper_string.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// These are helper functions for the SDK samples (string parsing, timers, etc)
#ifndef COMMON_HELPER_STRING_H_
#define COMMON_HELPER_STRING_H_
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE
#endif
#ifndef STRCASECMP
#define STRCASECMP _stricmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP _strnicmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) fopen_s(&fHandle, filename, mode)
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result != 0)
#endif
#ifndef SSCANF
#define SSCANF sscanf_s
#endif
#ifndef SPRINTF
#define SPRINTF sprintf_s
#endif
#else // Linux Includes
#include <string.h>
#include <strings.h>
#ifndef STRCASECMP
#define STRCASECMP strcasecmp
#endif
#ifndef STRNCASECMP
#define STRNCASECMP strncasecmp
#endif
#ifndef STRCPY
#define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath)
#endif
#ifndef FOPEN
#define FOPEN(fHandle, filename, mode) (fHandle = fopen(filename, mode))
#endif
#ifndef FOPEN_FAIL
#define FOPEN_FAIL(result) (result == NULL)
#endif
#ifndef SSCANF
#define SSCANF sscanf
#endif
#ifndef SPRINTF
#define SPRINTF sprintf
#endif
#endif
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// CUDA Utility Helper Functions
inline int stringRemoveDelimiter(char delimiter, const char *string) {
int string_start = 0;
while (string[string_start] == delimiter) {
string_start++;
}
if (string_start >= static_cast<int>(strlen(string) - 1)) {
return 0;
}
return string_start;
}
inline int getFileExtension(char *filename, char **extension) {
int string_length = static_cast<int>(strlen(filename));
while (filename[string_length--] != '.') {
if (string_length == 0) break;
}
if (string_length > 0) string_length += 2;
if (string_length == 0)
*extension = NULL;
else
*extension = &filename[string_length];
return string_length;
}
inline bool checkCmdLineFlag(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
const char *equal_pos = strchr(string_argv, '=');
int argv_length = static_cast<int>(
equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv);
int length = static_cast<int>(strlen(string_ref));
if (length == argv_length &&
!STRNCASECMP(string_argv, string_ref, length)) {
bFound = true;
continue;
}
}
}
return bFound;
}
// This function wraps the CUDA Driver API into a template function
template <class T>
inline bool getCmdLineArgumentValue(const int argc, const char **argv,
const char *string_ref, T *value) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
*value = (T)atoi(&string_argv[length + auto_inc]);
}
bFound = true;
i = argc;
}
}
}
return bFound;
}
inline int getCmdLineArgumentInt(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
int value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = atoi(&string_argv[length + auto_inc]);
} else {
value = 0;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline float getCmdLineArgumentFloat(const int argc, const char **argv,
const char *string_ref) {
bool bFound = false;
float value = -1;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
const char *string_argv = &argv[i][string_start];
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
if (length + 1 <= static_cast<int>(strlen(string_argv))) {
int auto_inc = (string_argv[length] == '=') ? 1 : 0;
value = static_cast<float>(atof(&string_argv[length + auto_inc]));
} else {
value = 0.f;
}
bFound = true;
continue;
}
}
}
if (bFound) {
return value;
} else {
return 0;
}
}
inline bool getCmdLineArgumentString(const int argc, const char **argv,
const char *string_ref,
char **string_retval) {
bool bFound = false;
if (argc >= 1) {
for (int i = 1; i < argc; i++) {
int string_start = stringRemoveDelimiter('-', argv[i]);
char *string_argv = const_cast<char *>(&argv[i][string_start]);
int length = static_cast<int>(strlen(string_ref));
if (!STRNCASECMP(string_argv, string_ref, length)) {
*string_retval = &string_argv[length + 1];
bFound = true;
continue;
}
}
}
if (!bFound) {
*string_retval = NULL;
}
return bFound;
}
//////////////////////////////////////////////////////////////////////////////
//! Find the path for a file assuming that
//! files are found in the searchPath.
//!
//! @return the path if succeeded, otherwise 0
//! @param filename name of the file
//! @param executable_path optional absolute path of the executable
//////////////////////////////////////////////////////////////////////////////
inline char *sdkFindFilePath(const char *filename,
const char *executable_path) {
// <executable_name> defines a variable that is replaced with the name of the
// executable
// Typical relative search paths to locate needed companion files (e.g. sample
  // input data, or JIT source files). The origin for the relative search may be
  // the .exe file, a .bat file launching an .exe, a browser .exe launching the
  // .exe or .bat, etc.
const char *searchPath[] = {
"./", // same dir
"./data/", // same dir
"../../../../Samples/<executable_name>/", // up 4 in tree
"../../../Samples/<executable_name>/", // up 3 in tree
"../../Samples/<executable_name>/", // up 2 in tree
"../../../../Samples/<executable_name>/data/", // up 4 in tree
"../../../Samples/<executable_name>/data/", // up 3 in tree
"../../Samples/<executable_name>/data/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/", // up 2 in tree
"../../../../Samples/0_Introduction/<executable_name>/data/", // up 4 in tree
"../../../Samples/0_Introduction/<executable_name>/data/", // up 3 in tree
"../../Samples/0_Introduction/<executable_name>/data/", // up 2 in tree
"../../../../Samples/1_Utilities/<executable_name>/data/", // up 4 in tree
"../../../Samples/1_Utilities/<executable_name>/data/", // up 3 in tree
"../../Samples/1_Utilities/<executable_name>/data/", // up 2 in tree
"../../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 4 in tree
"../../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 3 in tree
"../../Samples/2_Concepts_and_Techniques/<executable_name>/data/", // up 2 in tree
"../../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 4 in tree
"../../../Samples/3_CUDA_Features/<executable_name>/data/", // up 3 in tree
"../../Samples/3_CUDA_Features/<executable_name>/data/", // up 2 in tree
"../../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 4 in tree
"../../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 3 in tree
"../../Samples/4_CUDA_Libraries/<executable_name>/data/", // up 2 in tree
"../../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 4 in tree
"../../../Samples/5_Domain_Specific/<executable_name>/data/", // up 3 in tree
"../../Samples/5_Domain_Specific/<executable_name>/data/", // up 2 in tree
"../../../../Samples/6_Performance/<executable_name>/data/", // up 4 in tree
"../../../Samples/6_Performance/<executable_name>/data/", // up 3 in tree
"../../Samples/6_Performance/<executable_name>/data/", // up 2 in tree
"../../../../Common/data/", // up 4 in tree
"../../../Common/data/", // up 3 in tree
"../../Common/data/" // up 2 in tree
};
// Extract the executable name
std::string executable_name;
if (executable_path != 0) {
executable_name = std::string(executable_path);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// Windows path delimiter
size_t delimiter_pos = executable_name.find_last_of('\\');
executable_name.erase(0, delimiter_pos + 1);
if (executable_name.rfind(".exe") != std::string::npos) {
// we strip .exe, only if the .exe is found
executable_name.resize(executable_name.size() - 4);
}
#else
// Linux & OSX path delimiter
size_t delimiter_pos = executable_name.find_last_of('/');
executable_name.erase(0, delimiter_pos + 1);
#endif
}
// Loop over all search paths and return the first hit
for (unsigned int i = 0; i < sizeof(searchPath) / sizeof(char *); ++i) {
std::string path(searchPath[i]);
size_t executable_name_pos = path.find("<executable_name>");
// If there is executable_name variable in the searchPath
// replace it with the value
if (executable_name_pos != std::string::npos) {
if (executable_path != 0) {
path.replace(executable_name_pos, strlen("<executable_name>"),
executable_name);
} else {
// Skip this path entry if no executable argument is given
continue;
}
}
#ifdef _DEBUG
printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str());
#endif
// Test if the file exists
path.append(filename);
FILE *fp;
FOPEN(fp, path.c_str(), "rb");
if (fp != NULL) {
fclose(fp);
// File found
// returning an allocated array here for backwards compatibility reasons
char *file_path = reinterpret_cast<char *>(malloc(path.length() + 1));
STRCPY(file_path, path.length() + 1, path.c_str());
return file_path;
}
if (fp) {
fclose(fp);
}
}
// File not found
printf("\nerror: sdkFindFilePath: file <%s> not found!\n", filename);
return 0;
}
#endif // COMMON_HELPER_STRING_H_
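// Illustrative usage sketch (not part of the original header). It combines
// checkCmdLineFlag, getCmdLineArgumentInt, getCmdLineArgumentString, and
// sdkFindFilePath; the option names and the "data.txt" file name are
// assumptions made only for this example.
#if 0
#include <cstdio>
#include <cstdlib>
#include "helper_string.h"
int main(int argc, char **argv) {
  const char **cargv = (const char **)argv;
  if (checkCmdLineFlag(argc, cargv, "help")) {
    std::printf("usage: app [-device=<n>] [-file=<path>]\n");
    return 0;
  }
  int device = getCmdLineArgumentInt(argc, cargv, "device=");
  char *file = NULL;
  if (getCmdLineArgumentString(argc, cargv, "file", &file)) {
    std::printf("requested input file: %s\n", file);
  }
  // Search the usual sample directories for a companion data file.
  char *path = sdkFindFilePath("data.txt", argv[0]);
  if (path) {
    std::printf("found data file at %s\n", path);
    free(path);  // sdkFindFilePath returns a malloc'ed string
  }
  std::printf("device: %d\n", device);
  return 0;
}
#endif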
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/exception.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* CUda UTility Library */
#ifndef COMMON_EXCEPTION_H_
#define COMMON_EXCEPTION_H_
// includes, system
#include <stdlib.h>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
//! Exception wrapper.
//! @param Std_Exception Exception out of namespace std for easy typing.
template <class Std_Exception>
class Exception : public Std_Exception {
public:
//! @brief Static construction interface
//! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const char *detailed = "-");
//! Static construction interface
//! @return Always throws ( Located_Exception<Exception>)
//! @param file file in which the Exception occurs
//! @param line line in which the Exception occurs
//! @param detailed details on the code fragment causing the Exception
static void throw_it(const char *file, const int line,
const std::string &detailed);
//! Destructor
virtual ~Exception() throw();
private:
//! Constructor, default (private)
Exception();
//! Constructor, standard
//! @param str string returned by what()
explicit Exception(const std::string &str);
};
////////////////////////////////////////////////////////////////////////////////
//! Exception handler function for arbitrary exceptions
//! @param ex exception to handle
////////////////////////////////////////////////////////////////////////////////
template <class Exception_Typ>
inline void handleException(const Exception_Typ &ex) {
std::cerr << ex.what() << std::endl;
exit(EXIT_FAILURE);
}
//! Convenience macros
//! Exception caused by dynamic program behavior, e.g. file does not exist
#define RUNTIME_EXCEPTION(msg) \
Exception<std::runtime_error>::throw_it(__FILE__, __LINE__, msg)
//! Logic exception in program, e.g. an assert failed
#define LOGIC_EXCEPTION(msg) \
Exception<std::logic_error>::throw_it(__FILE__, __LINE__, msg)
//! Out of range exception
#define RANGE_EXCEPTION(msg) \
Exception<std::range_error>::throw_it(__FILE__, __LINE__, msg)
////////////////////////////////////////////////////////////////////////////////
//! Implementation
// includes, system
#include <sstream>
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed information.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const char *detailed) {
std::stringstream s;
  // Quite heavy-weight, but exceptions are not for
  // performance / release versions
s << "Exception in file '" << file << "' in line " << line << "\n"
<< "Detailed description: " << detailed << "\n";
throw Exception(s.str());
}
////////////////////////////////////////////////////////////////////////////////
//! Static construction interface.
//! @param Exception causing code fragment (file and line) and detailed information.
////////////////////////////////////////////////////////////////////////////////
/*static*/ template <class Std_Exception>
void Exception<Std_Exception>::throw_it(const char *file, const int line,
const std::string &msg) {
throw_it(file, line, msg.c_str());
}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, default (private).
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception() : Std_Exception("Unknown Exception.\n") {}
////////////////////////////////////////////////////////////////////////////////
//! Constructor, standard (private).
//! String returned by what().
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::Exception(const std::string &s) : Std_Exception(s) {}
////////////////////////////////////////////////////////////////////////////////
//! Destructor
////////////////////////////////////////////////////////////////////////////////
template <class Std_Exception>
Exception<Std_Exception>::~Exception() throw() {}
// functions, exported
#endif // COMMON_EXCEPTION_H_
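// Illustrative usage sketch (not part of the original header). It shows how
// the RUNTIME_EXCEPTION macro and handleException work together; the missing
// file name is an assumption made only for this example.
#if 0
#include <fstream>
#include "exception.h"
int main() {
  try {
    std::ifstream in("missing.dat");
    if (!in.good()) {
      // Throws Exception<std::runtime_error> tagged with __FILE__/__LINE__.
      RUNTIME_EXCEPTION("could not open missing.dat");
    }
  } catch (const std::runtime_error &ex) {
    handleException(ex);  // prints what() to stderr and exits
  }
  return 0;
}
#endif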
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/helper_cuda.h | /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions for initialization and error checking
#ifndef COMMON_HELPER_CUDA_H_
#define COMMON_HELPER_CUDA_H_
#pragma once
#include <sycl/sycl.hpp>
#include <dpct/dpct.hpp>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_string.h>
#ifndef EXIT_WAIVED
#define EXIT_WAIVED 2
#endif
// Note: your SDK sample is required to include the proper header files.
// Please refer to the CUDA examples for the needed CUDA headers,
// which may change depending on which CUDA functions are used.
// CUDA Runtime error messages
#ifdef __DPCT_HPP__
static const char *_cudaGetErrorEnum(dpct::err0 error) {
/*
DPCT1009:16: SYCL uses exceptions to report errors and does not use the error
codes. The original code was commented out and a warning string was inserted.
You need to rewrite this code.
*/
return "cudaGetErrorName is not supported" /*cudaGetErrorName(error)*/;
}
#endif
#ifdef CUDA_DRIVER_API
// CUDA Driver API errors
static const char *_cudaGetErrorEnum(CUresult error) {
static char unknown[] = "<unknown>";
const char *ret = NULL;
cuGetErrorName(error, &ret);
return ret ? ret : unknown;
}
#endif
#ifdef CUBLAS_API_H_
// cuBLAS API errors
static const char *_cudaGetErrorEnum(cublasStatus_t error) {
switch (error) {
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
#endif
#ifdef _CUFFT_H_
// cuFFT API errors
static const char *_cudaGetErrorEnum(cufftResult error) {
switch (error) {
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "CUFFT_INCOMPLETE_PARAMETER_LIST";
case CUFFT_INVALID_DEVICE:
return "CUFFT_INVALID_DEVICE";
case CUFFT_PARSE_ERROR:
return "CUFFT_PARSE_ERROR";
case CUFFT_NO_WORKSPACE:
return "CUFFT_NO_WORKSPACE";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
case CUFFT_NOT_SUPPORTED:
return "CUFFT_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSPARSEAPI
// cuSPARSE API errors
static const char *_cudaGetErrorEnum(cusparseStatus_t error) {
switch (error) {
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
#endif
#ifdef CUSOLVER_COMMON_H_
// cuSOLVER API errors
static const char *_cudaGetErrorEnum(cusolverStatus_t error) {
switch (error) {
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_STATUS_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_MAPPING_ERROR:
return "CUSOLVER_STATUS_MAPPING_ERROR";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSOLVER_STATUS_NOT_SUPPORTED:
return "CUSOLVER_STATUS_NOT_SUPPORTED ";
case CUSOLVER_STATUS_ZERO_PIVOT:
return "CUSOLVER_STATUS_ZERO_PIVOT";
case CUSOLVER_STATUS_INVALID_LICENSE:
return "CUSOLVER_STATUS_INVALID_LICENSE";
}
return "<unknown>";
}
#endif
#ifdef CURAND_H_
// cuRAND API errors
static const char *_cudaGetErrorEnum(int error) {
switch (error) {
case 0:
return "CURAND_STATUS_SUCCESS";
case 100:
return "CURAND_STATUS_VERSION_MISMATCH";
case 101:
return "CURAND_STATUS_NOT_INITIALIZED";
case 102:
return "CURAND_STATUS_ALLOCATION_FAILED";
case 103:
return "CURAND_STATUS_TYPE_ERROR";
case 104:
return "CURAND_STATUS_OUT_OF_RANGE";
case 105:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
case 106:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case 201:
return "CURAND_STATUS_LAUNCH_FAILURE";
case 202:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case 203:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case 204:
return "CURAND_STATUS_ARCH_MISMATCH";
case 999:
return "CURAND_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NVJPEGAPI
// nvJPEG API errors
static const char *_cudaGetErrorEnum(nvjpegStatus_t error) {
switch (error) {
case NVJPEG_STATUS_SUCCESS:
return "NVJPEG_STATUS_SUCCESS";
case NVJPEG_STATUS_NOT_INITIALIZED:
return "NVJPEG_STATUS_NOT_INITIALIZED";
case NVJPEG_STATUS_INVALID_PARAMETER:
return "NVJPEG_STATUS_INVALID_PARAMETER";
case NVJPEG_STATUS_BAD_JPEG:
return "NVJPEG_STATUS_BAD_JPEG";
case NVJPEG_STATUS_JPEG_NOT_SUPPORTED:
return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED";
case NVJPEG_STATUS_ALLOCATOR_FAILURE:
return "NVJPEG_STATUS_ALLOCATOR_FAILURE";
case NVJPEG_STATUS_EXECUTION_FAILED:
return "NVJPEG_STATUS_EXECUTION_FAILED";
case NVJPEG_STATUS_ARCH_MISMATCH:
return "NVJPEG_STATUS_ARCH_MISMATCH";
case NVJPEG_STATUS_INTERNAL_ERROR:
return "NVJPEG_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#endif
#ifdef NV_NPPIDEFS_H
// NPP API errors
static const char *_cudaGetErrorEnum(NppStatus error) {
switch (error) {
case NPP_NOT_SUPPORTED_MODE_ERROR:
return "NPP_NOT_SUPPORTED_MODE_ERROR";
case NPP_ROUND_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR";
case NPP_RESIZE_NO_OPERATION_ERROR:
return "NPP_RESIZE_NO_OPERATION_ERROR";
case NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY:
return "NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_BAD_ARG_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFF_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECT_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUAD_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEM_ALLOC_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTO_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_INPUT:
return "NPP_INVALID_INPUT";
case NPP_POINTER_ERROR:
return "NPP_POINTER_ERROR";
case NPP_WARNING:
return "NPP_WARNING";
case NPP_ODD_ROI_WARNING:
return "NPP_ODD_ROI_WARNING";
#else
// These are for CUDA 5.5 or higher
case NPP_BAD_ARGUMENT_ERROR:
return "NPP_BAD_ARGUMENT_ERROR";
case NPP_COEFFICIENT_ERROR:
return "NPP_COEFFICIENT_ERROR";
case NPP_RECTANGLE_ERROR:
return "NPP_RECTANGLE_ERROR";
case NPP_QUADRANGLE_ERROR:
return "NPP_QUADRANGLE_ERROR";
case NPP_MEMORY_ALLOCATION_ERR:
return "NPP_MEMORY_ALLOCATION_ERROR";
case NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR:
return "NPP_HISTOGRAM_NUMBER_OF_LEVELS_ERROR";
case NPP_INVALID_HOST_POINTER_ERROR:
return "NPP_INVALID_HOST_POINTER_ERROR";
case NPP_INVALID_DEVICE_POINTER_ERROR:
return "NPP_INVALID_DEVICE_POINTER_ERROR";
#endif
case NPP_LUT_NUMBER_OF_LEVELS_ERROR:
return "NPP_LUT_NUMBER_OF_LEVELS_ERROR";
case NPP_TEXTURE_BIND_ERROR:
return "NPP_TEXTURE_BIND_ERROR";
case NPP_WRONG_INTERSECTION_ROI_ERROR:
return "NPP_WRONG_INTERSECTION_ROI_ERROR";
case NPP_NOT_EVEN_STEP_ERROR:
return "NPP_NOT_EVEN_STEP_ERROR";
case NPP_INTERPOLATION_ERROR:
return "NPP_INTERPOLATION_ERROR";
case NPP_RESIZE_FACTOR_ERROR:
return "NPP_RESIZE_FACTOR_ERROR";
case NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR:
return "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) <= 0x5000
case NPP_MEMFREE_ERR:
return "NPP_MEMFREE_ERR";
case NPP_MEMSET_ERR:
return "NPP_MEMSET_ERR";
case NPP_MEMCPY_ERR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERR:
return "NPP_MIRROR_FLIP_ERR";
#else
case NPP_MEMFREE_ERROR:
return "NPP_MEMFREE_ERROR";
case NPP_MEMSET_ERROR:
return "NPP_MEMSET_ERROR";
case NPP_MEMCPY_ERROR:
return "NPP_MEMCPY_ERROR";
case NPP_MIRROR_FLIP_ERROR:
return "NPP_MIRROR_FLIP_ERROR";
#endif
case NPP_ALIGNMENT_ERROR:
return "NPP_ALIGNMENT_ERROR";
case NPP_STEP_ERROR:
return "NPP_STEP_ERROR";
case NPP_SIZE_ERROR:
return "NPP_SIZE_ERROR";
case NPP_NULL_POINTER_ERROR:
return "NPP_NULL_POINTER_ERROR";
case NPP_CUDA_KERNEL_EXECUTION_ERROR:
return "NPP_CUDA_KERNEL_EXECUTION_ERROR";
case NPP_NOT_IMPLEMENTED_ERROR:
return "NPP_NOT_IMPLEMENTED_ERROR";
case NPP_ERROR:
return "NPP_ERROR";
case NPP_SUCCESS:
return "NPP_SUCCESS";
case NPP_WRONG_INTERSECTION_QUAD_WARNING:
return "NPP_WRONG_INTERSECTION_QUAD_WARNING";
case NPP_MISALIGNED_DST_ROI_WARNING:
return "NPP_MISALIGNED_DST_ROI_WARNING";
case NPP_AFFINE_QUAD_INCORRECT_WARNING:
return "NPP_AFFINE_QUAD_INCORRECT_WARNING";
case NPP_DOUBLE_SIZE_WARNING:
return "NPP_DOUBLE_SIZE_WARNING";
case NPP_WRONG_INTERSECTION_ROI_WARNING:
return "NPP_WRONG_INTERSECTION_ROI_WARNING";
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x6000
/* These are 6.0 or higher */
case NPP_LUT_PALETTE_BITSIZE_ERROR:
return "NPP_LUT_PALETTE_BITSIZE_ERROR";
case NPP_ZC_MODE_NOT_SUPPORTED_ERROR:
return "NPP_ZC_MODE_NOT_SUPPORTED_ERROR";
case NPP_QUALITY_INDEX_ERROR:
return "NPP_QUALITY_INDEX_ERROR";
case NPP_CHANNEL_ORDER_ERROR:
return "NPP_CHANNEL_ORDER_ERROR";
case NPP_ZERO_MASK_VALUE_ERROR:
return "NPP_ZERO_MASK_VALUE_ERROR";
case NPP_NUMBER_OF_CHANNELS_ERROR:
return "NPP_NUMBER_OF_CHANNELS_ERROR";
case NPP_COI_ERROR:
return "NPP_COI_ERROR";
case NPP_DIVISOR_ERROR:
return "NPP_DIVISOR_ERROR";
case NPP_CHANNEL_ERROR:
return "NPP_CHANNEL_ERROR";
case NPP_STRIDE_ERROR:
return "NPP_STRIDE_ERROR";
case NPP_ANCHOR_ERROR:
return "NPP_ANCHOR_ERROR";
case NPP_MASK_SIZE_ERROR:
return "NPP_MASK_SIZE_ERROR";
case NPP_MOMENT_00_ZERO_ERROR:
return "NPP_MOMENT_00_ZERO_ERROR";
case NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR:
return "NPP_THRESHOLD_NEGATIVE_LEVEL_ERROR";
case NPP_THRESHOLD_ERROR:
return "NPP_THRESHOLD_ERROR";
case NPP_CONTEXT_MATCH_ERROR:
return "NPP_CONTEXT_MATCH_ERROR";
case NPP_FFT_FLAG_ERROR:
return "NPP_FFT_FLAG_ERROR";
case NPP_FFT_ORDER_ERROR:
return "NPP_FFT_ORDER_ERROR";
case NPP_SCALE_RANGE_ERROR:
return "NPP_SCALE_RANGE_ERROR";
case NPP_DATA_TYPE_ERROR:
return "NPP_DATA_TYPE_ERROR";
case NPP_OUT_OFF_RANGE_ERROR:
return "NPP_OUT_OFF_RANGE_ERROR";
case NPP_DIVIDE_BY_ZERO_ERROR:
return "NPP_DIVIDE_BY_ZERO_ERROR";
case NPP_RANGE_ERROR:
return "NPP_RANGE_ERROR";
case NPP_NO_MEMORY_ERROR:
return "NPP_NO_MEMORY_ERROR";
case NPP_ERROR_RESERVED:
return "NPP_ERROR_RESERVED";
case NPP_NO_OPERATION_WARNING:
return "NPP_NO_OPERATION_WARNING";
case NPP_DIVIDE_BY_ZERO_WARNING:
return "NPP_DIVIDE_BY_ZERO_WARNING";
#endif
#if ((NPP_VERSION_MAJOR << 12) + (NPP_VERSION_MINOR << 4)) >= 0x7000
/* These are 7.0 or higher */
case NPP_OVERFLOW_ERROR:
return "NPP_OVERFLOW_ERROR";
case NPP_CORRUPTED_DATA_ERROR:
return "NPP_CORRUPTED_DATA_ERROR";
#endif
}
return "<unknown>";
}
#endif
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
}
#ifdef __DPCT_HPP__
// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:17: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
// This will only print the proper error string when calling cudaGetLastError,
// but will not exit the program in case an error is detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
/*
DPCT1010:19: SYCL uses exceptions to report errors and does not use the error
codes. The call was replaced with 0. You need to rewrite this code.
*/
dpct::err0 err = 0;
}
#endif
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
// Float To Int conversion
inline int ftoi(float value) {
return (value >= 0 ? static_cast<int>(value + 0.5)
: static_cast<int>(value - 0.5));
}
// Beginning of GPU Architecture definitions
inline int _ConvertSMVer2Cores(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the # of cores per SM
typedef struct dpct_type_113531 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{0x30, 192},
{0x32, 192},
{0x35, 192},
{0x37, 192},
{0x50, 128},
{0x52, 128},
{0x53, 128},
{0x60, 64},
{0x61, 128},
{0x62, 128},
{0x70, 64},
{0x72, 64},
{0x75, 64},
{0x80, 64},
{0x86, 128},
{0x87, 128},
{0x89, 128},
{0x90, 128},
{-1, -1}};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
  // If we don't find the values, we default to the last known entry
  // so the sample can still run properly
printf(
"MapSMtoCores for SM %d.%d is undefined."
" Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
inline const char* _ConvertSMVer2ArchName(int major, int minor) {
// Defines for GPU Architecture types (using the SM version to determine
// the GPU Arch name)
typedef struct dpct_type_281558 {
    int SM;  // 0xMm (hexadecimal notation), M = SM Major version,
// and m = SM minor version
const char* name;
} sSMtoArchName;
sSMtoArchName nGpuArchNameSM[] = {
{0x30, "Kepler"},
{0x32, "Kepler"},
{0x35, "Kepler"},
{0x37, "Kepler"},
{0x50, "Maxwell"},
{0x52, "Maxwell"},
{0x53, "Maxwell"},
{0x60, "Pascal"},
{0x61, "Pascal"},
{0x62, "Pascal"},
{0x70, "Volta"},
{0x72, "Xavier"},
{0x75, "Turing"},
{0x80, "Ampere"},
{0x86, "Ampere"},
{0x87, "Ampere"},
{0x89, "Ada"},
{0x90, "Hopper"},
{-1, "Graphics Device"}};
int index = 0;
while (nGpuArchNameSM[index].SM != -1) {
if (nGpuArchNameSM[index].SM == ((major << 4) + minor)) {
return nGpuArchNameSM[index].name;
}
index++;
}
  // If we don't find the values, we default to the last known entry
  // so the sample can still run properly
printf(
"MapSMtoArchName for SM %d.%d is undefined."
" Default to use %s\n",
major, minor, nGpuArchNameSM[index - 1].name);
return nGpuArchNameSM[index - 1].name;
}
// end of GPU Architecture definitions
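// Illustrative sketch (not part of the original header). The SM version is
// packed as 0xMm, so compute capability 8.6 looks up entry 0x86 in the tables
// above; the printed values come directly from those tables.
#if 0
#include <cstdio>
int main() {
  std::printf("SM 8.6: %d cores/SM, architecture %s\n",
              _ConvertSMVer2Cores(8, 6),      // table entry {0x86, 128}
              _ConvertSMVer2ArchName(8, 6));  // table entry {0x86, "Ampere"}
  return 0;
}
#endif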
#ifdef __DPCT_HPP__
// General GPU Device CUDA Initialization
inline int gpuDeviceInit(int devID) {
int device_count;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuDeviceInit() CUDA error: "
"no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
if (devID < 0) {
devID = 0;
}
if (devID > device_count - 1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n",
device_count);
fprintf(stderr,
">> gpuDeviceInit (-device=%d) is not a valid"
" GPU device. <<\n",
devID);
fprintf(stderr, "\n");
return -devID;
}
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:21: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
/*
DPCT1035:22: All SYCL devices can be used by the host to submit tasks. You may
need to adjust this code.
*/
if (computeMode == 0) {
fprintf(stderr,
"Error: device is running in <Compute Mode "
"Prohibited>, no threads can use cudaSetDevice().\n");
return -1;
}
if (major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(EXIT_FAILURE);
}
/*
DPCT1093:23: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, _ConvertSMVer2ArchName(major, minor));
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
inline int gpuGetMaxGflopsDeviceId() try {
int current_device = 0, sm_per_multiproc = 0;
int max_perf_device = 0;
int device_count = 0;
int devices_prohibited = 0;
uint64_t max_compute_perf = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the best CUDA capable GPU device
current_device = 0;
while (current_device < device_count) {
int computeMode = -1, major = 0, minor = 0;
/*
DPCT1035:24: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
// If this GPU is not running on Compute Mode prohibited,
// then we can add it to the list
/*
DPCT1035:25: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (computeMode != 0) {
if (major == 9999 && minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc =
_ConvertSMVer2Cores(major, minor);
}
int multiProcessorCount = 0, clockRate = 0;
checkCudaErrors(
DPCT_CHECK_ERROR(multiProcessorCount = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_compute_units()));
dpct::err0 result =
DPCT_CHECK_ERROR(clockRate = dpct::dev_mgr::instance()
.get_device(current_device)
.get_max_clock_frequency());
uint64_t compute_perf = (uint64_t)multiProcessorCount * sm_per_multiproc * clockRate;
if (compute_perf > max_compute_perf) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
devices_prohibited++;
}
++current_device;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"gpuGetMaxGflopsDeviceId() CUDA error:"
" all devices have compute mode prohibited.\n");
exit(EXIT_FAILURE);
}
return max_perf_device;
}
catch (sycl::exception const &exc) {
std::cerr << exc.what() << "Exception caught at file:" << __FILE__
<< ", line:" << __LINE__ << std::endl;
std::exit(1);
}
// Initialization code to find the best CUDA Device
inline int findCudaDevice(int argc, const char **argv) {
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameter\n ");
exit(EXIT_FAILURE);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_FAILURE);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
/*
DPCT1093:26: The "devID" device may be not the one intended for use. Adjust
the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(devID)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
major =
dpct::dev_mgr::instance().get_device(devID).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor =
dpct::dev_mgr::instance().get_device(devID).get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, _ConvertSMVer2ArchName(major, minor), major, minor);
}
return devID;
}
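// Illustrative usage (not part of this header): a sample typically calls
//
//   int devID = findCudaDevice(argc, (const char **)argv);
//
// Passing a device flag on the command line (e.g. -device=N) selects device N
// through gpuDeviceInit(); otherwise the device with the highest estimated
// GFLOPS reported by gpuGetMaxGflopsDeviceId() is selected.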
inline int findIntegratedGPU() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
checkCudaErrors(DPCT_CHECK_ERROR(
device_count = dpct::dev_mgr::instance().device_count()));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the integrated GPU which is compute capable
while (current_device < device_count) {
int computeMode = -1, integrated = -1;
/*
DPCT1035:27: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
checkCudaErrors(DPCT_CHECK_ERROR(computeMode = 1));
checkCudaErrors(
DPCT_CHECK_ERROR(integrated = dpct::dev_mgr::instance()
.get_device(current_device)
.get_integrated()));
    // If the GPU is integrated and is not in prohibited compute mode,
    // then CUDA can map to GLES resources
/*
DPCT1035:28: All SYCL devices can be used by the host to submit tasks. You
may need to adjust this code.
*/
if (integrated && (computeMode != 0)) {
/*
DPCT1093:29: The "current_device" device may be not the one intended for
use. Adjust the selected device if needed.
*/
checkCudaErrors(DPCT_CHECK_ERROR(dpct::select_device(current_device)));
int major = 0, minor = 0;
checkCudaErrors(DPCT_CHECK_ERROR(major = dpct::dev_mgr::instance()
.get_device(current_device)
.get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(minor = dpct::dev_mgr::instance()
.get_device(current_device)
.get_minor_version()));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, _ConvertSMVer2ArchName(major, minor), major, minor);
return current_device;
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No GLES-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
// General check for CUDA GPU SM Capabilities
inline bool checkCudaCapabilities(int major_version, int minor_version) {
int dev;
int major = 0, minor = 0;
  checkCudaErrors(DPCT_CHECK_ERROR(
      dev = dpct::dev_mgr::instance().current_device_id()));
checkCudaErrors(DPCT_CHECK_ERROR(
major = dpct::dev_mgr::instance().get_device(dev).get_major_version()));
checkCudaErrors(DPCT_CHECK_ERROR(
minor = dpct::dev_mgr::instance().get_device(dev).get_minor_version()));
if ((major > major_version) ||
(major == major_version &&
minor >= minor_version)) {
printf(" Device %d: <%16s >, Compute SM %d.%d detected\n", dev,
_ConvertSMVer2ArchName(major, minor), major, minor);
return true;
} else {
printf(
" No GPU device was found that can support "
"CUDA compute capability %d.%d.\n",
major_version, minor_version);
return false;
}
}
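// Illustrative usage (not part of this header): gate a code path on a minimum
// compute capability, e.g.
//   if (!checkCudaCapabilities(3, 5)) { /* fall back or exit */ }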
#endif
// end of CUDA Helper Functions
#endif // COMMON_HELPER_CUDA_H_
| h |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/taskflow.hpp | #pragma once
#include "core/executor.hpp"
#include "algorithm/critical.hpp"
#include "algorithm/for_each.hpp"
/**
@dir taskflow
@brief root taskflow include dir
*/
/**
@dir taskflow/core
@brief taskflow core include dir
*/
/**
@dir taskflow/algorithm
@brief taskflow algorithms include dir
*/
/**
@dir taskflow/cuda
@brief taskflow CUDA include dir
*/
/**
@file taskflow/taskflow.hpp
@brief main taskflow include file
*/
// TF_VERSION % 100 is the patch level
// TF_VERSION / 100 % 1000 is the minor version
// TF_VERSION / 100000 is the major version
// current version: 3.5.0
#define TF_VERSION 300500
#define TF_MAJOR_VERSION TF_VERSION/100000
#define TF_MINOR_VERSION TF_VERSION/100%1000
#define TF_PATCH_VERSION TF_VERSION%100
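// For example, TF_VERSION 300500 decomposes to TF_MAJOR_VERSION == 3,
// TF_MINOR_VERSION == 5, and TF_PATCH_VERSION == 0, i.e., version 3.5.0.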
/**
@brief taskflow namespace
*/
namespace tf {
/**
@private
*/
namespace detail { }
/**
@brief queries the version information in a string format @c major.minor.patch
Release notes are available here: https://taskflow.github.io/taskflow/Releases.html
*/
constexpr const char* version() {
return "3.5.0";
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/meta_macro.hpp | // 2020/08/30 - Created by netcan: https://github.com/netcan
// ref https://github.com/Erlkoenig90/map-macro/
#pragma once
#ifdef _MSC_VER
#define TF_EMPTY()
#define TF_GET_ARG_COUNT_(...) \
TF_PASTE(TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, \
55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, \
43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, \
31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \
19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0, ), \
TF_EMPTY())
#else
#define TF_GET_ARG_COUNT_(...) \
TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, \
53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \
39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, \
25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, \
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, )
#endif
#define TF_GET_ARG_COUNT(...) TF_GET_ARG_COUNT_(__dummy__, ##__VA_ARGS__)
#define TF_GET_ARG_COUNT_I( \
e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, \
e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, \
e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, \
e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, \
e62, e63, e64, size, ...) \
size
#define TF_GET_FIRST(a, ...) a
#define TF_GET_SECOND(a, b, ...) b
#define TF_CONCATE(x, y) x##y
#define TF_PASTE(x, y) TF_CONCATE(x, y)
#define TF_EVAL0(...) __VA_ARGS__
#define TF_EVAL1(...) TF_EVAL0(TF_EVAL0(TF_EVAL0(__VA_ARGS__)))
#define TF_EVAL2(...) TF_EVAL1(TF_EVAL1(TF_EVAL1(__VA_ARGS__)))
#define TF_EVAL3(...) TF_EVAL2(TF_EVAL2(TF_EVAL2(__VA_ARGS__)))
#define TF_EVAL4(...) TF_EVAL3(TF_EVAL3(TF_EVAL3(__VA_ARGS__)))
#define TF_EVAL5(...) TF_EVAL4(TF_EVAL4(TF_EVAL4(__VA_ARGS__)))
#ifdef _MSC_VER
// MSVC needs more evaluations
#define TF_EVAL6(...) TF_EVAL5(TF_EVAL5(TF_EVAL5(__VA_ARGS__)))
#define TF_EVAL(...) TF_EVAL6(TF_EVAL6(__VA_ARGS__))
#else
#define TF_EVAL(...) TF_EVAL5(__VA_ARGS__)
#endif
#define TF_MAP_END(...)
#define TF_MAP_OUT
#define EMPTY()
#define DEFER(id) id EMPTY()
#define TF_MAP_GET_END2() 0, TF_MAP_END
#define TF_MAP_GET_END1(...) TF_MAP_GET_END2
#define TF_MAP_GET_END(...) TF_MAP_GET_END1
#define TF_MAP_NEXT0(test, next, ...) next TF_MAP_OUT
#define TF_MAP_NEXT1(test, next) DEFER(TF_MAP_NEXT0)(test, next, 0)
#define TF_MAP_NEXT(test, next) TF_MAP_NEXT1(TF_MAP_GET_END test, next)
#define TF_MAP0(f, x, peek, ...) \
f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP1))(f, peek, __VA_ARGS__)
#define TF_MAP1(f, x, peek, ...) \
f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP0))(f, peek, __VA_ARGS__)
#define TF_MAP(f, ...) \
TF_EVAL(TF_MAP1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
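// Illustrative expansions (F stands for any function-like macro):
//   TF_MAP(F, a, b, c)     -> F(a) F(b) F(c)
//   TF_GET_ARG_COUNT(a, b) -> 2
//   TF_GET_ARG_COUNT()     -> 0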
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/type_list.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include <cstddef>
namespace tf {
namespace dsl {
template <typename...> using void_t = void;
template <typename... Ts> struct TypeList {
using type = TypeList<Ts...>;
static constexpr size_t size = 0;
template <typename... T> struct append { using type = TypeList<T...>; };
template <typename... T> using appendTo = typename append<T...>::type;
template <typename T> using prepend = typename TypeList<T>::type;
template <template <typename...> class T> using exportTo = T<Ts...>;
};
template <typename Head, typename... Tails> struct TypeList<Head, Tails...> {
using type = TypeList<Head, Tails...>;
using head = Head;
using tails = TypeList<Tails...>;
static constexpr size_t size = sizeof...(Tails) + 1;
template <typename... Ts> struct append {
using type = TypeList<Head, Tails..., Ts...>;
};
template <typename... Ts> using appendTo = typename append<Ts...>::type;
template <typename T>
using prepend = typename TypeList<T, Head, Tails...>::type;
template <template <typename...> class T> using exportTo = T<Head, Tails...>;
};
template <typename IN> struct IsTypeList {
constexpr static bool value = false;
};
template <typename IN> constexpr bool IsTypeList_v = IsTypeList<IN>::value;
template <typename... Ts> struct IsTypeList<TypeList<Ts...>> {
constexpr static bool value = true;
};
template <typename... IN> struct Concat;
template <typename... IN> using Concat_t = typename Concat<IN...>::type;
template <> struct Concat<> { using type = TypeList<>; };
template <typename IN> struct Concat<IN> { using type = IN; };
template <typename IN, typename IN2> struct Concat<IN, IN2> {
using type = typename IN2::template exportTo<IN::template append>::type;
};
template <typename IN, typename IN2, typename... Rest>
struct Concat<IN, IN2, Rest...> {
using type = Concat_t<Concat_t<IN, IN2>, Rest...>;
};
template <typename IN, typename OUT = TypeList<>, typename = void>
struct Flatten {
using type = OUT;
};
template <typename IN> using Flatten_t = typename Flatten<IN>::type;
template <typename IN, typename OUT>
struct Flatten<IN, OUT, std::enable_if_t<IsTypeList_v<typename IN::head>>> {
using type =
typename Flatten<typename IN::tails,
Concat_t<OUT, Flatten_t<typename IN::head>>>::type;
};
template <typename IN, typename OUT>
struct Flatten<IN, OUT, std::enable_if_t<!IsTypeList_v<typename IN::head>>> {
using type = typename Flatten<
typename IN::tails,
typename OUT::template appendTo<typename IN::head>>::type;
};
template <typename IN, template <typename> class F> struct Map {
using type = TypeList<>;
};
template <typename IN, template <typename> class F>
using Map_t = typename Map<IN, F>::type;
template <template <typename> class F, typename... Ts>
struct Map<TypeList<Ts...>, F> {
using type = TypeList<typename F<Ts>::type...>;
};
template <typename IN, template <typename> class F, typename OUT = TypeList<>,
typename = void>
struct Filter {
using type = OUT;
};
template <typename IN, template <typename> class F>
using Filter_t = typename Filter<IN, F>::type;
template <typename IN, template <typename> class F, typename OUT>
class Filter<IN, F, OUT, void_t<typename IN::head>> {
using H = typename IN::head;
public:
using type = typename std::conditional_t<
F<H>::value,
Filter<typename IN::tails, F, typename OUT::template appendTo<H>>,
Filter<typename IN::tails, F, OUT>>::type;
};
template <typename IN, typename = void> struct Unique { using type = IN; };
template <typename IN> using Unique_t = typename Unique<IN>::type;
template <typename IN> class Unique<IN, void_t<typename IN::head>> {
template <typename T> struct IsDifferR {
template <typename R> struct apply {
static constexpr bool value = !std::is_same<T, R>::value;
};
};
using tails = Unique_t<typename IN::tails>;
using eraseHead =
Filter_t<tails, IsDifferR<typename IN::head>::template apply>;
public:
using type = typename eraseHead::template prepend<typename IN::head>;
};
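// Illustrative results of the utilities above (not part of the API):
//   Concat_t<TypeList<int, double>, TypeList<char>>    -> TypeList<int, double, char>
//   Flatten_t<TypeList<int, TypeList<double, char>>>   -> TypeList<int, double, char>
//   Unique_t<TypeList<int, int, double>>               -> TypeList<int, double>
//   Filter_t<TypeList<int, double>, std::is_integral>  -> TypeList<int>
//   Map_t<TypeList<int, double>, std::add_pointer>     -> TypeList<int*, double*>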
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/task_dsl.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "meta_macro.hpp"
#include "task_analyzer.hpp"
#include "task_trait.hpp"
namespace tf {
namespace dsl {
struct EmptyContext {};
template <typename CONTEXT = EmptyContext, typename... Chains> class TaskDsl {
using Links = Unique_t<Flatten_t<TypeList<typename Chain<Chains>::type...>>>;
using Analyzer = typename Links::template exportTo<TaskAnalyzer>;
using AllTasks = typename Analyzer::AllTasks;
template <typename TASK> struct TaskCbWithContext {
using type = TaskCb<TASK, CONTEXT>;
};
using TasksCB =
typename Map_t<AllTasks,
TaskCbWithContext>::template exportTo<std::tuple>;
using OneToOneLinkSet = typename Analyzer::OneToOneLinkSet;
template <typename OneToOneLink> struct OneToOneLinkInstanceType {
using type = typename OneToOneLink::template InstanceType<TasksCB>;
};
using OneToOneLinkInstances =
typename Map_t<OneToOneLinkSet,
OneToOneLinkInstanceType>::template exportTo<std::tuple>;
public:
constexpr TaskDsl(FlowBuilder &flow_builder, const CONTEXT &context = {}) {
build_tasks_cb(flow_builder, context,
std::make_index_sequence<AllTasks::size>{});
build_links(std::make_index_sequence<OneToOneLinkSet::size>{});
}
template <typename TASK> Task &get_task() {
constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value;
constexpr size_t TaskIndex =
TupleElementByF_v<TasksCB, IsTask<TASK>::template apply>;
static_assert(TaskIndex < TasksCBSize, "fatal: not find TaskCb in TasksCB");
return std::get<TaskIndex>(tasksCb_).task_;
}
private:
template <size_t... Is>
void build_tasks_cb(FlowBuilder &flow_builder, const CONTEXT &context,
std::index_sequence<Is...>) {
auto _ = {0, (std::get<Is>(tasksCb_).build(flow_builder, context), 0)...};
(void)_;
}
template <size_t... Is> void build_links(std::index_sequence<Is...>) {
auto _ = {0, (std::get<Is>(links_).build(tasksCb_), 0)...};
(void)_;
}
private:
TasksCB tasksCb_;
OneToOneLinkInstances links_;
};
template <typename = void, typename... Chains, typename CONTEXT = EmptyContext>
constexpr TaskDsl<CONTEXT, Chains...> taskDsl(FlowBuilder &flow_builder,
CONTEXT &&context = {}) {
return {flow_builder, context};
}
} // namespace dsl
} // namespace tf
///////////////////////////////////////////////////////////////////////////////
#define TF_CHAIN(link) , link->void
#define TF_CONTEXT_1(name) tf::dsl::EmptyContext
#define TF_CONTEXT_2(name, context) context
#define TF_CAPTURE_THIS_1
#define TF_CAPTURE_THIS_2 *this
///////////////////////////////////////////////////////////////////////////////
// make_task(TASK_NAME, { the body of an action lambda })
#define make_task(name, ...) \
struct TF_GET_FIRST name : tf::dsl::TaskSignature, \
TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) \
name { \
using _ContextType = TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) name; \
TF_GET_FIRST name(const _ContextType &context) : _ContextType(context) {} \
auto operator()() { \
return [TF_PASTE(TF_CAPTURE_THIS_, TF_GET_ARG_COUNT name)] __VA_ARGS__; \
} \
}
// some_tasks(A, B, C) means SomeTask<A, B, C>
#define some_tasks(...) auto (*)(tf::dsl::SomeTask<__VA_ARGS__>)
// same as some_tasks
#define fork_tasks(...) some_tasks(__VA_ARGS__)
// same as some_tasks
#define merge_tasks(...) some_tasks(__VA_ARGS__)
// task(A) means a task A
#define task(Task) auto (*)(Task)
// build_taskflow(...) builds a task DSL graph
#define build_taskflow(...) tf::dsl::taskDsl<void TF_MAP(TF_CHAIN, __VA_ARGS__)>
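// Illustrative sketch of the DSL (task names A, B, C, D are hypothetical):
//
//   make_task((A), { std::cout << "A\n"; });
//   make_task((B), { std::cout << "B\n"; });
//   make_task((C), { std::cout << "C\n"; });
//   make_task((D), { std::cout << "D\n"; });
//
//   tf::Taskflow taskflow;
//   build_taskflow(
//     task(A)
//       -> fork_tasks(B, C)
//       -> task(D)
//   )(taskflow);
//
// builds the edges A->B, A->C, B->D, and C->D in the given taskflow.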
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/tuple_utils.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include <cstddef>
#include <tuple>
namespace tf {
namespace dsl {
namespace detail {
// get the index of the tuple element whose type satisfies F; if none matches, the index >= tuple_size
template <typename TUP, template <typename> class F, typename = void>
struct TupleElementByF {
constexpr static size_t Index = 0;
};
template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F, std::enable_if_t<F<H>::value>> {
constexpr static size_t Index = 0;
};
template <template <typename> class F, typename H, typename... Ts>
struct TupleElementByF<std::tuple<H, Ts...>, F,
std::enable_if_t<!F<H>::value>> {
constexpr static size_t Index =
1 + TupleElementByF<std::tuple<Ts...>, F>::Index;
};
template <typename T, typename TUP, size_t... Is>
constexpr inline T AggregationByTupImpl(TUP &&tup, std::index_sequence<Is...>) {
return T{std::get<Is>(tup)...};
}
} // namespace detail
template <typename TUP, template <typename> class F>
constexpr size_t TupleElementByF_v = detail::TupleElementByF<TUP, F>::Index;
template <typename T, typename TUP>
constexpr inline T AggregationByTup(TUP &&tup) {
return detail::AggregationByTupImpl<T>(
std::forward<TUP>(tup),
      std::make_index_sequence<std::tuple_size<std::decay_t<TUP>>::value>{});
}
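// Illustrative usage (not part of the public API):
//
//   struct Point { int x, y; };
//   auto p = AggregationByTup<Point>(std::make_tuple(1, 2));   // Point{1, 2}
//
// TupleElementByF_v<TUP, F> yields the index of the first element of TUP whose
// type satisfies F; it equals the tuple size when no element matches.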
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/task_trait.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "../core/task.hpp"
#include "type_list.hpp"
#include <type_traits>
namespace tf {
namespace dsl {
struct TaskSignature {};
template <typename TASK, typename CONTEXT> struct TaskCb {
using TaskType = TASK;
void build(FlowBuilder &build, const CONTEXT &context) {
task_ = build.emplace(TaskType{context}());
}
Task task_;
};
template <typename TASK> struct IsTask {
template <typename TaskCb> struct apply {
constexpr static bool value =
std::is_same<typename TaskCb::TaskType, TASK>::value;
};
};
template <typename TASK, typename = void> struct TaskTrait;
template <typename... TASK> struct SomeTask {
using TaskList =
Unique_t<Flatten_t<TypeList<typename TaskTrait<TASK>::TaskList...>>>;
};
// a task itself
template <typename TASK>
struct TaskTrait<
TASK, std::enable_if_t<std::is_base_of<TaskSignature, TASK>::value>> {
using TaskList = TypeList<TASK>;
};
template <typename... TASK> struct TaskTrait<SomeTask<TASK...>> {
using TaskList = typename SomeTask<TASK...>::TaskList;
};
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/dsl.hpp | // TaskflowDSL is an experimental project that leverages C++17 to
// provide a dedicated interface for expressive taskflow programming
//
// Created by netcan: https://github.com/netcan
#pragma once
#include "dsl/task_dsl.hpp"
namespace tf {
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/task_analyzer.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "connection.hpp"
#include "type_list.hpp"
#include <type_traits>
namespace tf {
namespace dsl {
template <typename... Links> class TaskAnalyzer {
template <typename FROMs, typename TOs, typename = void>
struct BuildOneToOneLink;
template <typename... Fs, typename Ts>
struct BuildOneToOneLink<TypeList<Fs...>, Ts> {
using type = Concat_t<typename BuildOneToOneLink<Fs, Ts>::type...>;
};
template <typename F, typename... Ts>
struct BuildOneToOneLink<F, TypeList<Ts...>,
std::enable_if_t<!IsTypeList_v<F>>> {
using type = TypeList<OneToOneLink<F, Ts>...>;
};
template <typename Link> class OneToOneLinkSetF {
using FromTaskList = typename Link::FromTaskList;
using ToTaskList = typename Link::ToTaskList;
public:
using type = typename BuildOneToOneLink<FromTaskList, ToTaskList>::type;
};
public:
using AllTasks = Unique_t<
Concat_t<typename Links::FromTaskList..., typename Links::ToTaskList...>>;
using OneToOneLinkSet =
Unique_t<Flatten_t<Map_t<TypeList<Links...>, OneToOneLinkSetF>>>;
};
} // namespace dsl
} // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/dsl/connection.hpp | // 2020/08/28 - Created by netcan: https://github.com/netcan
#pragma once
#include "../core/flow_builder.hpp"
#include "task_trait.hpp"
#include "tuple_utils.hpp"
#include "type_list.hpp"
namespace tf {
namespace dsl {
template <typename F, typename T> class Connection {
using FROMs = typename TaskTrait<F>::TaskList;
using TOs = typename TaskTrait<T>::TaskList;
public:
using FromTaskList = Unique_t<Flatten_t<FROMs>>;
using ToTaskList = Unique_t<Flatten_t<TOs>>;
};
template <typename T, typename OUT = TypeList<>> struct Chain;
template <typename F, typename OUT> struct Chain<auto (*)(F)->void, OUT> {
using From = F;
using type = OUT;
};
template <typename F, typename T, typename OUT>
struct Chain<auto (*)(F)->T, OUT> {
private:
using To = typename Chain<T, OUT>::From;
public:
using From = F;
using type = typename Chain<
T, typename OUT::template appendTo<Connection<From, To>>>::type;
};
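// Illustrative parse (not part of the API): the chain type
//   auto (*)(A) -> auto (*)(B) -> void
// is decomposed by Chain<> into TypeList<Connection<A, B>>, i.e., one A->B link.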
template <typename FROM, typename TO> struct OneToOneLink {
template <typename TasksCB> struct InstanceType {
constexpr void build(TasksCB &tasksCb) {
constexpr size_t TasksCBSize = std::tuple_size<TasksCB>::value;
constexpr size_t FromTaskIndex =
TupleElementByF_v<TasksCB, IsTask<FROM>::template apply>;
constexpr size_t ToTaskIndex =
TupleElementByF_v<TasksCB, IsTask<TO>::template apply>;
static_assert(FromTaskIndex < TasksCBSize && ToTaskIndex < TasksCBSize,
"fatal: not find TaskCb in TasksCB");
std::get<FromTaskIndex>(tasksCb).task_.precede(
std::get<ToTaskIndex>(tasksCb).task_);
}
};
};
} // namespace dsl
}; // namespace tf
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/tsq.hpp | #pragma once
#include "../utility/macros.hpp"
#include "../utility/traits.hpp"
/**
@file tsq.hpp
@brief task queue include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Task Types
// ----------------------------------------------------------------------------
/**
@enum TaskPriority
@brief enumeration of all task priority values
A priority is an enumerated value of type @c unsigned.
Currently, %Taskflow defines three priority levels,
@c HIGH, @c NORMAL, and @c LOW, with values 0, 1, and 2, respectively.
That is, the lower the value, the higher the priority.
*/
enum class TaskPriority : unsigned {
/** @brief value of the highest priority (i.e., 0) */
HIGH = 0,
/** @brief value of the normal priority (i.e., 1) */
NORMAL = 1,
/** @brief value of the lowest priority (i.e., 2) */
LOW = 2,
/** @brief conventional value for iterating priority values */
MAX = 3
};
// ----------------------------------------------------------------------------
// Task Queue
// ----------------------------------------------------------------------------
/**
@class: TaskQueue
@tparam T data type (must be a pointer type)
@tparam MAX_PRIORITY maximum level of the priority
@brief class to create a lock-free unbounded single-producer multiple-consumer queue
This class implements the work-stealing queue described in the paper,
<a href="https://www.di.ens.fr/~zappa/readings/ppopp13.pdf">Correct and Efficient Work-Stealing for Weak Memory Models</a>,
and extends it to include priority.
Only the queue owner can perform pop and push operations,
while others can steal data from the queue simultaneously.
Priority values range from zero (highest priority) to the template value
`MAX_PRIORITY-1` (lowest priority).
All operations are associated with priority values to indicate
the corresponding queues to which an operation is applied.
The default value of the template parameter `MAX_PRIORITY` is `TaskPriority::MAX`,
which provides three priority levels for the task queue.
@code{.cpp}
auto [A, B, C, D, E] = taskflow.emplace(
[] () { },
[&] () {
std::cout << "Task B: " << counter++ << '\n'; // 0
},
[&] () {
std::cout << "Task C: " << counter++ << '\n'; // 2
},
[&] () {
std::cout << "Task D: " << counter++ << '\n'; // 1
},
[] () { }
);
A.precede(B, C, D);
E.succeed(B, C, D);
B.priority(tf::TaskPriority::HIGH);
C.priority(tf::TaskPriority::LOW);
D.priority(tf::TaskPriority::NORMAL);
executor.run(taskflow).wait();
@endcode
In the above example, we have a task graph of five tasks,
@c A, @c B, @c C, @c D, and @c E, in which @c B, @c C, and @c D
can run simultaneously when @c A finishes.
Since we use only one worker thread in the executor,
the three tasks run deterministically in the order of their priority values:
@c B first, then @c D, and finally @c C.
The output is as follows:
@code{.shell-session}
Task B: 0
Task D: 1
Task C: 2
@endcode
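A minimal sketch of using the queue directly, with @c int* standing in for
a task pointer type (illustrative only):
@code{.cpp}
tf::TaskQueue<int*> queue;
int data = 1;
queue.push(&data, 0);          // only the owner thread may push (priority 0)
int* item   = queue.pop();     // only the owner thread may pop; nullptr if empty
int* stolen = queue.steal();   // any thread may steal; nullptr on failure
@endcode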
*/
template <typename T, unsigned MAX_PRIORITY = static_cast<unsigned>(TaskPriority::MAX)>
class TaskQueue {
static_assert(MAX_PRIORITY > 0, "MAX_PRIORITY must be at least one");
static_assert(std::is_pointer_v<T>, "T must be a pointer type");
struct Array {
int64_t C;
int64_t M;
std::atomic<T>* S;
explicit Array(int64_t c) :
C {c},
M {c-1},
S {new std::atomic<T>[static_cast<size_t>(C)]} {
}
~Array() {
delete [] S;
}
int64_t capacity() const noexcept {
return C;
}
void push(int64_t i, T o) noexcept {
S[i & M].store(o, std::memory_order_relaxed);
}
T pop(int64_t i) noexcept {
return S[i & M].load(std::memory_order_relaxed);
}
Array* resize(int64_t b, int64_t t) {
Array* ptr = new Array {2*C};
for(int64_t i=t; i!=b; ++i) {
ptr->push(i, pop(i));
}
return ptr;
}
};
  // Aligning the top and bottom counters to separate cache lines
  // seems to generate the best performance.
CachelineAligned<std::atomic<int64_t>> _top[MAX_PRIORITY];
CachelineAligned<std::atomic<int64_t>> _bottom[MAX_PRIORITY];
std::atomic<Array*> _array[MAX_PRIORITY];
std::vector<Array*> _garbage[MAX_PRIORITY];
//std::atomic<T> _cache {nullptr};
public:
/**
@brief constructs the queue with a given capacity
@param capacity the capacity of the queue (must be power of 2)
*/
explicit TaskQueue(int64_t capacity = 512);
/**
@brief destructs the queue
*/
~TaskQueue();
/**
@brief queries if the queue is empty at the time of this call
*/
bool empty() const noexcept;
/**
@brief queries if the queue is empty at a specific priority value
*/
bool empty(unsigned priority) const noexcept;
/**
@brief queries the number of items at the time of this call
*/
size_t size() const noexcept;
/**
@brief queries the number of items with the given priority
at the time of this call
*/
size_t size(unsigned priority) const noexcept;
/**
@brief queries the capacity of the queue
*/
int64_t capacity() const noexcept;
/**
@brief queries the capacity of the queue at a specific priority value
*/
int64_t capacity(unsigned priority) const noexcept;
/**
@brief inserts an item to the queue
@param item the item to push to the queue
@param priority priority value of the item to push (default = 0)
Only the owner thread can insert an item to the queue.
The operation can trigger the queue to resize its capacity
if more space is required.
*/
TF_FORCE_INLINE void push(T item, unsigned priority);
/**
@brief pops out an item from the queue
Only the owner thread can pop out an item from the queue.
The return can be a @c nullptr if this operation failed (empty queue).
*/
T pop();
/**
@brief pops out an item with a specific priority value from the queue
@param priority priority of the item to pop
Only the owner thread can pop out an item from the queue.
The return can be a @c nullptr if this operation failed (empty queue).
*/
TF_FORCE_INLINE T pop(unsigned priority);
/**
@brief steals an item from the queue
Any thread can try to steal an item from the queue.
The return can be a @c nullptr if this operation failed (not necessarily empty).
*/
T steal();
/**
@brief steals an item with a specific priority value from the queue
@param priority priority of the item to steal
Any thread can try to steal an item from the queue.
The return can be a @c nullptr if this operation failed (not necessarily empty).
*/
T steal(unsigned priority);
private:
TF_NO_INLINE Array* resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t);
};
// Constructor
template <typename T, unsigned MAX_PRIORITY>
TaskQueue<T, MAX_PRIORITY>::TaskQueue(int64_t c) {
assert(c && (!(c & (c-1))));
unroll<0, MAX_PRIORITY, 1>([&](auto p){
_top[p].data.store(0, std::memory_order_relaxed);
_bottom[p].data.store(0, std::memory_order_relaxed);
_array[p].store(new Array{c}, std::memory_order_relaxed);
_garbage[p].reserve(32);
});
}
// Destructor
template <typename T, unsigned MAX_PRIORITY>
TaskQueue<T, MAX_PRIORITY>::~TaskQueue() {
unroll<0, MAX_PRIORITY, 1>([&](auto p){
for(auto a : _garbage[p]) {
delete a;
}
delete _array[p].load();
});
}
// Function: empty
template <typename T, unsigned MAX_PRIORITY>
bool TaskQueue<T, MAX_PRIORITY>::empty() const noexcept {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(!empty(i)) {
return false;
}
}
return true;
}
// Function: empty
template <typename T, unsigned MAX_PRIORITY>
bool TaskQueue<T, MAX_PRIORITY>::empty(unsigned p) const noexcept {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_relaxed);
return (b <= t);
}
// Function: size
template <typename T, unsigned MAX_PRIORITY>
size_t TaskQueue<T, MAX_PRIORITY>::size() const noexcept {
size_t s;
unroll<0, MAX_PRIORITY, 1>([&](auto i) { s = i ? size(i) + s : size(i); });
return s;
}
// Function: size
template <typename T, unsigned MAX_PRIORITY>
size_t TaskQueue<T, MAX_PRIORITY>::size(unsigned p) const noexcept {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_relaxed);
return static_cast<size_t>(b >= t ? b - t : 0);
}
// Function: push
template <typename T, unsigned MAX_PRIORITY>
TF_FORCE_INLINE void TaskQueue<T, MAX_PRIORITY>::push(T o, unsigned p) {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed);
int64_t t = _top[p].data.load(std::memory_order_acquire);
Array* a = _array[p].load(std::memory_order_relaxed);
// queue is full
if(a->capacity() - 1 < (b - t)) {
a = resize_array(a, p, b, t);
}
a->push(b, o);
std::atomic_thread_fence(std::memory_order_release);
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
// Function: pop
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::pop() {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(auto t = pop(i); t) {
return t;
}
}
return nullptr;
}
// Function: pop
template <typename T, unsigned MAX_PRIORITY>
TF_FORCE_INLINE T TaskQueue<T, MAX_PRIORITY>::pop(unsigned p) {
int64_t b = _bottom[p].data.load(std::memory_order_relaxed) - 1;
Array* a = _array[p].load(std::memory_order_relaxed);
_bottom[p].data.store(b, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
int64_t t = _top[p].data.load(std::memory_order_relaxed);
T item {nullptr};
if(t <= b) {
item = a->pop(b);
if(t == b) {
// the last item just got stolen
if(!_top[p].data.compare_exchange_strong(t, t+1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
item = nullptr;
}
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
}
else {
_bottom[p].data.store(b + 1, std::memory_order_relaxed);
}
return item;
}
// Function: steal
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::steal() {
for(unsigned i=0; i<MAX_PRIORITY; i++) {
if(auto t = steal(i); t) {
return t;
}
}
return nullptr;
}
// Function: steal
template <typename T, unsigned MAX_PRIORITY>
T TaskQueue<T, MAX_PRIORITY>::steal(unsigned p) {
int64_t t = _top[p].data.load(std::memory_order_acquire);
std::atomic_thread_fence(std::memory_order_seq_cst);
int64_t b = _bottom[p].data.load(std::memory_order_acquire);
T item {nullptr};
if(t < b) {
Array* a = _array[p].load(std::memory_order_consume);
item = a->pop(t);
if(!_top[p].data.compare_exchange_strong(t, t+1,
std::memory_order_seq_cst,
std::memory_order_relaxed)) {
return nullptr;
}
}
return item;
}
// Function: capacity
template <typename T, unsigned MAX_PRIORITY>
int64_t TaskQueue<T, MAX_PRIORITY>::capacity() const noexcept {
  int64_t s;
unroll<0, MAX_PRIORITY, 1>([&](auto i) {
s = i ? capacity(i) + s : capacity(i);
});
return s;
}
// Function: capacity
template <typename T, unsigned MAX_PRIORITY>
int64_t TaskQueue<T, MAX_PRIORITY>::capacity(unsigned p) const noexcept {
return _array[p].load(std::memory_order_relaxed)->capacity();
}
template <typename T, unsigned MAX_PRIORITY>
TF_NO_INLINE typename TaskQueue<T, MAX_PRIORITY>::Array*
TaskQueue<T, MAX_PRIORITY>::resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t) {
Array* tmp = a->resize(b, t);
_garbage[p].push_back(a);
std::swap(a, tmp);
_array[p].store(a, std::memory_order_release);
  // Note: using relaxed ordering here, as in the original paper, makes thread sanitizer complain
//_array.store(a, std::memory_order_relaxed);
return a;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/graph.hpp | #pragma once
#include "../utility/traits.hpp"
#include "../utility/iterator.hpp"
#include "../utility/object_pool.hpp"
#include "../utility/os.hpp"
#include "../utility/math.hpp"
#include "../utility/small_vector.hpp"
#include "../utility/serializer.hpp"
#include "error.hpp"
#include "declarations.hpp"
#include "semaphore.hpp"
#include "environment.hpp"
#include "topology.hpp"
#include "tsq.hpp"
/**
@file graph.hpp
@brief graph include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Class: CustomGraphBase
// ----------------------------------------------------------------------------
/**
@private
*/
class CustomGraphBase {
public:
virtual void dump(std::ostream&, const void*, const std::string&) const = 0;
virtual ~CustomGraphBase() = default;
};
// ----------------------------------------------------------------------------
// Class: Graph
// ----------------------------------------------------------------------------
/**
@class Graph
@brief class to create a graph object
A graph is the ultimate storage for a task dependency graph and is the main
gateway to interact with an executor.
A graph manages a set of nodes in a global object pool that animates and
recycles node objects efficiently without going through repetitive and
expensive memory allocations and deallocations.
This class is mainly used for creating an opaque graph object in a custom
class to interact with the executor through taskflow composition.
A graph object is move-only.
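One common pattern is to embed a graph in a custom class and expose it
through a @c graph() accessor so the class can participate in taskflow
composition (a minimal sketch; @c MyModule is a hypothetical name):
@code{.cpp}
struct MyModule {
  tf::Graph& graph() { return _graph; }
  tf::Graph _graph;
};
@endcode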
*/
class Graph {
friend class Node;
friend class FlowBuilder;
friend class Subflow;
friend class Taskflow;
friend class Executor;
public:
/**
@brief constructs a graph object
*/
Graph() = default;
/**
@brief disabled copy constructor
*/
Graph(const Graph&) = delete;
/**
@brief constructs a graph using move semantics
*/
Graph(Graph&&);
/**
@brief destructs the graph object
*/
~Graph();
/**
@brief disabled copy assignment operator
*/
Graph& operator = (const Graph&) = delete;
/**
@brief assigns a graph using move semantics
*/
Graph& operator = (Graph&&);
/**
@brief queries if the graph is empty
*/
bool empty() const;
/**
@brief queries the number of nodes in the graph
*/
size_t size() const;
/**
@brief clears the graph
*/
void clear();
private:
std::vector<Node*> _nodes;
void _clear();
void _clear_detached();
void _merge(Graph&&);
void _erase(Node*);
template <typename ...ArgsT>
Node* _emplace_back(ArgsT&&... args);
Node* _emplace_back();
};
// ----------------------------------------------------------------------------
/**
@class Runtime
@brief class to create a runtime object used by a runtime task
A runtime object is used by a runtime task for users to interact with the
scheduling runtime, such as scheduling an active task and
spawning a subflow.
@code{.cpp}
taskflow.emplace([](tf::Runtime& rt){
rt.run([](tf::Subflow& sf){
tf::Task A = sf.emplace([](){});
tf::Task B = sf.emplace([](){});
A.precede(B);
});
});
@endcode
A runtime task is associated with an executor and a worker that
runs the runtime task.
*/
class Runtime {
friend class Executor;
public:
/**
@brief obtains the running executor
The running executor of a runtime task is the executor that runs
the parent taskflow of that runtime task.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
taskflow.emplace([&](tf::Runtime& rt){
assert(&(rt.executor()) == &executor);
});
executor.run(taskflow).wait();
@endcode
*/
Executor& executor();
/**
@brief schedules an active task immediately to the worker's queue
@param task the given active task to schedule immediately
This member function immediately schedules an active task to the
task queue of the associated worker in the runtime task.
An active task is a task in a running taskflow.
The task may or may not be running, and scheduling that task
will immediately put the task into the task queue of the worker
that is running the runtime task.
Consider the following example:
@code{.cpp}
tf::Task A, B, C, D;
std::tie(A, B, C, D) = taskflow.emplace(
[] () { return 0; },
[&C] (tf::Runtime& rt) { // C must be captured by reference
std::cout << "B\n";
rt.schedule(C);
},
[] () { std::cout << "C\n"; },
[] () { std::cout << "D\n"; }
);
A.precede(B, C, D);
executor.run(taskflow).wait();
@endcode
The executor will first run the condition task @c A which returns @c 0
to inform the scheduler to go to the runtime task @c B.
During the execution of @c B, it directly schedules task @c C without
going through the normal taskflow graph scheduling process.
At this moment, task @c C is active because its parent taskflow is running.
When the taskflow finishes, we will see both @c B and @c C in the output.
*/
void schedule(Task task);
/**
@brief runs the given target and waits until it completes
A target can be
(1) a callable to spawn a subflow or
(2) a composable target with `tf::Graph& T::graph()` defined
@code{.cpp}
// complete a subflow synchronously
taskflow.emplace([](tf::Runtime& rt){
rt.run_and_wait([](tf::Subflow& sf){
tf::Task A = sf.emplace([](){});
tf::Task B = sf.emplace([](){});
});
});
// complete a custom graph synchronously
tf::Taskflow taskflow;
taskflow.emplace([](){});
taskflow.emplace([&](tf::Runtime& rt){
rt.run_and_wait(taskflow);
});
@endcode
*/
template <typename T>
void run_and_wait(T&& target);
private:
explicit Runtime(Executor&, Worker&, Node*);
Executor& _executor;
Worker& _worker;
Node* _parent;
};
// constructor
inline Runtime::Runtime(Executor& e, Worker& w, Node* p) :
_executor{e},
_worker {w},
_parent {p}{
}
// Function: executor
inline Executor& Runtime::executor() {
return _executor;
}
// ----------------------------------------------------------------------------
// Node
// ----------------------------------------------------------------------------
/**
@private
*/
class Node {
friend class Graph;
friend class Task;
friend class TaskView;
friend class Taskflow;
friend class Executor;
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
TF_ENABLE_POOLABLE_ON_THIS;
// state bit flag
constexpr static int CONDITIONED = 1;
constexpr static int DETACHED = 2;
constexpr static int ACQUIRED = 4;
constexpr static int READY = 8;
constexpr static int DEFERRED = 16;
// static work handle
struct Static {
template <typename C>
Static(C&&);
std::function<void()> work;
};
// runtime work handle
struct Runtime {
template <typename C>
Runtime(C&&);
std::function<void(tf::Runtime&)> work;
};
// dynamic work handle
struct Dynamic {
template <typename C>
Dynamic(C&&);
std::function<void(Subflow&)> work;
Graph subgraph;
};
// condition work handle
struct Condition {
template <typename C>
Condition(C&&);
std::function<int()> work;
};
// multi-condition work handle
struct MultiCondition {
template <typename C>
MultiCondition(C&&);
std::function<SmallVector<int>()> work;
};
// module work handle
struct Module {
template <typename T>
Module(T&);
Graph& graph;
};
// Async work
struct Async {
template <typename T>
Async(T&&, std::shared_ptr<AsyncTopology>);
std::function<void(bool)> work;
std::shared_ptr<AsyncTopology> topology;
};
// Silent async work
struct SilentAsync {
template <typename C>
SilentAsync(C&&);
std::function<void()> work;
};
// cudaFlow work handle
struct cudaFlow {
template <typename C, typename G>
cudaFlow(C&& c, G&& g);
std::function<void(Executor&, Node*)> work;
std::unique_ptr<CustomGraphBase> graph;
};
// syclFlow work handle
struct syclFlow {
template <typename C, typename G>
syclFlow(C&& c, G&& g);
std::function<void(Executor&, Node*)> work;
std::unique_ptr<CustomGraphBase> graph;
};
using handle_t = std::variant<
std::monostate, // placeholder
Static, // static tasking
Dynamic, // dynamic tasking
Condition, // conditional tasking
MultiCondition, // multi-conditional tasking
Module, // composable tasking
Async, // async tasking
SilentAsync, // async tasking (no future)
cudaFlow, // cudaFlow
syclFlow, // syclFlow
Runtime // runtime tasking
>;
struct Semaphores {
SmallVector<Semaphore*> to_acquire;
SmallVector<Semaphore*> to_release;
};
public:
// variant index
constexpr static auto PLACEHOLDER = get_index_v<std::monostate, handle_t>;
constexpr static auto STATIC = get_index_v<Static, handle_t>;
constexpr static auto DYNAMIC = get_index_v<Dynamic, handle_t>;
constexpr static auto CONDITION = get_index_v<Condition, handle_t>;
constexpr static auto MULTI_CONDITION = get_index_v<MultiCondition, handle_t>;
constexpr static auto MODULE = get_index_v<Module, handle_t>;
constexpr static auto ASYNC = get_index_v<Async, handle_t>;
constexpr static auto SILENT_ASYNC = get_index_v<SilentAsync, handle_t>;
constexpr static auto CUDAFLOW = get_index_v<cudaFlow, handle_t>;
constexpr static auto SYCLFLOW = get_index_v<syclFlow, handle_t>;
constexpr static auto RUNTIME = get_index_v<Runtime, handle_t>;
template <typename... Args>
Node(Args&&... args);
~Node();
size_t num_successors() const;
size_t num_dependents() const;
size_t num_strong_dependents() const;
size_t num_weak_dependents() const;
const std::string& name() const;
private:
std::string _name;
unsigned _priority {0};
void* _data {nullptr};
handle_t _handle;
SmallVector<Node*> _successors;
SmallVector<Node*> _dependents;
Topology* _topology {nullptr};
Node* _parent {nullptr};
std::atomic<int> _state {0};
std::atomic<size_t> _join_counter {0};
std::unique_ptr<Semaphores> _semaphores;
void _precede(Node*);
void _set_up_join_counter();
bool _is_cancelled() const;
bool _is_conditioner() const;
bool _acquire_all(SmallVector<Node*>&);
SmallVector<Node*> _release_all();
};
// ----------------------------------------------------------------------------
// Node Object Pool
// ----------------------------------------------------------------------------
/**
@private
*/
inline ObjectPool<Node> node_pool;
// ----------------------------------------------------------------------------
// Definition for Node::Static
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Static::Static(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Dynamic
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Dynamic::Dynamic(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Condition
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Condition::Condition(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::MultiCondition
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::MultiCondition::MultiCondition(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::cudaFlow
// ----------------------------------------------------------------------------
template <typename C, typename G>
Node::cudaFlow::cudaFlow(C&& c, G&& g) :
work {std::forward<C>(c)},
graph {std::forward<G>(g)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::syclFlow
// ----------------------------------------------------------------------------
template <typename C, typename G>
Node::syclFlow::syclFlow(C&& c, G&& g) :
work {std::forward<C>(c)},
graph {std::forward<G>(g)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Module
// ----------------------------------------------------------------------------
// Constructor
template <typename T>
inline Node::Module::Module(T& obj) : graph{ obj.graph() } {
}
// ----------------------------------------------------------------------------
// Definition for Node::Async
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Async::Async(C&& c, std::shared_ptr<AsyncTopology>tpg) :
work {std::forward<C>(c)},
topology {std::move(tpg)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::SilentAsync
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::SilentAsync::SilentAsync(C&& c) :
work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node::Runtime
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
Node::Runtime::Runtime(C&& c) :
work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
// Definition for Node
// ----------------------------------------------------------------------------
// Constructor
template <typename... Args>
Node::Node(Args&&... args): _handle{std::forward<Args>(args)...} {
}
// Destructor
inline Node::~Node() {
// this is to avoid stack overflow
if(_handle.index() == DYNAMIC) {
// using std::get_if instead of std::get makes this compatible
// with older macOS versions
// the result of std::get_if is guaranteed to be non-null
// due to the index check above
auto& subgraph = std::get_if<Dynamic>(&_handle)->subgraph;
std::vector<Node*> nodes;
nodes.reserve(subgraph.size());
std::move(
subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes)
);
subgraph._nodes.clear();
size_t i = 0;
while(i < nodes.size()) {
if(nodes[i]->_handle.index() == DYNAMIC) {
auto& sbg = std::get_if<Dynamic>(&(nodes[i]->_handle))->subgraph;
std::move(
sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes)
);
sbg._nodes.clear();
}
++i;
}
//auto& np = Graph::_node_pool();
for(i=0; i<nodes.size(); ++i) {
node_pool.recycle(nodes[i]);
}
}
}
// Procedure: _precede
inline void Node::_precede(Node* v) {
_successors.push_back(v);
v->_dependents.push_back(this);
}
// Function: num_successors
inline size_t Node::num_successors() const {
return _successors.size();
}
// Function: dependents
inline size_t Node::num_dependents() const {
return _dependents.size();
}
// Function: num_weak_dependents
inline size_t Node::num_weak_dependents() const {
size_t n = 0;
for(size_t i=0; i<_dependents.size(); i++) {
//if(_dependents[i]->_handle.index() == Node::CONDITION) {
if(_dependents[i]->_is_conditioner()) {
n++;
}
}
return n;
}
// Function: num_strong_dependents
inline size_t Node::num_strong_dependents() const {
size_t n = 0;
for(size_t i=0; i<_dependents.size(); i++) {
//if(_dependents[i]->_handle.index() != Node::CONDITION) {
if(!_dependents[i]->_is_conditioner()) {
n++;
}
}
return n;
}
// Function: name
inline const std::string& Node::name() const {
return _name;
}
// Function: _is_conditioner
inline bool Node::_is_conditioner() const {
return _handle.index() == Node::CONDITION ||
_handle.index() == Node::MULTI_CONDITION;
}
// Function: _is_cancelled
inline bool Node::_is_cancelled() const {
if(_handle.index() == Node::ASYNC) {
auto h = std::get_if<Node::Async>(&_handle);
if(h->topology && h->topology->_is_cancelled.load(std::memory_order_relaxed)) {
return true;
}
    // async tasks spawned from a subflow do not have a topology
}
return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed);
}
// Procedure: _set_up_join_counter
inline void Node::_set_up_join_counter() {
size_t c = 0;
for(auto p : _dependents) {
//if(p->_handle.index() == Node::CONDITION) {
if(p->_is_conditioner()) {
_state.fetch_or(Node::CONDITIONED, std::memory_order_relaxed);
}
else {
c++;
}
}
_join_counter.store(c, std::memory_order_release);
}
// Function: _acquire_all
inline bool Node::_acquire_all(SmallVector<Node*>& nodes) {
auto& to_acquire = _semaphores->to_acquire;
for(size_t i = 0; i < to_acquire.size(); ++i) {
if(!to_acquire[i]->_try_acquire_or_wait(this)) {
for(size_t j = 1; j <= i; ++j) {
auto r = to_acquire[i-j]->_release();
nodes.insert(std::end(nodes), std::begin(r), std::end(r));
}
return false;
}
}
return true;
}
// Function: _release_all
inline SmallVector<Node*> Node::_release_all() {
auto& to_release = _semaphores->to_release;
SmallVector<Node*> nodes;
for(const auto& sem : to_release) {
auto r = sem->_release();
nodes.insert(std::end(nodes), std::begin(r), std::end(r));
}
return nodes;
}
// ----------------------------------------------------------------------------
// Graph definition
// ----------------------------------------------------------------------------
// Destructor
inline Graph::~Graph() {
_clear();
}
// Move constructor
inline Graph::Graph(Graph&& other) :
_nodes {std::move(other._nodes)} {
}
// Move assignment
inline Graph& Graph::operator = (Graph&& other) {
_clear();
_nodes = std::move(other._nodes);
return *this;
}
// Procedure: clear
inline void Graph::clear() {
_clear();
}
// Procedure: clear
inline void Graph::_clear() {
for(auto node : _nodes) {
node_pool.recycle(node);
}
_nodes.clear();
}
// Procedure: clear_detached
inline void Graph::_clear_detached() {
auto mid = std::partition(_nodes.begin(), _nodes.end(), [] (Node* node) {
return !(node->_state.load(std::memory_order_relaxed) & Node::DETACHED);
});
for(auto itr = mid; itr != _nodes.end(); ++itr) {
node_pool.recycle(*itr);
}
_nodes.resize(std::distance(_nodes.begin(), mid));
}
// Procedure: merge
inline void Graph::_merge(Graph&& g) {
for(auto n : g._nodes) {
_nodes.push_back(n);
}
g._nodes.clear();
}
// Function: erase
inline void Graph::_erase(Node* node) {
if(auto I = std::find(_nodes.begin(), _nodes.end(), node); I != _nodes.end()) {
_nodes.erase(I);
node_pool.recycle(node);
}
}
// Function: size
inline size_t Graph::size() const {
return _nodes.size();
}
// Function: empty
inline bool Graph::empty() const {
return _nodes.empty();
}
// Function: emplace_back
template <typename ...ArgsT>
Node* Graph::_emplace_back(ArgsT&&... args) {
_nodes.push_back(node_pool.animate(std::forward<ArgsT>(args)...));
return _nodes.back();
}
// Function: emplace_back
inline Node* Graph::_emplace_back() {
_nodes.push_back(node_pool.animate());
return _nodes.back();
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/executor.hpp | #pragma once
#include "observer.hpp"
#include "taskflow.hpp"
/**
@file executor.hpp
@brief executor include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Executor Definition
// ----------------------------------------------------------------------------
/** @class Executor
@brief class to create an executor for running a taskflow graph
An executor manages a set of worker threads to run one or multiple taskflows
using an efficient work-stealing scheduling algorithm.
@code{.cpp}
// Declare an executor and a taskflow
tf::Executor executor;
tf::Taskflow taskflow;
// Add three tasks into the taskflow
tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; });
tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; });
tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; });
// Build precedence between tasks
A.precede(B, C);
tf::Future<void> fu = executor.run(taskflow);
fu.wait(); // block until the execution completes
executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait();
executor.run_n(taskflow, 4);
executor.wait_for_all(); // block until all associated executions finish
executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait();
executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; });
@endcode
All the @c run methods are @em thread-safe. You can submit multiple
taskflows at the same time to an executor from different threads.
*/
class Executor {
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
public:
/**
@brief constructs the executor with @c N worker threads
@param N number of workers (default std::thread::hardware_concurrency)
@param wix worker interface class to alter worker (thread) behaviors
The constructor spawns @c N worker threads to run tasks in a
work-stealing loop. The number of workers must be greater than zero
or an exception will be thrown.
By default, the number of worker threads is equal to the maximum
hardware concurrency returned by std::thread::hardware_concurrency.
Users can alter the worker behavior, such as changing thread affinity,
via deriving an instance from tf::WorkerInterface.
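@code{.cpp}
tf::Executor e1;     // default: std::thread::hardware_concurrency() workers
tf::Executor e2(4);  // four worker threads
@endcode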
*/
explicit Executor(
size_t N = std::thread::hardware_concurrency(),
std::shared_ptr<WorkerInterface> wix = nullptr
);
/**
@brief destructs the executor
The destructor calls Executor::wait_for_all to wait for all submitted
taskflows to complete and then notifies all worker threads to stop
and join these threads.
*/
~Executor();
/**
@brief runs a taskflow once
@param taskflow a tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run(Taskflow& taskflow);
/**
@brief runs a moved taskflow once
@param taskflow a moved tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(std::move(taskflow));
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run(Taskflow&& taskflow);
/**
@brief runs a taskflow once and invoke a callback upon completion
@param taskflow a tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow, [](){ std::cout << "done"; });
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run(Taskflow& taskflow, C&& callable);
/**
@brief runs a moved taskflow once and invoke a callback upon completion
@param taskflow a moved tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(
std::move(taskflow), [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run(Taskflow&& taskflow, C&& callable);
/**
@brief runs a taskflow for @c N times
@param taskflow a tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(taskflow, 2); // run taskflow 2 times
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run_n(Taskflow& taskflow, size_t N);
/**
@brief runs a moved taskflow for @c N times
@param taskflow a moved tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_n(
std::move(taskflow), 2 // run the moved taskflow 2 times
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run_n(Taskflow&& taskflow, size_t N);
/**
@brief runs a taskflow for @c N times and then invokes a callback
@param taskflow a tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invokes
// the lambda to print "done"
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run_n(Taskflow& taskflow, size_t N, C&& callable);
/**
@brief runs a moved taskflow for @c N times and then invokes a callback
@param taskflow a moved tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
// run the moved taskflow 2 times and invoke the lambda to print "done"
std::move(taskflow), 2, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run_n(Taskflow&& taskflow, size_t N, C&& callable);
/**
@brief runs a taskflow multiple times until the predicate becomes true
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred);
/**
@brief runs a moved taskflow and keeps running it
until the predicate becomes true
@param taskflow a moved tf::Taskflow object
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow), [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred);
/**
@brief runs a taskflow multiple times until the predicate becomes true and
then invokes the callback
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred, C&& callable);
/**
@brief runs a moved taskflow and keeps running
it until the predicate becomes true and then invokes the callback
@param taskflow a moved tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow),
[](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred, C&& callable);
/**
@brief runs a target graph and waits until it completes using
an internal worker of this executor
@tparam T target type which has `tf::Graph& T::graph()` defined
@param target the target task graph object
The method runs a target graph which has `tf::Graph& T::graph()` defined
and waits until the execution completes.
Unlike the typical flow of calling `tf::Executor::run` series
plus waiting on the result, this method must be called by an internal
worker of this executor. The caller worker will participate in
the work-stealing loop of the scheduler, thereby avoiding potential
deadlock caused by blocked waiting.
@code{.cpp}
tf::Executor executor(2);
tf::Taskflow taskflow;
std::array<tf::Taskflow, 1000> others;
std::atomic<size_t> counter{0};
for(size_t n=0; n<1000; n++) {
for(size_t i=0; i<1000; i++) {
others[n].emplace([&](){ counter++; });
}
taskflow.emplace([&executor, &tf=others[n]](){
executor.run_and_wait(tf);
//executor.run(tf).wait(); <- blocking the worker without doing anything
// will introduce deadlock
});
}
executor.run(taskflow).wait();
@endcode
The method is thread-safe as long as the target is not concurrently
run by two or more threads.
@attention
You must call tf::Executor::run_and_wait from a worker of the calling executor
or an exception will be thrown.
*/
template <typename T>
void run_and_wait(T& target);
/**
@brief keeps running the work-stealing loop until the predicate becomes true
@tparam P predicate type
@param predicate a boolean predicate to indicate when to stop the loop
The method keeps the caller worker in the work-stealing loop such that it
does not block (e.g., causing deadlock with other blocking workers)
until the stop predicate becomes true.
@code{.cpp}
taskflow.emplace([&](){
std::future<void> fu = std::async(std::launch::async, [](){
  std::this_thread::sleep_for(std::chrono::seconds(100));
});
executor.loop_until([&fu](){
  return fu.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
});
});
@endcode
@attention
You must call tf::Executor::loop_until from a worker of the calling executor
or an exception will be thrown.
*/
template <typename P>
void loop_until(P&& predicate);
/**
@brief waits for all tasks to complete
This member function blocks until all submitted tasks
(e.g., taskflows, asynchronous tasks) have finished.
@code{.cpp}
executor.run(taskflow1);
executor.run_n(taskflow2, 10);
executor.run_n(taskflow3, 100);
executor.wait_for_all(); // wait until the above submitted taskflows finish
@endcode
*/
void wait_for_all();
/**
@brief queries the number of worker threads
Each worker represents one unique thread spawned by an executor
upon its construction.
@code{.cpp}
tf::Executor executor(4);
std::cout << executor.num_workers(); // 4
@endcode
*/
size_t num_workers() const noexcept;
/**
@brief queries the number of running topologies at the time of this call
When a taskflow is submitted to an executor, a topology is created to store
runtime metadata of the running taskflow.
When the execution of the submitted taskflow finishes,
its corresponding topology will be removed from the executor.
@code{.cpp}
executor.run(taskflow);
std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_topologies() const;
/**
@brief queries the number of running taskflows with moved ownership
@code{.cpp}
executor.run(std::move(taskflow));
std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_taskflows() const;
/**
@brief queries the id of the caller thread in this executor
Each worker has a unique id in the range of @c 0 to @c N-1 associated with
its parent executor.
If the caller thread does not belong to the executor, @c -1 is returned.
@code{.cpp}
tf::Executor executor(4); // 4 workers in the executor
executor.this_worker_id(); // -1 (main thread is not a worker)
taskflow.emplace([&](){
std::cout << executor.this_worker_id(); // 0, 1, 2, or 3
});
executor.run(taskflow);
@endcode
*/
int this_worker_id() const;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
Unlike std::async, the return here is a @em tf::Future that holds
an optional object to the result.
If the asynchronous task is cancelled before it runs, the returned optional
holds @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.async([](){
std::cout << "create an asynchronous task and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs a given function asynchronously and gives a name to this task
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
Naming an asynchronous task is primarily used for profiling and visualizing
the task execution timeline.
Unlike std::async, the return here is a tf::Future that holds
an optional object to the result.
If the asynchronous task is cancelled before it runs, the returned optional
holds @c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.named_async("name", [](){
std::cout << "create an asynchronous task with a name and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::async but does not return a future object
This member function is more efficient than tf::Executor::async
and is recommended when there is no need for the returned data.
@code{.cpp}
executor.silent_async([](){
std::cout << "create an asynchronous task with no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::named_async but does not return a future object
This member function is more efficient than tf::Executor::named_async
and is recommended when there is no need for the returned data.
@code{.cpp}
executor.named_silent_async("name", [](){
std::cout << "create an asynchronous task with a name and no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief constructs an observer to inspect the activities of worker threads
@tparam Observer observer type derived from tf::ObserverInterface
@tparam ArgsT argument parameter pack
@param args arguments to forward to the constructor of the observer
@return a shared pointer to the created observer
Each executor manages a list of observers with shared ownership with callers.
For each of these observers, the two member functions,
tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit
will be called before and after the execution of a task.
This member function is not thread-safe.
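A usage sketch is given below; @c MyObserver is a hypothetical user-defined
type derived from tf::ObserverInterface (its overrides mirror the calls the
executor makes on observers):
@code{.cpp}
struct MyObserver : public tf::ObserverInterface {
  void set_up(size_t num_workers) override {}
  void on_entry(tf::WorkerView wv, tf::TaskView tv) override {}
  void on_exit(tf::WorkerView wv, tf::TaskView tv) override {}
};
std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>();
@endcode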
*/
template <typename Observer, typename... ArgsT>
std::shared_ptr<Observer> make_observer(ArgsT&&... args);
/**
@brief removes an observer from the executor
This member function is not thread-safe.
*/
template <typename Observer>
void remove_observer(std::shared_ptr<Observer> observer);
/**
@brief queries the number of observers
*/
size_t num_observers() const noexcept;
private:
const size_t _MAX_STEALS;
std::condition_variable _topology_cv;
std::mutex _taskflow_mutex;
std::mutex _topology_mutex;
std::mutex _wsq_mutex;
size_t _num_topologies {0};
std::unordered_map<std::thread::id, size_t> _wids;
std::vector<std::thread> _threads;
std::vector<Worker> _workers;
std::list<Taskflow> _taskflows;
Notifier _notifier;
TaskQueue<Node*> _wsq;
std::atomic<bool> _done {0};
std::shared_ptr<WorkerInterface> _worker_interface;
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
Worker* _this_worker();
bool _wait_for_task(Worker&, Node*&);
void _observer_prologue(Worker&, Node*);
void _observer_epilogue(Worker&, Node*);
void _spawn(size_t);
void _exploit_task(Worker&, Node*&);
void _explore_task(Worker&, Node*&);
void _schedule(Worker&, Node*);
void _schedule(Node*);
void _schedule(Worker&, const SmallVector<Node*>&);
void _schedule(const SmallVector<Node*>&);
void _set_up_topology(Worker*, Topology*);
void _tear_down_topology(Worker&, Topology*);
void _tear_down_async(Node*);
void _tear_down_invoke(Worker&, Node*);
void _cancel_invoke(Worker&, Node*);
void _increment_topology();
void _decrement_topology();
void _decrement_topology_and_notify();
void _invoke(Worker&, Node*);
void _invoke_static_task(Worker&, Node*);
void _invoke_dynamic_task(Worker&, Node*);
void _consume_graph(Worker&, Node*, Graph&);
void _detach_dynamic_task(Worker&, Node*, Graph&);
void _invoke_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_module_task(Worker&, Node*);
void _invoke_async_task(Worker&, Node*);
void _invoke_silent_async_task(Worker&, Node*);
void _invoke_cudaflow_task(Worker&, Node*);
void _invoke_syclflow_task(Worker&, Node*);
void _invoke_runtime_task(Worker&, Node*);
template <typename P>
void _loop_until(Worker&, P&&);
template <typename C, std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr>
void _invoke_cudaflow_task_entry(Node*, C&&);
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
void _invoke_syclflow_task_entry(Node*, C&&, Q&);
};
// Constructor
inline Executor::Executor(size_t N, std::shared_ptr<WorkerInterface> wix) :
_MAX_STEALS {((N+1) << 1)},
_threads {N},
_workers {N},
_notifier {N},
_worker_interface {std::move(wix)} {
if(N == 0) {
TF_THROW("no cpu workers to execute taskflows");
}
_spawn(N);
// instantiate the default observer if requested
if(has_env(TF_ENABLE_PROFILER)) {
TFProfManager::get()._manage(make_observer<TFProfObserver>());
}
}
// Destructor
inline Executor::~Executor() {
// wait for all topologies to complete
wait_for_all();
// shut down the scheduler
_done = true;
_notifier.notify(true);
for(auto& t : _threads){
t.join();
}
}
// Function: num_workers
inline size_t Executor::num_workers() const noexcept {
return _workers.size();
}
// Function: num_topologies
inline size_t Executor::num_topologies() const {
return _num_topologies;
}
// Function: num_taskflows
inline size_t Executor::num_taskflows() const {
return _taskflows.size();
}
// Function: _this_worker
inline Worker* Executor::_this_worker() {
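// look up the worker that owns the calling thread;
// returns nullptr if the caller is not a worker of this executor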
auto itr = _wids.find(std::this_thread::get_id());
return itr == _wids.end() ? nullptr : &_workers[itr->second];
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... args) {
_increment_topology();
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Executor::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Executor::named_silent_async(
const std::string& name, F&& f, ArgsT&&... args
) {
_increment_topology();
Node* node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else {
_schedule(node);
}
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Executor::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: this_worker_id
inline int Executor::this_worker_id() const {
auto i = _wids.find(std::this_thread::get_id());
return i == _wids.end() ? -1 : static_cast<int>(_workers[i->second]._id);
}
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
std::mutex mutex;
std::condition_variable cond;
size_t n=0;
for(size_t id=0; id<N; ++id) {
_workers[id]._id = id;
_workers[id]._vtm = id;
_workers[id]._executor = this;
_workers[id]._waiter = &_notifier._waiters[id];
_threads[id] = std::thread([this] (
Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n
) -> void {
// assign the thread
w._thread = &_threads[w._id];
// enables the mapping
{
std::scoped_lock lock(mutex);
_wids[std::this_thread::get_id()] = w._id;
if(n++; n == num_workers()) {
cond.notify_one();
}
}
Node* t = nullptr;
// before entering the scheduler (work-stealing loop),
// call the user-specified prologue function
if(_worker_interface) {
_worker_interface->scheduler_prologue(w);
}
// must use 1 as condition instead of !done because
// the previous worker may stop while the following workers
// are still preparing for entering the scheduling loop
std::exception_ptr ptr{nullptr};
try {
while(1) {
// execute the tasks.
_exploit_task(w, t);
// wait for tasks
if(_wait_for_task(w, t) == false) {
break;
}
}
}
catch(...) {
ptr = std::current_exception();
}
// call the user-specified epilogue function
if(_worker_interface) {
_worker_interface->scheduler_epilogue(w, ptr);
}
}, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n));
// POSIX-like system can use the following to affine threads to cores
//cpu_set_t cpuset;
//CPU_ZERO(&cpuset);
//CPU_SET(id, &cpuset);
//pthread_setaffinity_np(
// _threads[id].native_handle(), sizeof(cpu_set_t), &cpuset
//);
}
std::unique_lock<std::mutex> lock(mutex);
cond.wait(lock, [&](){ return n==N; });
}
// Function: _loop_until
template <typename P>
inline void Executor::_loop_until(Worker& w, P&& stop_predicate) {
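// keep the caller worker in an exploit-explore (work-stealing) loop
// until the stop predicate becomes true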
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
exploit:
while(!stop_predicate()) {
//exploit:
if(auto t = w._wsq.pop(); t) {
_invoke(w, t);
}
else {
size_t num_steals = 0;
explore:
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
_invoke(w, t);
goto exploit;
}
else if(!stop_predicate()) {
if(num_steals++ > _MAX_STEALS) {
std::this_thread::yield();
}
w._vtm = rdvtm(w._rdgen);
goto explore;
}
else {
break;
}
}
}
}
// Function: _explore_task
inline void Executor::_explore_task(Worker& w, Node*& t) {
//assert(_workers[w].wsq.empty());
//assert(!t);
size_t num_steals = 0;
size_t num_yields = 0;
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
// Here, we write do-while to make the worker steal at once
// from the assigned victim.
do {
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
break;
}
if(num_steals++ > _MAX_STEALS) {
std::this_thread::yield();
if(num_yields++ > 100) {
break;
}
}
w._vtm = rdvtm(w._rdgen);
} while(!_done);
}
// Procedure: _exploit_task
inline void Executor::_exploit_task(Worker& w, Node*& t) {
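// invoke the given task and keep draining the worker's local queue
// until it becomes empty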
while(t) {
_invoke(w, t);
t = w._wsq.pop();
}
}
// Function: _wait_for_task
inline bool Executor::_wait_for_task(Worker& worker, Node*& t) {
explore_task:
_explore_task(worker, t);
// The last thief who successfully stole a task will wake up
// another thief worker to avoid starvation.
if(t) {
_notifier.notify(false);
return true;
}
// ---- 2PC guard ----
_notifier.prepare_wait(worker._waiter);
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
worker._vtm = worker._id;
goto explore_task;
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
return false;
}
// We need to use index-based scanning to avoid data race
// with _spawn which may initialize a worker at the same time.
for(size_t vtm=0; vtm<_workers.size(); vtm++) {
if(!_workers[vtm]._wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
worker._vtm = vtm;
goto explore_task;
}
}
//--_num_thieves;
//_num_thieves.fetch_sub(1, std::memory_order_release);
/*//if(auto vtm = _find_vtm(me); vtm != _workers.size()) {
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
//t = (vtm == me) ? _wsq.steal() : _workers[vtm].wsq.steal();
t = _wsq.steal(); // must steal here
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
else {
worker._vtm = worker._id;
goto explore_task;
}
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
--_num_thieves;
return false;
}
if(_num_thieves.fetch_sub(1) == 1) {
if(_num_actives) {
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
// check all queues again
for(auto& w : _workers) {
if(!w._wsq.empty()) {
worker._vtm = w._id;
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
}
}*/
// Now I really need to relinquish myself to others
_notifier.commit_wait(worker._waiter);
goto explore_task;
}
// Function: make_observer
template<typename Observer, typename... ArgsT>
std::shared_ptr<Observer> Executor::make_observer(ArgsT&&... args) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
// use a local variable to mimic the constructor
auto ptr = std::make_shared<Observer>(std::forward<ArgsT>(args)...);
ptr->set_up(_workers.size());
_observers.emplace(std::static_pointer_cast<ObserverInterface>(ptr));
return ptr;
}
// Procedure: remove_observer
template <typename Observer>
void Executor::remove_observer(std::shared_ptr<Observer> ptr) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
_observers.erase(std::static_pointer_cast<ObserverInterface>(ptr));
}
// Function: num_observers
inline size_t Executor::num_observers() const noexcept {
return _observers.size();
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, Node* node) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid data race.
auto p = node->_priority;
node->_state.fetch_or(Node::READY, std::memory_order_release);
// caller is a worker to this pool - starting at v3.5 we do not use
// any complicated notification mechanism as the experimental result
// has shown no significant advantage.
if(worker._executor == this) {
worker._wsq.push(node, p);
_notifier.notify(false);
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node, p);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Node* node) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid data race.
auto p = node->_priority;
node->_state.fetch_or(Node::READY, std::memory_order_release);
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node, p);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, const SmallVector<Node*>& nodes) {
// We need to cache the node count to avoid accessing the nodes
// vector while the parent topology is removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// caller is a worker to this pool - starting at v3.5 we do not use
// any complicated notification mechanism as the experimental result
// has shown no significant advantage.
if(worker._executor == this) {
for(size_t i=0; i<num_nodes; ++i) {
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid data race.
auto p = nodes[i]->_priority;
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
worker._wsq.push(nodes[i], p);
_notifier.notify(false);
}
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
auto p = nodes[k]->_priority;
nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release);
_wsq.push(nodes[k], p);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _schedule
inline void Executor::_schedule(const SmallVector<Node*>& nodes) {
// parent topology may be removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// We need to fetch p before the release such that the read
// operation is synchronized properly with other threads to
// avoid data race.
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
auto p = nodes[k]->_priority;
nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release);
_wsq.push(nodes[k], p);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _invoke
inline void Executor::_invoke(Worker& worker, Node* node) {
// synchronize all outstanding memory operations caused by reordering
while(!(node->_state.load(std::memory_order_acquire) & Node::READY));
begin_invoke:
// no need to do other things if the topology is cancelled
if(node->_is_cancelled()) {
_cancel_invoke(worker, node);
return;
}
// if acquiring semaphore(s) exists, acquire them first
if(node->_semaphores && !node->_semaphores->to_acquire.empty()) {
SmallVector<Node*> nodes;
if(!node->_acquire_all(nodes)) {
_schedule(worker, nodes);
return;
}
node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release);
}
// condition task
//int cond = -1;
SmallVector<int> conds;
// switch is faster than nested if-else due to jump table
switch(node->_handle.index()) {
// static task
case Node::STATIC:{
_invoke_static_task(worker, node);
}
break;
// dynamic task
case Node::DYNAMIC: {
_invoke_dynamic_task(worker, node);
}
break;
// condition task
case Node::CONDITION: {
_invoke_condition_task(worker, node, conds);
}
break;
// multi-condition task
case Node::MULTI_CONDITION: {
_invoke_multi_condition_task(worker, node, conds);
}
break;
// module task
case Node::MODULE: {
_invoke_module_task(worker, node);
}
break;
// async task
case Node::ASYNC: {
_invoke_async_task(worker, node);
_tear_down_async(node);
return ;
}
break;
// silent async task
case Node::SILENT_ASYNC: {
_invoke_silent_async_task(worker, node);
_tear_down_async(node);
return ;
}
break;
// cudaflow task
case Node::CUDAFLOW: {
_invoke_cudaflow_task(worker, node);
}
break;
// syclflow task
case Node::SYCLFLOW: {
_invoke_syclflow_task(worker, node);
}
break;
// runtime task
case Node::RUNTIME: {
_invoke_runtime_task(worker, node);
}
break;
// monostate (placeholder)
default:
break;
}
// if releasing semaphores exist, release them
if(node->_semaphores && !node->_semaphores->to_release.empty()) {
_schedule(worker, node->_release_all());
}
// Reset the join counter to support the cyclic control flow.
// + We must do this before scheduling the successors to avoid race
// condition on _dependents.
// + We must use fetch_add instead of direct assigning
// because the user-space call on "invoke" may explicitly schedule
// this task again (e.g., pipeline) which can access the join_counter.
if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) {
node->_join_counter.fetch_add(node->num_strong_dependents());
}
else {
node->_join_counter.fetch_add(node->num_dependents());
}
// acquire the parent flow counter
auto& j = (node->_parent) ? node->_parent->_join_counter :
node->_topology->_join_counter;
// Here, we want to cache the latest successor with the highest priority
Node* cache {nullptr};
auto max_p = static_cast<unsigned>(TaskPriority::MAX);
// Invoke the task based on the corresponding type
switch(node->_handle.index()) {
// condition and multi-condition tasks
case Node::CONDITION:
case Node::MULTI_CONDITION: {
for(auto cond : conds) {
if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) {
auto s = node->_successors[cond];
// zeroing the join counter for invariant
s->_join_counter.store(0, std::memory_order_relaxed);
j.fetch_add(1);
if(s->_priority <= max_p) {
if(cache) {
_schedule(worker, cache);
}
cache = s;
max_p = s->_priority;
}
else {
_schedule(worker, s);
}
}
}
}
break;
// non-condition task
default: {
for(size_t i=0; i<node->_successors.size(); ++i) {
if(auto s = node->_successors[i]; --(s->_join_counter) == 0) {
j.fetch_add(1);
if(s->_priority <= max_p) {
if(cache) {
_schedule(worker, cache);
}
cache = s;
max_p = s->_priority;
}
else {
_schedule(worker, s);
}
}
}
}
break;
}
// tear_down the invoke
_tear_down_invoke(worker, node);
// perform tail recursion elimination for the right-most child to reduce
// the number of expensive pop/push operations through the task queue
if(cache) {
node = cache;
//node->_state.fetch_or(Node::READY, std::memory_order_release);
goto begin_invoke;
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1);
}
else {
_decrement_topology_and_notify();
}
node_pool.recycle(node);
}
// Procedure: _tear_down_invoke
inline void Executor::_tear_down_invoke(Worker& worker, Node* node) {
// we must check parent first before subtracting the join counter,
// or it can introduce data race
if(node->_parent == nullptr) {
if(node->_topology->_join_counter.fetch_sub(1) == 1) {
_tear_down_topology(worker, node->_topology);
}
}
// joined subflow
else {
node->_parent->_join_counter.fetch_sub(1);
}
}
// Procedure: _cancel_invoke
inline void Executor::_cancel_invoke(Worker& worker, Node* node) {
switch(node->_handle.index()) {
// async task needs to carry out the promise
case Node::ASYNC:
std::get_if<Node::Async>(&(node->_handle))->work(true);
_tear_down_async(node);
break;
// silent async doesn't need to carry out the promise
case Node::SILENT_ASYNC:
_tear_down_async(node);
break;
// tear down topology if the node is the last leaf
default: {
_tear_down_invoke(worker, node);
}
break;
}
}
// Procedure: _observer_prologue
inline void Executor::_observer_prologue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_entry(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _observer_epilogue
inline void Executor::_observer_epilogue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_exit(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _invoke_static_task
inline void Executor::_invoke_static_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::Static>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_dynamic_task
inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) {
_observer_prologue(w, node);
auto handle = std::get_if<Node::Dynamic>(&node->_handle);
handle->subgraph._clear();
Subflow sf(*this, w, node, handle->subgraph);
handle->work(sf);
if(sf._joinable) {
_consume_graph(w, node, handle->subgraph);
}
_observer_epilogue(w, node);
}
// Procedure: _detach_dynamic_task
inline void Executor::_detach_dynamic_task(
Worker& w, Node* p, Graph& g
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_state.store(Node::DETACHED, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_topology = p->_topology;
n->_parent = nullptr;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
{
std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex);
p->_topology->_taskflow._graph._merge(std::move(g));
}
p->_topology->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// Procedure: _consume_graph
inline void Executor::_consume_graph(Worker& w, Node* p, Graph& g) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_topology = p->_topology;
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_loop_until(w, [p] () -> bool { return p->_join_counter == 0; });
}
// Procedure: _invoke_condition_task
inline void Executor::_invoke_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = { std::get_if<Node::Condition>(&node->_handle)->work() };
_observer_epilogue(worker, node);
}
// Procedure: _invoke_multi_condition_task
inline void Executor::_invoke_multi_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = std::get_if<Node::MultiCondition>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_cudaflow_task
inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::cudaFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_syclflow_task
inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::syclFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_module_task
inline void Executor::_invoke_module_task(Worker& w, Node* node) {
_observer_prologue(w, node);
_consume_graph(
w, node, std::get_if<Node::Module>(&node->_handle)->graph
);
_observer_epilogue(w, node);
}
// Procedure: _invoke_async_task
inline void Executor::_invoke_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::Async>(&node->_handle)->work(false);
_observer_epilogue(w, node);
}
// Procedure: _invoke_silent_async_task
inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::SilentAsync>(&node->_handle)->work();
_observer_epilogue(w, node);
}
// Procedure: _invoke_runtime_task
inline void Executor::_invoke_runtime_task(Worker& w, Node* node) {
_observer_prologue(w, node);
Runtime rt(*this, w, node);
std::get_if<Node::Runtime>(&node->_handle)->work(rt);
_observer_epilogue(w, node);
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow& f) {
return run_n(f, 1, [](){});
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow&& f) {
return run_n(std::move(f), 1, [](){});
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow& f, C&& c) {
return run_n(f, 1, std::forward<C>(c));
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow&& f, C&& c) {
return run_n(std::move(f), 1, std::forward<C>(c));
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat) {
return run_n(f, repeat, [](){});
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat) {
return run_n(std::move(f), repeat, [](){});
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat, C&& c) {
return run_until(
f, [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat, C&& c) {
return run_until(
std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow& f, P&& pred) {
return run_until(f, std::forward<P>(pred), [](){});
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) {
return run_until(std::move(f), std::forward<P>(pred), [](){});
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) {
_increment_topology();
// Need to check emptiness under the lock since dynamic tasks may
// define detached blocks that modify the taskflow at the same time
bool empty;
{
std::lock_guard<std::mutex> lock(f._mutex);
empty = f.empty();
}
// No need to create a real topology; just return a dummy future
if(empty || p()) {
c();
std::promise<void> promise;
promise.set_value();
_decrement_topology_and_notify();
return tf::Future<void>(promise.get_future(), std::monostate{});
}
// create a topology for this run
auto t = std::make_shared<Topology>(f, std::forward<P>(p), std::forward<C>(c));
// need to create the future before the topology gets torn down quickly
tf::Future<void> future(t->_promise.get_future(), t);
// modifying topology needs to be protected under the lock
{
std::lock_guard<std::mutex> lock(f._mutex);
f._topologies.push(t);
if(f._topologies.size() == 1) {
_set_up_topology(_this_worker(), t.get());
}
}
return future;
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) {
std::list<Taskflow>::iterator itr;
{
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
itr = _taskflows.emplace(_taskflows.end(), std::move(f));
itr->_satellite = itr;
}
return run_until(*itr, std::forward<P>(pred), std::forward<C>(c));
}
// Function: run_and_wait
template <typename T>
void Executor::run_and_wait(T& target) {
auto w = _this_worker();
if(w == nullptr) {
TF_THROW("run_and_wait must be called by a worker of the executor");
}
Node parent; // dummy parent
_consume_graph(*w, &parent, target.graph());
}
// Function: loop_until
template <typename P>
void Executor::loop_until(P&& predicate) {
auto w = _this_worker();
if(w == nullptr) {
TF_THROW("loop_until must be called by a worker of the executor");
}
_loop_until(*w, std::forward<P>(predicate));
}
// Procedure: _increment_topology
inline void Executor::_increment_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
++_num_topologies;
}
// Procedure: _decrement_topology_and_notify
inline void Executor::_decrement_topology_and_notify() {
std::lock_guard<std::mutex> lock(_topology_mutex);
if(--_num_topologies == 0) {
_topology_cv.notify_all();
}
}
// Procedure: _decrement_topology
inline void Executor::_decrement_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
--_num_topologies;
}
// Procedure: wait_for_all
inline void Executor::wait_for_all() {
std::unique_lock<std::mutex> lock(_topology_mutex);
_topology_cv.wait(lock, [&](){ return _num_topologies == 0; });
}
// Function: _set_up_topology
inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) {
// ---- under taskflow lock ----
tpg->_sources.clear();
tpg->_taskflow._graph._clear_detached();
// scan each node in the graph and build up the links
for(auto node : tpg->_taskflow._graph._nodes) {
node->_topology = tpg;
node->_parent = nullptr;
node->_state.store(0, std::memory_order_relaxed);
if(node->num_dependents() == 0) {
tpg->_sources.push_back(node);
}
node->_set_up_join_counter();
}
tpg->_join_counter = tpg->_sources.size();
if(worker) {
_schedule(*worker, tpg->_sources);
}
else {
_schedule(tpg->_sources);
}
}
// Function: _tear_down_topology
inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) {
auto &f = tpg->_taskflow;
//assert(&tpg == &(f._topologies.front()));
// case 1: we still need to run the topology again
if(!tpg->_is_cancelled && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
tpg->_join_counter = tpg->_sources.size();
_schedule(worker, tpg->_sources);
}
// case 2: the final run of this topology
else {
// TODO: if the topology is cancelled, need to release all semaphores
if(tpg->_call != nullptr) {
tpg->_call();
}
// If there is another run (interleave between lock)
if(std::unique_lock<std::mutex> lock(f._mutex); f._topologies.size()>1) {
//assert(tpg->_join_counter == 0);
// Set the promise
tpg->_promise.set_value();
f._topologies.pop();
tpg = f._topologies.front().get();
// decrement the topology but since this is not the last we don't notify
_decrement_topology();
// set up topology needs to be under the lock or it can
// introduce memory order error with pop
_set_up_topology(&worker, tpg);
}
else {
//assert(f._topologies.size() == 1);
// Need to back up the promise first here because the taskflow might be
// destroyed soon after calling get
auto p {std::move(tpg->_promise)};
// Back up lambda capture in case it has the topology pointer,
// to avoid it releasing on pop_front ahead of _mutex.unlock &
// _promise.set_value. Released safely when leaving scope.
auto c {std::move(tpg->_call)};
// Get the satellite if any
auto s {f._satellite};
// Now we remove the topology from this taskflow
f._topologies.pop();
//f._mutex.unlock();
lock.unlock();
// We set the promise in the end in case taskflow leaves the scope.
// After set_value, the caller will return from wait
p.set_value();
_decrement_topology_and_notify();
// remove the taskflow if it is managed by the executor
// TODO: in the future, we may need to synchronize on wait
// (which means the following code should be moved before set_value)
if(s) {
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
_taskflows.erase(*s);
}
}
}
}
// ############################################################################
// Forward Declaration: Subflow
// ############################################################################
inline void Subflow::join() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow not joinable");
}
// only the parent worker can join the subflow
_executor._consume_graph(_worker, _parent, _graph);
_joinable = false;
}
inline void Subflow::detach() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow already joined or detached");
}
// only the parent worker can detach the subflow
_executor._detach_dynamic_task(_worker, _parent, _graph);
_joinable = false;
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) {
return _named_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: _named_async
template <typename F, typename... ArgsT>
auto Subflow::_named_async(
Worker& w,
const std::string& name,
F&& f,
ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Subflow::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: _named_silent_async
template <typename F, typename... ArgsT>
void Subflow::_named_silent_async(
Worker& w, const std::string& name, F&& f, ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
auto node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) {
_named_silent_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Subflow::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// ############################################################################
// Forward Declaration: Runtime
// ############################################################################
// Procedure: schedule
inline void Runtime::schedule(Task task) {
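// bump the join counter of the parent (or the topology) before scheduling
// so the parent is not torn down while the scheduled task is in flight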
auto node = task._node;
auto& j = node->_parent ? node->_parent->_join_counter :
node->_topology->_join_counter;
j.fetch_add(1);
_executor._schedule(_worker, node);
}
// Procedure: run_and_wait
template <typename T>
void Runtime::run_and_wait(T&& target) {
// dynamic task (subflow)
if constexpr(is_dynamic_task_v<T>) {
Graph graph;
Subflow sf(_executor, _worker, _parent, graph);
target(sf);
if(sf._joinable) {
_executor._consume_graph(_worker, _parent, graph);
}
}
// graph object
else {
_executor._consume_graph(_worker, _parent, target.graph());
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/task.hpp | #pragma once
#include "graph.hpp"
/**
@file task.hpp
@brief task include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Task Types
// ----------------------------------------------------------------------------
/**
@enum TaskType
@brief enumeration of all task types
*/
enum class TaskType : int {
/** @brief placeholder task type */
PLACEHOLDER = 0,
/** @brief cudaFlow task type */
CUDAFLOW,
/** @brief syclFlow task type */
SYCLFLOW,
/** @brief static task type */
STATIC,
/** @brief dynamic (subflow) task type */
DYNAMIC,
/** @brief condition task type */
CONDITION,
/** @brief module task type */
MODULE,
/** @brief asynchronous task type */
ASYNC,
/** @brief runtime task type */
RUNTIME,
/** @brief undefined task type (for internal use only) */
UNDEFINED
};
/**
@private
@brief array of all task types (used for iterating task types)
*/
inline constexpr std::array<TaskType, 9> TASK_TYPES = {
TaskType::PLACEHOLDER,
TaskType::CUDAFLOW,
TaskType::SYCLFLOW,
TaskType::STATIC,
TaskType::DYNAMIC,
TaskType::CONDITION,
TaskType::MODULE,
TaskType::ASYNC,
TaskType::RUNTIME
};
/**
@brief convert a task type to a human-readable string
The name of each task type is the lower-case string of its characters.
@code{.cpp}
TaskType::PLACEHOLDER -> "placeholder"
TaskType::CUDAFLOW -> "cudaflow"
TaskType::SYCLFLOW -> "syclflow"
TaskType::STATIC -> "static"
TaskType::DYNAMIC -> "subflow"
TaskType::CONDITION -> "condition"
TaskType::MODULE -> "module"
TaskType::ASYNC -> "async"
TaskType::RUNTIME -> "runtime"
@endcode
*/
inline const char* to_string(TaskType type) {
const char* val;
switch(type) {
case TaskType::PLACEHOLDER: val = "placeholder"; break;
case TaskType::CUDAFLOW: val = "cudaflow"; break;
case TaskType::SYCLFLOW: val = "syclflow"; break;
case TaskType::STATIC: val = "static"; break;
case TaskType::DYNAMIC: val = "subflow"; break;
case TaskType::CONDITION: val = "condition"; break;
case TaskType::MODULE: val = "module"; break;
case TaskType::ASYNC: val = "async"; break;
case TaskType::RUNTIME: val = "runtime"; break;
default: val = "undefined"; break;
}
return val;
}
// ----------------------------------------------------------------------------
// Task Traits
// ----------------------------------------------------------------------------
/**
@brief determines if a callable is a static task
A static task is a callable object constructible from std::function<void()>.
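A hedged illustration of the trait:
@code{.cpp}
auto callable = [](){};
static_assert(tf::is_static_task_v<decltype(callable)>, "");
@endcode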
*/
template <typename C>
constexpr bool is_static_task_v =
std::is_invocable_r_v<void, C> &&
!std::is_invocable_r_v<int, C> &&
!std::is_invocable_r_v<tf::SmallVector<int>, C>;
/**
@brief determines if a callable is a dynamic task
A dynamic task is a callable object constructible from std::function<void(Subflow&)>.
*/
template <typename C>
constexpr bool is_dynamic_task_v = std::is_invocable_r_v<void, C, Subflow&>;
/**
@brief determines if a callable is a condition task
A condition task is a callable object constructible from std::function<int()>.
*/
template <typename C>
constexpr bool is_condition_task_v = std::is_invocable_r_v<int, C>;
/**
@brief determines if a callable is a multi-condition task
A multi-condition task is a callable object constructible from
std::function<tf::SmallVector<int>()>.
*/
template <typename C>
constexpr bool is_multi_condition_task_v =
std::is_invocable_r_v<SmallVector<int>, C>;
/**
@brief determines if a callable is a %cudaFlow task
A cudaFlow task is a callable object constructible from
std::function<void(tf::cudaFlow&)> or std::function<void(tf::cudaFlowCapturer&)>.
*/
template <typename C>
constexpr bool is_cudaflow_task_v = std::is_invocable_r_v<void, C, cudaFlow&> ||
std::is_invocable_r_v<void, C, cudaFlowCapturer&>;
/**
@brief determines if a callable is a %syclFlow task
A syclFlow task is a callable object constructible from
std::function<void(tf::syclFlow&)>.
*/
template <typename C>
constexpr bool is_syclflow_task_v = std::is_invocable_r_v<void, C, syclFlow&>;
/**
@brief determines if a callable is a runtime task
A runtime task is a callable object constructible from
std::function<void(tf::Runtime&)>.
*/
template <typename C>
constexpr bool is_runtime_task_v = std::is_invocable_r_v<void, C, Runtime&>;
// ----------------------------------------------------------------------------
// Task
// ----------------------------------------------------------------------------
/**
@class Task
@brief class to create a task handle over a node in a taskflow graph
A task is a wrapper over a node in a taskflow graph.
It provides a set of methods for users to access and modify the attributes of
the associated node in the taskflow graph.
A task is a very lightweight object (i.e., it only stores a node pointer)
that can be trivially copied around,
and it does not own the lifetime of the associated node.
*/
class Task {
friend class FlowBuilder;
friend class Runtime;
friend class Taskflow;
friend class TaskView;
friend class Executor;
public:
/**
@brief constructs an empty task
*/
Task() = default;
/**
@brief constructs the task with the copy of the other task
*/
Task(const Task& other);
/**
@brief replaces the contents with a copy of the other task
*/
Task& operator = (const Task&);
/**
@brief replaces the contents with a null pointer
*/
Task& operator = (std::nullptr_t);
/**
@brief compares if two tasks are associated with the same graph node
*/
bool operator == (const Task& rhs) const;
/**
@brief compares if two tasks are not associated with the same graph node
*/
bool operator != (const Task& rhs) const;
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors of the task
*/
size_t num_successors() const;
/**
@brief queries the number of predecessors of the task
*/
size_t num_dependents() const;
/**
@brief queries the number of strong dependents of the task
*/
size_t num_strong_dependents() const;
/**
@brief queries the number of weak dependents of the task
*/
size_t num_weak_dependents() const;
/**
@brief assigns a name to the task
@param name a @std_string acceptable string
@return @c *this
*/
Task& name(const std::string& name);
/**
@brief assigns a callable
@tparam C callable type
@param callable callable to construct one of the static, dynamic, condition,
and cudaFlow tasks
@return @c *this
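A brief usage sketch (assigning a static-task callable to a placeholder):
@code{.cpp}
tf::Task task = taskflow.placeholder();
task.work([](){ std::cout << "run a static task\n"; });
@endcode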
*/
template <typename C>
Task& work(C&& callable);
/**
@brief creates a module task from a taskflow
@tparam T object type
@param object a custom object that defines @c T::graph() method
@return @c *this
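A brief usage sketch (assuming @c taskflow2 is another tf::Taskflow whose
graph should run as a module of @c taskflow1):
@code{.cpp}
tf::Taskflow taskflow1, taskflow2;
// ... build taskflow2 ...
tf::Task module = taskflow1.placeholder().composed_of(taskflow2);
@endcode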
*/
template <typename T>
Task& composed_of(T& object);
/**
@brief adds precedence links from this to other tasks
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
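A brief usage sketch:
@code{.cpp}
tf::Task A = taskflow.emplace([](){});
tf::Task B = taskflow.emplace([](){});
tf::Task C = taskflow.emplace([](){});
A.precede(B, C);  // A runs before both B and C
@endcode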
*/
template <typename... Ts>
Task& precede(Ts&&... tasks);
/**
@brief adds precedence links from other tasks to this
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
Task& succeed(Ts&&... tasks);
/**
@brief makes the task release this semaphore
*/
Task& release(Semaphore& semaphore);
/**
@brief makes the task acquire this semaphore
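A brief usage sketch (a semaphore of capacity one serializes the two tasks):
@code{.cpp}
tf::Semaphore semaphore(1);
tf::Task A = taskflow.emplace([](){});
tf::Task B = taskflow.emplace([](){});
A.acquire(semaphore).release(semaphore);
B.acquire(semaphore).release(semaphore);
@endcode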
*/
Task& acquire(Semaphore& semaphore);
/**
@brief assigns pointer to user data
@param data pointer to user data
The following example shows how to attach user data to a task and
run the task iteratively while changing the data value:
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow("attach data to a task");
int data;
// create a task and attach it the data
auto A = taskflow.placeholder();
A.data(&data).work([A](){
auto d = *static_cast<int*>(A.data());
std::cout << "data is " << d << std::endl;
});
// run the taskflow iteratively with changing data
for(data = 0; data<10; data++){
executor.run(taskflow).wait();
}
@endcode
@return @c *this
*/
Task& data(void* data);
/**
@brief assigns a priority value to the task
A priority value can be one of the following three levels,
tf::TaskPriority::HIGH (numerically equivalent to 0),
tf::TaskPriority::NORMAL (numerically equivalent to 1), and
tf::TaskPriority::LOW (numerically equivalent to 2).
The smaller the priority value, the higher the priority.
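A brief usage sketch:
@code{.cpp}
tf::Task task = taskflow.emplace([](){});
task.priority(tf::TaskPriority::HIGH);
@endcode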
*/
Task& priority(TaskPriority p);
/**
@brief queries the priority value of the task
*/
TaskPriority priority() const;
/**
@brief resets the task handle to null
*/
void reset();
/**
@brief resets the associated work to a placeholder
*/
void reset_work();
/**
@brief queries if the task handle points to a task node
*/
bool empty() const;
/**
@brief queries if the task has a work assigned
*/
bool has_work() const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
/**
@brief obtains a hash value of the underlying node
*/
size_t hash_value() const;
/**
@brief returns the task type
*/
TaskType type() const;
/**
@brief dumps the task through an output stream
*/
void dump(std::ostream& ostream) const;
/**
@brief queries pointer to user data
*/
void* data() const;
private:
Task(Node*);
Node* _node {nullptr};
};
// Constructor
inline Task::Task(Node* node) : _node {node} {
}
// Constructor
inline Task::Task(const Task& rhs) : _node {rhs._node} {
}
// Function: precede
template <typename... Ts>
Task& Task::precede(Ts&&... tasks) {
(_node->_precede(tasks._node), ...);
//_precede(std::forward<Ts>(tasks)...);
return *this;
}
// Function: succeed
template <typename... Ts>
Task& Task::succeed(Ts&&... tasks) {
(tasks._node->_precede(_node), ...);
//_succeed(std::forward<Ts>(tasks)...);
return *this;
}
// Function: composed_of
template <typename T>
Task& Task::composed_of(T& object) {
_node->_handle.emplace<Node::Module>(object);
return *this;
}
// Operator =
inline Task& Task::operator = (const Task& rhs) {
_node = rhs._node;
return *this;
}
// Operator =
inline Task& Task::operator = (std::nullptr_t ptr) {
_node = ptr;
return *this;
}
// Operator ==
inline bool Task::operator == (const Task& rhs) const {
return _node == rhs._node;
}
// Operator !=
inline bool Task::operator != (const Task& rhs) const {
return _node != rhs._node;
}
// Function: name
inline Task& Task::name(const std::string& name) {
_node->_name = name;
return *this;
}
// Function: acquire
inline Task& Task::acquire(Semaphore& s) {
if(!_node->_semaphores) {
_node->_semaphores = std::make_unique<Node::Semaphores>();
}
_node->_semaphores->to_acquire.push_back(&s);
return *this;
}
// Function: release
inline Task& Task::release(Semaphore& s) {
if(!_node->_semaphores) {
//_node->_semaphores.emplace();
_node->_semaphores = std::make_unique<Node::Semaphores>();
}
_node->_semaphores->to_release.push_back(&s);
return *this;
}
// Procedure: reset
inline void Task::reset() {
_node = nullptr;
}
// Procedure: reset_work
inline void Task::reset_work() {
_node->_handle.emplace<std::monostate>();
}
// Function: name
inline const std::string& Task::name() const {
return _node->_name;
}
// Function: num_dependents
inline size_t Task::num_dependents() const {
return _node->num_dependents();
}
// Function: num_strong_dependents
inline size_t Task::num_strong_dependents() const {
return _node->num_strong_dependents();
}
// Function: num_weak_dependents
inline size_t Task::num_weak_dependents() const {
return _node->num_weak_dependents();
}
// Function: num_successors
inline size_t Task::num_successors() const {
return _node->num_successors();
}
// Function: empty
inline bool Task::empty() const {
return _node == nullptr;
}
// Function: has_work
inline bool Task::has_work() const {
return _node ? _node->_handle.index() != 0 : false;
}
// Function: task_type
inline TaskType Task::type() const {
switch(_node->_handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
case Node::DYNAMIC: return TaskType::DYNAMIC;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
case Node::ASYNC: return TaskType::ASYNC;
case Node::SILENT_ASYNC: return TaskType::ASYNC;
case Node::CUDAFLOW: return TaskType::CUDAFLOW;
case Node::SYCLFLOW: return TaskType::SYCLFLOW;
case Node::RUNTIME: return TaskType::RUNTIME;
default: return TaskType::UNDEFINED;
}
}
// Function: for_each_successor
template <typename V>
void Task::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node->_successors.size(); ++i) {
visitor(Task(_node->_successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void Task::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node->_dependents.size(); ++i) {
visitor(Task(_node->_dependents[i]));
}
}
// Function: hash_value
inline size_t Task::hash_value() const {
return std::hash<Node*>{}(_node);
}
// Procedure: dump
inline void Task::dump(std::ostream& os) const {
os << "task ";
if(name().empty()) os << _node;
else os << name();
os << " [type=" << to_string(type()) << ']';
}
// Function: work
template <typename C>
Task& Task::work(C&& c) {
if constexpr(is_static_task_v<C>) {
_node->_handle.emplace<Node::Static>(std::forward<C>(c));
}
else if constexpr(is_dynamic_task_v<C>) {
_node->_handle.emplace<Node::Dynamic>(std::forward<C>(c));
}
else if constexpr(is_condition_task_v<C>) {
_node->_handle.emplace<Node::Condition>(std::forward<C>(c));
}
else if constexpr(is_multi_condition_task_v<C>) {
_node->_handle.emplace<Node::MultiCondition>(std::forward<C>(c));
}
else if constexpr(is_cudaflow_task_v<C>) {
_node->_handle.emplace<Node::cudaFlow>(std::forward<C>(c));
}
else if constexpr(is_runtime_task_v<C>) {
_node->_handle.emplace<Node::Runtime>(std::forward<C>(c));
}
else {
static_assert(dependent_false_v<C>, "invalid task callable");
}
return *this;
}
// Function: data
inline void* Task::data() const {
return _node->_data;
}
// Function: data
inline Task& Task::data(void* data) {
_node->_data = data;
return *this;
}
// Function: priority
inline Task& Task::priority(TaskPriority p) {
_node->_priority = static_cast<unsigned>(p);
return *this;
}
// Function: priority
inline TaskPriority Task::priority() const {
return static_cast<TaskPriority>(_node->_priority);
}
// ----------------------------------------------------------------------------
// global ostream
// ----------------------------------------------------------------------------
/**
@brief overload of ostream inserter operator for Task
*/
inline std::ostream& operator << (std::ostream& os, const Task& task) {
task.dump(os);
return os;
}
// ----------------------------------------------------------------------------
/**
@class TaskView
@brief class to access task information from the observer interface
*/
class TaskView {
friend class Executor;
public:
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors of the task
*/
size_t num_successors() const;
/**
@brief queries the number of predecessors of the task
*/
size_t num_dependents() const;
/**
@brief queries the number of strong dependents of the task
*/
size_t num_strong_dependents() const;
/**
@brief queries the number of weak dependents of the task
*/
size_t num_weak_dependents() const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
/**
@brief queries the task type
*/
TaskType type() const;
/**
@brief obtains a hash value of the underlying node
*/
size_t hash_value() const;
private:
TaskView(const Node&);
TaskView(const TaskView&) = default;
const Node& _node;
};
// Constructor
inline TaskView::TaskView(const Node& node) : _node {node} {
}
// Function: name
inline const std::string& TaskView::name() const {
return _node._name;
}
// Function: num_dependents
inline size_t TaskView::num_dependents() const {
return _node.num_dependents();
}
// Function: num_strong_dependents
inline size_t TaskView::num_strong_dependents() const {
return _node.num_strong_dependents();
}
// Function: num_weak_dependents
inline size_t TaskView::num_weak_dependents() const {
return _node.num_weak_dependents();
}
// Function: num_successors
inline size_t TaskView::num_successors() const {
return _node.num_successors();
}
// Function: type
inline TaskType TaskView::type() const {
switch(_node._handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
case Node::DYNAMIC: return TaskType::DYNAMIC;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
case Node::ASYNC: return TaskType::ASYNC;
case Node::SILENT_ASYNC: return TaskType::ASYNC;
case Node::CUDAFLOW: return TaskType::CUDAFLOW;
case Node::SYCLFLOW: return TaskType::SYCLFLOW;
case Node::RUNTIME: return TaskType::RUNTIME;
default: return TaskType::UNDEFINED;
}
}
// Function: hash_value
inline size_t TaskView::hash_value() const {
return std::hash<const Node*>{}(&_node);
}
// Function: for_each_successor
template <typename V>
void TaskView::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node._successors.size(); ++i) {
visitor(TaskView(_node._successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void TaskView::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node._dependents.size(); ++i) {
visitor(TaskView(_node._dependents[i]));
}
}
} // end of namespace tf. ---------------------------------------------------
namespace std {
/**
@struct hash
@brief hash specialization for std::hash<tf::Task>
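A minimal usage sketch (assuming a taskflow object that creates the task):
@code{.cpp}
std::unordered_set<tf::Task> tasks;
tasks.insert(taskflow.emplace([](){}));
@endcode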
*/
template <>
struct hash<tf::Task> {
auto operator() (const tf::Task& task) const noexcept {
return task.hash_value();
}
};
/**
@struct hash
@brief hash specialization for std::hash<tf::TaskView>
*/
template <>
struct hash<tf::TaskView> {
auto operator() (const tf::TaskView& task_view) const noexcept {
return task_view.hash_value();
}
};
} // end of namespace std ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/error.hpp | #pragma once
#include <iostream>
#include <sstream>
#include <exception>
#include "../utility/stream.hpp"
namespace tf {
// Procedure: throw_re
// Throws a std::runtime_error with the given file name, line number, and message.
template <typename... ArgsT>
//void throw_se(const char* fname, const size_t line, Error::Code c, ArgsT&&... args) {
void throw_re(const char* fname, const size_t line, ArgsT&&... args) {
std::ostringstream oss;
oss << "[" << fname << ":" << line << "] ";
//ostreamize(oss, std::forward<ArgsT>(args)...);
(oss << ... << args);
throw std::runtime_error(oss.str());
}
} // ------------------------------------------------------------------------
#define TF_THROW(...) tf::throw_re(__FILE__, __LINE__, __VA_ARGS__);
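// Usage sketch: the variadic arguments are streamed into the exception message.
//
//   TF_THROW("invalid argument: ", 42);
//   // throws std::runtime_error whose message reads "[<file>:<line>] invalid argument: 42"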
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/async_task.hpp | #pragma once
#include "graph.hpp"
/**
@file async_task.hpp
@brief asynchronous task include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// AsyncTask
// ----------------------------------------------------------------------------
/**
@brief class to create a dependent asynchronous task
A tf::AsyncTask is a lightweight handle that retains @em shared ownership
of a dependent async task created by an executor.
This shared ownership ensures that the async task remains alive when
adding it to the dependency list of another async task,
thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem).
@code{.cpp}
// main thread retains shared ownership of async task A
tf::AsyncTask A = executor.silent_dependent_async([](){});
// task A remains alive (i.e., at least one ref count by the main thread)
// when being added to the dependency list of async task B
tf::AsyncTask B = executor.silent_dependent_async([](){}, A);
@endcode
Currently, tf::AsyncTask is implemented based on the logic of
C++ smart pointer std::shared_ptr and
is considered cheap to copy or move as long as only a handful of objects
own it.
When a worker completes an async task, it will remove the task from the executor,
decrementing the number of shared owners by one.
If that counter reaches zero, the task is destroyed.
*/
class AsyncTask {
friend class Executor;
public:
/**
@brief constructs an empty task handle
*/
AsyncTask() = default;
/**
@brief destroys the managed asynchronous task if this is the last owner
*/
~AsyncTask();
/**
@brief constructs an asynchronous task that shares ownership of @c rhs
*/
AsyncTask(const AsyncTask& rhs);
/**
@brief move-constructs an asynchronous task from @c rhs
*/
AsyncTask(AsyncTask&& rhs);
/**
@brief copy-assigns the asynchronous task from @c rhs
Releases the managed object of @c this and retains a new shared ownership
of @c rhs.
*/
AsyncTask& operator = (const AsyncTask& rhs);
/**
@brief move-assigns the asynchronous task from @c rhs
Releases the managed object of @c this and takes over the ownership of @c rhs.
*/
AsyncTask& operator = (AsyncTask&& rhs);
/**
@brief checks if the asynchronous task stores nothing
*/
bool empty() const;
/**
@brief releases the managed object of @c this
*/
void reset();
/**
@brief obtains a hash value of this asynchronous task
*/
size_t hash_value() const;
/**
@brief returns the number of shared owners that are currently managing
this asynchronous task
*/
size_t use_count() const;
/**
@brief returns a boolean indicating whether the asynchronous task has finished
*/
bool is_done() const;
private:
explicit AsyncTask(Node*);
Node* _node {nullptr};
void _incref();
void _decref();
};
// Constructor
inline AsyncTask::AsyncTask(Node* ptr) : _node{ptr} {
_incref();
}
// Function: _incref
inline void AsyncTask::_incref() {
if(_node) {
std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_add(
1, std::memory_order_relaxed
);
}
}
// Function: _decref
inline void AsyncTask::_decref() {
if(_node && std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_sub(
1, std::memory_order_acq_rel
) == 1) {
node_pool.recycle(_node);
}
}
// Copy Constructor
inline AsyncTask::AsyncTask(const AsyncTask& rhs) :
_node{rhs._node} {
_incref();
}
// Move Constructor
inline AsyncTask::AsyncTask(AsyncTask&& rhs) :
_node {rhs._node} {
rhs._node = nullptr;
}
// Destructor
inline AsyncTask::~AsyncTask() {
_decref();
}
// Copy assignment
inline AsyncTask& AsyncTask::operator = (const AsyncTask& rhs) {
_decref();
_node = rhs._node;
_incref();
return *this;
}
// Move assignment
inline AsyncTask& AsyncTask::operator = (AsyncTask&& rhs) {
_decref();
_node = rhs._node;
rhs._node = nullptr;
return *this;
}
// Function: empty
inline bool AsyncTask::empty() const {
return _node == nullptr;
}
// Function: reset
inline void AsyncTask::reset() {
_decref();
_node = nullptr;
}
// Function: hash_value
inline size_t AsyncTask::hash_value() const {
return std::hash<Node*>{}(_node);
}
// Function: use_count
inline size_t AsyncTask::use_count() const {
return _node == nullptr ? size_t{0} :
std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.load(
std::memory_order_relaxed
);
}
// Function: is_done
inline bool AsyncTask::is_done() const {
return std::get_if<Node::DependentAsync>(&(_node->_handle))->state.load(
std::memory_order_acquire
) == Node::AsyncState::FINISHED;
}
} // end of namespace tf ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/async.hpp | #pragma once
#include "executor.hpp"
// https://hackmd.io/@sysprog/concurrency-atomics
namespace tf {
// ----------------------------------------------------------------------------
// Async
// ----------------------------------------------------------------------------
// Function: async
template <typename F>
auto Executor::async(const std::string& name, F&& f) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
auto node = node_pool.animate(
name, 0, nullptr, nullptr, 0,
std::in_place_type_t<Node::Async>{},
_make_promised_async(std::move(p), std::forward<F>(f))
);
_schedule_async_task(node);
return fu;
}
// Function: async
template <typename F>
auto Executor::async(F&& f) {
return async("", std::forward<F>(f));
}
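// Usage sketch for Executor::async (the returned std::future carries the callable's result):
//
//   tf::Executor executor;
//   std::future<int> fu = executor.async([](){ return 42; });
//   assert(fu.get() == 42);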
// ----------------------------------------------------------------------------
// Silent Async
// ----------------------------------------------------------------------------
// Function: silent_async
template <typename F>
void Executor::silent_async(const std::string& name, F&& f) {
_increment_topology();
auto node = node_pool.animate(
name, 0, nullptr, nullptr, 0,
std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
);
_schedule_async_task(node);
}
// Function: silent_async
template <typename F>
void Executor::silent_async(F&& f) {
silent_async("", std::forward<F>(f));
}
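// Usage sketch for Executor::silent_async (fire-and-forget, no future is returned);
// waiting for completion through Executor::wait_for_all is an assumption of this sketch:
//
//   tf::Executor executor;
//   executor.silent_async("print", [](){ std::cout << "hello" << std::endl; });
//   executor.wait_for_all();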
// ----------------------------------------------------------------------------
// Async Helper Methods
// ----------------------------------------------------------------------------
// Function: _make_promised_async
template <typename R, typename F>
auto Executor::_make_promised_async(std::promise<R>&& p, F&& func) {
return [p=make_moc(std::move(p)), func=std::forward<F>(func)]() mutable {
if constexpr(std::is_same_v<R, void>) {
func();
p.object.set_value();
}
else {
p.object.set_value(func());
}
};
}
// Procedure: _schedule_async_task
inline void Executor::_schedule_async_task(Node* node) {
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
// from runtime
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1, std::memory_order_release);
}
// from executor
else {
_decrement_topology();
}
node_pool.recycle(node);
}
// ----------------------------------------------------------------------------
// Silent Dependent Async
// ----------------------------------------------------------------------------
// Function: silent_dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) {
return silent_dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
}
// Function: silent_dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(
const std::string& name, F&& func, Tasks&&... tasks
){
_increment_topology();
size_t num_dependents = sizeof...(Tasks);
// create a task before scheduling the node to retain a shared ownership first
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
));
if constexpr(sizeof...(Tasks) > 0) {
(_process_async_dependent(task._node, tasks, num_dependents), ...);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return task;
}
// Function: silent_dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) {
return silent_dependent_async("", std::forward<F>(func), first, last);
}
// Function: silent_dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
tf::AsyncTask Executor::silent_dependent_async(
const std::string& name, F&& func, I first, I last
) {
_increment_topology();
size_t num_dependents = std::distance(first, last);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
));
for(; first != last; first++){
_process_async_dependent(task._node, *first, num_dependents);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return task;
}
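// Usage sketch for the iterator-range overload (dependencies collected in a container):
//
//   std::vector<tf::AsyncTask> deps {
//     executor.silent_dependent_async([](){}),
//     executor.silent_dependent_async([](){})
//   };
//   executor.silent_dependent_async(
//     [](){ /* runs only after both dependencies finish */ }, deps.begin(), deps.end()
//   );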
// ----------------------------------------------------------------------------
// Dependent Async
// ----------------------------------------------------------------------------
// Function: dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
auto Executor::dependent_async(F&& func, Tasks&&... tasks) {
return dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
}
// Function: dependent_async
template <typename F, typename... Tasks,
std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
>
auto Executor::dependent_async(
const std::string& name, F&& func, Tasks&&... tasks
) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
size_t num_dependents = sizeof...(tasks);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{},
_make_promised_async(std::move(p), std::forward<F>(func))
));
if constexpr(sizeof...(Tasks) > 0) {
(_process_async_dependent(task._node, tasks, num_dependents), ...);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return std::make_pair(std::move(task), std::move(fu));
}
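// Usage sketch for Executor::dependent_async (returns a pair of tf::AsyncTask and std::future):
//
//   auto [A, fuA] = executor.dependent_async([](){ return 1; });
//   auto [B, fuB] = executor.dependent_async([](){ return 2; }, A);
//   fuB.get();  // B runs only after A has finished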
// Function: dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
auto Executor::dependent_async(F&& func, I first, I last) {
return dependent_async("", std::forward<F>(func), first, last);
}
// Function: dependent_async
template <typename F, typename I,
std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
>
auto Executor::dependent_async(
const std::string& name, F&& func, I first, I last
) {
_increment_topology();
using R = std::invoke_result_t<std::decay_t<F>>;
std::promise<R> p;
auto fu{p.get_future()};
size_t num_dependents = std::distance(first, last);
AsyncTask task(node_pool.animate(
name, 0, nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{},
_make_promised_async(std::move(p), std::forward<F>(func))
));
for(; first != last; first++) {
_process_async_dependent(task._node, *first, num_dependents);
}
if(num_dependents == 0) {
_schedule_async_task(task._node);
}
return std::make_pair(std::move(task), std::move(fu));
}
// ----------------------------------------------------------------------------
// Dependent Async Helper Functions
// ----------------------------------------------------------------------------
// Procedure: _process_async_dependent
inline void Executor::_process_async_dependent(
Node* node, tf::AsyncTask& task, size_t& num_dependents
) {
auto& state = std::get_if<Node::DependentAsync>(&(task._node->_handle))->state;
add_successor:
auto target = Node::AsyncState::UNFINISHED;
// acquires the lock
if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
task._node->_successors.push_back(node);
state.store(Node::AsyncState::UNFINISHED, std::memory_order_release);
}
// dep's state is FINISHED, which means dep finished its callable already
// thus decrement the node's join counter by 1
else if (target == Node::AsyncState::FINISHED) {
num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
}
// another worker is concurrently adding its async task to this node's successor list
else {
goto add_successor;
}
}
// Procedure: _tear_down_dependent_async
inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) {
auto handle = std::get_if<Node::DependentAsync>(&(node->_handle));
// this async task comes from Executor
auto target = Node::AsyncState::UNFINISHED;
while(!handle->state.compare_exchange_weak(target, Node::AsyncState::FINISHED,
std::memory_order_acq_rel,
std::memory_order_relaxed)) {
target = Node::AsyncState::UNFINISHED;
}
// spawn successors whenever their dependencies are resolved
worker._cache = nullptr;
for(size_t i=0; i<node->_successors.size(); ++i) {
if(auto s = node->_successors[i];
s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1
) {
if(worker._cache) {
_schedule(worker, worker._cache);
}
worker._cache = s;
}
}
// now the executor no longer needs to retain ownership
if(handle->use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
node_pool.recycle(node);
}
_decrement_topology();
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/environment.hpp | #pragma once
#define TF_ENABLE_PROFILER "TF_ENABLE_PROFILER"
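// Usage sketch: setting this environment variable to a file path enables the built-in
// profiler; TFProfManager (see observer.hpp) writes the collected timelines to that
// path when the program exits, e.g.
//
//   TF_ENABLE_PROFILER=profile.json ./my_app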
namespace tf {
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/observer.hpp | #pragma once
#include "task.hpp"
#include "worker.hpp"
/**
@file observer.hpp
@brief observer include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// timeline data structure
// ----------------------------------------------------------------------------
/**
@brief default time point type of observers
*/
using observer_stamp_t = std::chrono::time_point<std::chrono::steady_clock>;
/**
@private
*/
struct Segment {
std::string name;
TaskType type;
observer_stamp_t beg;
observer_stamp_t end;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(name, type, beg, end);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(name, type, beg, end);
}
Segment() = default;
Segment(
const std::string& n, TaskType t, observer_stamp_t b, observer_stamp_t e
) : name {n}, type {t}, beg {b}, end {e} {
}
auto span() const {
return end-beg;
}
};
/**
@private
*/
struct Timeline {
size_t uid;
observer_stamp_t origin;
std::vector<std::vector<std::vector<Segment>>> segments;
Timeline() = default;
Timeline(const Timeline& rhs) = delete;
Timeline(Timeline&& rhs) = default;
Timeline& operator = (const Timeline& rhs) = delete;
Timeline& operator = (Timeline&& rhs) = default;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(uid, origin, segments);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(uid, origin, segments);
}
};
/**
@private
*/
struct ProfileData {
std::vector<Timeline> timelines;
ProfileData() = default;
ProfileData(const ProfileData& rhs) = delete;
ProfileData(ProfileData&& rhs) = default;
ProfileData& operator = (const ProfileData& rhs) = delete;
ProfileData& operator = (ProfileData&&) = default;
template <typename Archiver>
auto save(Archiver& ar) const {
return ar(timelines);
}
template <typename Archiver>
auto load(Archiver& ar) {
return ar(timelines);
}
};
// ----------------------------------------------------------------------------
// observer interface
// ----------------------------------------------------------------------------
/**
@class ObserverInterface
@brief class to derive an executor observer
The tf::ObserverInterface class allows users to define custom methods to monitor
the behaviors of an executor. This is particularly useful when you want to
inspect the performance of an executor and visualize when each thread
participates in the execution of a task.
To prevent users from direct access to the internal threads and tasks,
tf::ObserverInterface provides immutable wrappers,
tf::WorkerView and tf::TaskView, over workers and tasks.
Please refer to tf::WorkerView and tf::TaskView for details.
Example usage:
@code{.cpp}
struct MyObserver : public tf::ObserverInterface {
MyObserver(const std::string& name) {
std::cout << "constructing observer " << name << '\n';
}
void set_up(size_t num_workers) override final {
std::cout << "setting up observer with " << num_workers << " workers\n";
}
void on_entry(WorkerView w, tf::TaskView tv) override final {
std::ostringstream oss;
oss << "worker " << w.id() << " ready to run " << tv.name() << '\n';
std::cout << oss.str();
}
void on_exit(WorkerView w, tf::TaskView tv) override final {
std::ostringstream oss;
oss << "worker " << w.id() << " finished running " << tv.name() << '\n';
std::cout << oss.str();
}
};
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<MyObserver> observer = executor.make_observer<MyObserver>("MyObserver");
// run the taskflow
executor.run(taskflow).wait();
@endcode
*/
class ObserverInterface {
public:
/**
@brief virtual destructor
*/
virtual ~ObserverInterface() = default;
/**
@brief constructor-like method to call when the executor observer is fully created
@param num_workers the number of worker threads in the executor
*/
virtual void set_up(size_t num_workers) = 0;
/**
@brief method to call before a worker thread executes a closure
@param wv an immutable view of this worker thread
@param task_view a constant wrapper object to the task
*/
virtual void on_entry(WorkerView wv, TaskView task_view) = 0;
/**
@brief method to call after a worker thread executes a closure
@param wv an immutable view of this worker thread
@param task_view a constant wrapper object to the task
*/
virtual void on_exit(WorkerView wv, TaskView task_view) = 0;
};
// ----------------------------------------------------------------------------
// ChromeObserver definition
// ----------------------------------------------------------------------------
/**
@class ChromeObserver
@brief class to create an observer based on Chrome tracing format
A tf::ChromeObserver inherits tf::ObserverInterface and defines methods to dump
the observed thread activities into a format that can be visualized through
@ChromeTracing.
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<tf::ChromeObserver> observer = executor.make_observer<tf::ChromeObserver>();
// run the taskflow
executor.run(taskflow).wait();
// dump the thread activities to a chrome-tracing format.
observer->dump(std::cout);
@endcode
*/
class ChromeObserver : public ObserverInterface {
friend class Executor;
// data structure to record each task execution
struct Segment {
std::string name;
observer_stamp_t beg;
observer_stamp_t end;
Segment(
const std::string& n,
observer_stamp_t b,
observer_stamp_t e
);
};
// data structure to store the entire execution timeline
struct Timeline {
observer_stamp_t origin;
std::vector<std::vector<Segment>> segments;
std::vector<std::stack<observer_stamp_t>> stacks;
};
public:
/**
@brief dumps the timelines into a @ChromeTracing format through
an output stream
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the timelines into a @ChromeTracing format
*/
inline std::string dump() const;
/**
@brief clears the timeline data
*/
inline void clear();
/**
@brief queries the number of tasks observed
*/
inline size_t num_tasks() const;
private:
inline void set_up(size_t num_workers) override final;
inline void on_entry(WorkerView w, TaskView task_view) override final;
inline void on_exit(WorkerView w, TaskView task_view) override final;
Timeline _timeline;
};
// constructor
inline ChromeObserver::Segment::Segment(
const std::string& n, observer_stamp_t b, observer_stamp_t e
) :
name {n}, beg {b}, end {e} {
}
// Procedure: set_up
inline void ChromeObserver::set_up(size_t num_workers) {
_timeline.segments.resize(num_workers);
_timeline.stacks.resize(num_workers);
for(size_t w=0; w<num_workers; ++w) {
_timeline.segments[w].reserve(32);
}
_timeline.origin = observer_stamp_t::clock::now();
}
// Procedure: on_entry
inline void ChromeObserver::on_entry(WorkerView wv, TaskView) {
_timeline.stacks[wv.id()].push(observer_stamp_t::clock::now());
}
// Procedure: on_exit
inline void ChromeObserver::on_exit(WorkerView wv, TaskView tv) {
size_t w = wv.id();
assert(!_timeline.stacks[w].empty());
auto beg = _timeline.stacks[w].top();
_timeline.stacks[w].pop();
_timeline.segments[w].emplace_back(
tv.name(), beg, observer_stamp_t::clock::now()
);
}
// Function: clear
inline void ChromeObserver::clear() {
for(size_t w=0; w<_timeline.segments.size(); ++w) {
_timeline.segments[w].clear();
while(!_timeline.stacks[w].empty()) {
_timeline.stacks[w].pop();
}
}
}
// Procedure: dump
inline void ChromeObserver::dump(std::ostream& os) const {
using namespace std::chrono;
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
os << '[';
for(size_t w=first; w<_timeline.segments.size(); w++) {
if(w != first && _timeline.segments[w].size() > 0) {
os << ',';
}
for(size_t i=0; i<_timeline.segments[w].size(); i++) {
os << '{'<< "\"cat\":\"ChromeObserver\",";
// name field
os << "\"name\":\"";
if(_timeline.segments[w][i].name.empty()) {
os << w << '_' << i;
}
else {
os << _timeline.segments[w][i].name;
}
os << "\",";
// segment field
os << "\"ph\":\"X\","
<< "\"pid\":1,"
<< "\"tid\":" << w << ','
<< "\"ts\":" << duration_cast<microseconds>(
_timeline.segments[w][i].beg - _timeline.origin
).count() << ','
<< "\"dur\":" << duration_cast<microseconds>(
_timeline.segments[w][i].end - _timeline.segments[w][i].beg
).count();
if(i != _timeline.segments[w].size() - 1) {
os << "},";
}
else {
os << '}';
}
}
}
os << "]\n";
}
// Function: dump
inline std::string ChromeObserver::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Function: num_tasks
inline size_t ChromeObserver::num_tasks() const {
return std::accumulate(
_timeline.segments.begin(), _timeline.segments.end(), size_t{0},
[](size_t sum, const auto& exe){
return sum + exe.size();
}
);
}
// ----------------------------------------------------------------------------
// TFProfObserver definition
// ----------------------------------------------------------------------------
/**
@class TFProfObserver
@brief class to create an observer based on the built-in taskflow profiler format
A tf::TFProfObserver inherits tf::ObserverInterface and defines methods to dump
the observed thread activities into a format that can be visualized through
@TFProf.
@code{.cpp}
tf::Taskflow taskflow;
tf::Executor executor;
// insert tasks into taskflow
// ...
// create a custom observer
std::shared_ptr<tf::TFProfObserver> observer = executor.make_observer<tf::TFProfObserver>();
// run the taskflow
executor.run(taskflow).wait();
// dump the thread activities to Taskflow Profiler format.
observer->dump(std::cout);
@endcode
*/
class TFProfObserver : public ObserverInterface {
friend class Executor;
friend class TFProfManager;
/** @private overall task summary */
struct TaskSummary {
size_t count {0};
size_t total_span {0};
size_t min_span;
size_t max_span;
float avg_span() const { return total_span * 1.0f / count; }
};
/** @private worker summary at a level */
struct WorkerSummary {
size_t id;
size_t level;
size_t count {0};
size_t total_span {0};
size_t min_span{0};
size_t max_span{0};
std::array<TaskSummary, TASK_TYPES.size()> tsum;
float avg_span() const { return total_span * 1.0f / count; }
//return count < 2 ? 0.0f : total_delay * 1.0f / (count-1);
};
/** @private */
struct Summary {
std::array<TaskSummary, TASK_TYPES.size()> tsum;
std::vector<WorkerSummary> wsum;
void dump_tsum(std::ostream&) const;
void dump_wsum(std::ostream&) const;
void dump(std::ostream&) const;
};
public:
/**
@brief dumps the timelines into a @TFProf format through
an output stream
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the timelines into a JSON string
*/
std::string dump() const;
/**
@brief shows the summary report through an output stream
*/
void summary(std::ostream& ostream) const;
/**
@brief returns the summary report in a string
*/
std::string summary() const;
/**
@brief clears the timeline data
*/
void clear();
/**
@brief queries the number of tasks observed
*/
size_t num_tasks() const;
/**
@brief queries the number of observed workers
*/
size_t num_workers() const;
private:
Timeline _timeline;
std::vector<std::stack<observer_stamp_t>> _stacks;
inline void set_up(size_t num_workers) override final;
inline void on_entry(WorkerView, TaskView) override final;
inline void on_exit(WorkerView, TaskView) override final;
};
// dump the task summary
inline void TFProfObserver::Summary::dump_tsum(std::ostream& os) const {
// task summary
size_t type_w{10}, count_w{5}, time_w{9}, avg_w{8}, min_w{8}, max_w{8};
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
count_w = std::max(count_w, std::to_string(i.count).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
time_w = std::max(time_w, std::to_string(i.total_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
avg_w = std::max(avg_w, std::to_string(i.avg_span()).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
min_w = std::max(min_w, std::to_string(i.min_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
max_w = std::max(max_w, std::to_string(i.max_span).size());
});
os << std::setw(type_w) << "-Task-"
<< std::setw(count_w+2) << "Count"
<< std::setw(time_w+2) << "Time (us)"
<< std::setw(avg_w+2) << "Avg (us)"
<< std::setw(min_w+2) << "Min (us)"
<< std::setw(max_w+2) << "Max (us)"
<< '\n';
for(size_t i=0; i<TASK_TYPES.size(); i++) {
if(tsum[i].count == 0) {
continue;
}
os << std::setw(type_w) << to_string(TASK_TYPES[i])
<< std::setw(count_w+2) << tsum[i].count
<< std::setw(time_w+2) << tsum[i].total_span
<< std::setw(avg_w+2) << std::to_string(tsum[i].avg_span())
<< std::setw(min_w+2) << tsum[i].min_span
<< std::setw(max_w+2) << tsum[i].max_span
<< '\n';
}
}
// dump the worker summary
inline void TFProfObserver::Summary::dump_wsum(std::ostream& os) const {
// task summary
size_t w_w{10}, t_w{10}, l_w{5}, c_w{5}, d_w{9}, avg_w{8}, min_w{8}, max_w{8};
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
l_w = std::max(l_w, std::to_string(i.level).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
c_w = std::max(c_w, std::to_string(i.count).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
d_w = std::max(d_w, std::to_string(i.total_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
avg_w = std::max(avg_w, std::to_string(i.avg_span()).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
min_w = std::max(min_w, std::to_string(i.min_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
max_w = std::max(max_w, std::to_string(i.max_span).size());
});
os << std::setw(w_w) << "-Worker-"
<< std::setw(l_w+2) << "Level"
<< std::setw(t_w) << "Task"
<< std::setw(c_w+2) << "Count"
<< std::setw(d_w+2) << "Time (us)"
<< std::setw(avg_w+2) << "Avg (us)"
<< std::setw(min_w+2) << "Min (us)"
<< std::setw(max_w+2) << "Max (us)"
<< '\n';
for(const auto& ws : wsum) {
if(ws.count == 0) {
continue;
}
os << std::setw(w_w) << ws.id
<< std::setw(l_w+2) << ws.level;
bool first = true;
for(size_t i=0; i<TASK_TYPES.size(); i++) {
if(ws.tsum[i].count == 0) {
continue;
}
os << (first ? std::setw(t_w) : std::setw(w_w + l_w + 2 + t_w));
first = false;
os << to_string(TASK_TYPES[i])
<< std::setw(c_w+2) << ws.tsum[i].count
<< std::setw(d_w+2) << ws.tsum[i].total_span
<< std::setw(avg_w+2) << std::to_string(ws.tsum[i].avg_span())
<< std::setw(min_w+2) << ws.tsum[i].min_span
<< std::setw(max_w+2) << ws.tsum[i].max_span
<< '\n';
}
// per-worker summary
os << std::setw(w_w + l_w + t_w + c_w + 4) << ws.count
<< std::setw(d_w+2) << ws.total_span
<< std::setw(avg_w+2) << std::to_string(ws.avg_span())
<< std::setw(min_w+2) << ws.min_span
<< std::setw(max_w+2) << ws.max_span
<< '\n';
//for(size_t j=0; j<w_w+l_w+t_w+4; j++) os << ' ';
//for(size_t j=0; j<c_w+d_w+avg_w+min_w+max_w+8; j++) os << '-';
//os <<'\n';
}
}
// dump the summary report through an ostream
inline void TFProfObserver::Summary::dump(std::ostream& os) const {
dump_tsum(os);
os << '\n';
dump_wsum(os);
}
// Procedure: set_up
inline void TFProfObserver::set_up(size_t num_workers) {
_timeline.uid = unique_id<size_t>();
_timeline.origin = observer_stamp_t::clock::now();
_timeline.segments.resize(num_workers);
_stacks.resize(num_workers);
}
// Procedure: on_entry
inline void TFProfObserver::on_entry(WorkerView wv, TaskView) {
_stacks[wv.id()].push(observer_stamp_t::clock::now());
}
// Procedure: on_exit
inline void TFProfObserver::on_exit(WorkerView wv, TaskView tv) {
size_t w = wv.id();
assert(!_stacks[w].empty());
if(_stacks[w].size() > _timeline.segments[w].size()) {
_timeline.segments[w].resize(_stacks[w].size());
}
auto beg = _stacks[w].top();
_stacks[w].pop();
_timeline.segments[w][_stacks[w].size()].emplace_back(
tv.name(), tv.type(), beg, observer_stamp_t::clock::now()
);
}
// Function: clear
inline void TFProfObserver::clear() {
for(size_t w=0; w<_timeline.segments.size(); ++w) {
for(size_t l=0; l<_timeline.segments[w].size(); ++l) {
_timeline.segments[w][l].clear();
}
while(!_stacks[w].empty()) {
_stacks[w].pop();
}
}
}
// Procedure: dump
inline void TFProfObserver::dump(std::ostream& os) const {
using namespace std::chrono;
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
// no timeline data to dump
if(first == _timeline.segments.size()) {
os << "{}\n";
return;
}
os << "{\"executor\":\"" << _timeline.uid << "\",\"data\":[";
bool comma = false;
for(size_t w=first; w<_timeline.segments.size(); w++) {
for(size_t l=0; l<_timeline.segments[w].size(); l++) {
if(_timeline.segments[w][l].empty()) {
continue;
}
if(comma) {
os << ',';
}
else {
comma = true;
}
os << "{\"worker\":" << w << ",\"level\":" << l << ",\"data\":[";
for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) {
const auto& s = _timeline.segments[w][l][i];
if(i) os << ',';
// span
os << "{\"span\":["
<< duration_cast<microseconds>(s.beg - _timeline.origin).count()
<< ","
<< duration_cast<microseconds>(s.end - _timeline.origin).count()
<< "],";
// name
os << "\"name\":\"";
if(s.name.empty()) {
os << w << '_' << i;
}
else {
os << s.name;
}
os << "\",";
// e.g., category "type": "Condition Task"
os << "\"type\":\"" << to_string(s.type) << "\"";
os << "}";
}
os << "]}";
}
}
os << "]}\n";
}
// Function: dump
inline std::string TFProfObserver::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Procedure: summary
inline void TFProfObserver::summary(std::ostream& os) const {
using namespace std::chrono;
Summary summary;
std::optional<observer_stamp_t> view_beg, view_end;
// find the first non-empty worker
size_t first;
for(first = 0; first<_timeline.segments.size(); ++first) {
if(_timeline.segments[first].size() > 0) {
break;
}
}
// no timeline data to dump
if(first == _timeline.segments.size()) {
goto end_of_summary;
}
for(size_t w=first; w<_timeline.segments.size(); w++) {
for(size_t l=0; l<_timeline.segments[w].size(); l++) {
if(_timeline.segments[w][l].empty()) {
continue;
}
// worker w at level l
WorkerSummary ws;
ws.id = w;
ws.level = l;
ws.count = _timeline.segments[w][l].size();
// scan all tasks at level l
for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) {
// update the entire span
auto& s = _timeline.segments[w][l][i];
view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg;
view_end = view_end ? std::max(*view_end, s.end) : s.end;
// update the task summary
size_t t = duration_cast<microseconds>(s.end - s.beg).count();
auto& x = summary.tsum[static_cast<int>(s.type)];
x.count += 1;
x.total_span += t;
x.min_span = (x.count == 1) ? t : std::min(t, x.min_span);
x.max_span = (x.count == 1) ? t : std::max(t, x.max_span);
// update the worker summary
ws.total_span += t;
ws.min_span = (i == 0) ? t : std::min(t, ws.min_span);
ws.max_span = (i == 0) ? t : std::max(t, ws.max_span);
auto&y = ws.tsum[static_cast<int>(s.type)];
y.count += 1;
y.total_span += t;
y.min_span = (y.count == 1) ? t : std::min(t, y.min_span);
y.max_span = (y.count == 1) ? t : std::max(t, y.max_span);
// update the delay
//if(i) {
// size_t d = duration_cast<nanoseconds>(
// s.beg - _timeline.segments[w][l][i-1].end
// ).count();
// ws.total_delay += d;
// ws.min_delay = (i == 1) ? d : std::min(ws.min_delay, d);
// ws.max_delay = (i == 1) ? d : std::max(ws.max_delay, d);
//}
}
summary.wsum.push_back(ws);
}
}
end_of_summary:
size_t view = 0;
if(view_beg && view_end) {
view = duration_cast<microseconds>(*view_end - *view_beg).count();
}
os << "==Observer " << _timeline.uid << ": "
<< num_workers() << " workers completed "
<< num_tasks() << " tasks in "
<< view << " us\n";
summary.dump(os);
}
// Procedure: summary
inline std::string TFProfObserver::summary() const {
std::ostringstream oss;
summary(oss);
return oss.str();
}
// Function: num_tasks
inline size_t TFProfObserver::num_tasks() const {
size_t s = 0;
for(size_t w=0; w<_timeline.segments.size(); ++w) {
for(size_t l=0; l<_timeline.segments[w].size(); ++l) {
s += _timeline.segments[w][l].size();
}
}
return s;
}
// Function: num_workers
inline size_t TFProfObserver::num_workers() const {
size_t w = 0;
for(size_t i=0; i<_timeline.segments.size(); ++i) {
w += (!_timeline.segments[i].empty());
}
return w;
}
// ----------------------------------------------------------------------------
// TFProfManager
// ----------------------------------------------------------------------------
/**
@private
*/
class TFProfManager {
friend class Executor;
public:
~TFProfManager();
TFProfManager(const TFProfManager&) = delete;
TFProfManager& operator=(const TFProfManager&) = delete;
static TFProfManager& get();
void dump(std::ostream& ostream) const;
private:
const std::string _fpath;
std::mutex _mutex;
std::vector<std::shared_ptr<TFProfObserver>> _observers;
TFProfManager();
void _manage(std::shared_ptr<TFProfObserver> observer);
};
// constructor
inline TFProfManager::TFProfManager() :
_fpath {get_env(TF_ENABLE_PROFILER)} {
}
// Procedure: manage
inline void TFProfManager::_manage(std::shared_ptr<TFProfObserver> observer) {
std::lock_guard lock(_mutex);
_observers.push_back(std::move(observer));
}
// Procedure: dump
inline void TFProfManager::dump(std::ostream& os) const {
for(size_t i=0; i<_observers.size(); ++i) {
if(i) os << ',';
_observers[i]->dump(os);
}
}
// Destructor
inline TFProfManager::~TFProfManager() {
std::ofstream ofs(_fpath);
if(ofs) {
// .tfp
if(_fpath.rfind(".tfp") != std::string::npos) {
ProfileData data;
data.timelines.reserve(_observers.size());
for(size_t i=0; i<_observers.size(); ++i) {
data.timelines.push_back(std::move(_observers[i]->_timeline));
}
Serializer<std::ofstream> serializer(ofs);
serializer(data);
}
// .json
else { // if(_fpath.rfind(".json") != std::string::npos) {
ofs << "[\n";
for(size_t i=0; i<_observers.size(); ++i) {
if(i) ofs << ',';
_observers[i]->dump(ofs);
}
ofs << "]\n";
}
}
// do a summary report in stderr for each observer
else {
std::ostringstream oss;
for(size_t i=0; i<_observers.size(); ++i) {
_observers[i]->summary(oss);
}
fprintf(stderr, "%s", oss.str().c_str());
}
}
// Function: get
inline TFProfManager& TFProfManager::get() {
static TFProfManager mgr;
return mgr;
}
// ----------------------------------------------------------------------------
// Identifier for Each Built-in Observer
// ----------------------------------------------------------------------------
/** @enum ObserverType
@brief enumeration of all observer types
*/
enum class ObserverType : int {
TFPROF = 0,
CHROME,
UNDEFINED
};
/**
@brief convert an observer type to a human-readable string
*/
inline const char* to_string(ObserverType type) {
switch(type) {
case ObserverType::TFPROF: return "tfprof";
case ObserverType::CHROME: return "chrome";
default: return "undefined";
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/notifier.hpp | // 2019/02/09 - created by Tsung-Wei Huang
// - modified the event count from Eigen
#pragma once
#include <iostream>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <atomic>
#include <memory>
#include <deque>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <algorithm>
#include <numeric>
#include <cassert>
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Dmitry Vyukov <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
namespace tf {
// Notifier allows waiting for arbitrary predicates in non-blocking
// algorithms. Think of a condition variable, but the wait predicate does not need to
// be protected by a mutex. Usage:
// Waiting thread does:
//
// if (predicate)
// return act();
// Notifier::Waiter& w = waiters[my_index];
// ec.prepare_wait(&w);
// if (predicate) {
// ec.cancel_wait(&w);
// return act();
// }
// ec.commit_wait(&w);
//
// Notifying thread does:
//
// predicate = true;
// ec.notify(true);
//
// notify is cheap if there are no waiting threads. prepare_wait/commit_wait are not
// cheap, but they are executed only if the preceding predicate check has
// failed.
//
// Algorithm outline:
// There are two main variables: predicate (managed by user) and _state.
// Operation closely resembles Dekker's mutual exclusion algorithm:
// https://en.wikipedia.org/wiki/Dekker%27s_algorithm
// Waiting thread sets _state then checks predicate, Notifying thread sets
// predicate then checks _state. Due to seq_cst fences in between these
// operations it is guaranteed that either the waiter will see the predicate change
// and won't block, or the notifying thread will see the _state change and will unblock
// the waiter, or both. But it can't happen that both threads miss each
// other's changes, which would lead to deadlock.
class Notifier {
friend class Executor;
public:
struct Waiter {
std::atomic<Waiter*> next;
std::mutex mu;
std::condition_variable cv;
uint64_t epoch;
unsigned state;
enum {
kNotSignaled,
kWaiting,
kSignaled,
};
};
explicit Notifier(size_t N) : _waiters{N} {
assert(_waiters.size() < (1 << kWaiterBits) - 1);
// Initialize epoch to something close to overflow to test overflow.
_state = kStackMask | (kEpochMask - kEpochInc * _waiters.size() * 2);
}
~Notifier() {
// Ensure there are no waiters.
assert((_state.load() & (kStackMask | kWaiterMask)) == kStackMask);
}
// prepare_wait prepares for waiting.
// After calling this function the thread must re-check the wait predicate
// and call either cancel_wait or commit_wait passing the same Waiter object.
void prepare_wait(Waiter* w) {
w->epoch = _state.fetch_add(kWaiterInc, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_seq_cst);
}
// commit_wait commits waiting.
void commit_wait(Waiter* w) {
w->state = Waiter::kNotSignaled;
// Modification epoch of this waiter.
uint64_t epoch =
(w->epoch & kEpochMask) +
(((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
uint64_t state = _state.load(std::memory_order_seq_cst);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
// The preceding waiter has not decided on its fate. Wait until it
// calls either cancel_wait or commit_wait, or is notified.
std::this_thread::yield();
state = _state.load(std::memory_order_seq_cst);
continue;
}
// We've already been notified.
if (int64_t((state & kEpochMask) - epoch) > 0) return;
// Remove this thread from prewait counter and add it to the waiter list.
assert((state & kWaiterMask) != 0);
uint64_t newstate = state - kWaiterInc + kEpochInc;
//newstate = (newstate & ~kStackMask) | (w - &_waiters[0]);
newstate = static_cast<uint64_t>((newstate & ~kStackMask) | static_cast<uint64_t>(w - &_waiters[0]));
if ((state & kStackMask) == kStackMask)
w->next.store(nullptr, std::memory_order_relaxed);
else
w->next.store(&_waiters[state & kStackMask], std::memory_order_relaxed);
if (_state.compare_exchange_weak(state, newstate,
std::memory_order_release))
break;
}
_park(w);
}
// cancel_wait cancels effects of the previous prepare_wait call.
void cancel_wait(Waiter* w) {
uint64_t epoch =
(w->epoch & kEpochMask) +
(((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
uint64_t state = _state.load(std::memory_order_relaxed);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
// The preceding waiter has not decided on its fate. Wait until it
// calls either cancel_wait or commit_wait, or is notified.
std::this_thread::yield();
state = _state.load(std::memory_order_relaxed);
continue;
}
// We've already been notified.
if (int64_t((state & kEpochMask) - epoch) > 0) return;
// Remove this thread from prewait counter.
assert((state & kWaiterMask) != 0);
if (_state.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
std::memory_order_relaxed))
return;
}
}
// notify wakes one or all waiting threads.
// Must be called after changing the associated wait predicate.
void notify(bool all) {
std::atomic_thread_fence(std::memory_order_seq_cst);
uint64_t state = _state.load(std::memory_order_acquire);
for (;;) {
// Easy case: no waiters.
if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
return;
uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
uint64_t newstate;
if (all) {
// Reset prewait counter and empty wait list.
newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
} else if (waiters) {
// There is a thread in pre-wait state, unblock it.
newstate = state + kEpochInc - kWaiterInc;
} else {
// Pop a waiter from list and unpark it.
Waiter* w = &_waiters[state & kStackMask];
Waiter* wnext = w->next.load(std::memory_order_relaxed);
uint64_t next = kStackMask;
//if (wnext != nullptr) next = wnext - &_waiters[0];
if (wnext != nullptr) next = static_cast<uint64_t>(wnext - &_waiters[0]);
// Note: we don't add kEpochInc here. ABA problem on the lock-free stack
// can't happen because a waiter is re-pushed onto the stack only after
// it was in the pre-wait state which inevitably leads to epoch
// increment.
newstate = (state & kEpochMask) + next;
}
if (_state.compare_exchange_weak(state, newstate,
std::memory_order_acquire)) {
if (!all && waiters) return; // unblocked pre-wait thread
if ((state & kStackMask) == kStackMask) return;
Waiter* w = &_waiters[state & kStackMask];
if (!all) w->next.store(nullptr, std::memory_order_relaxed);
_unpark(w);
return;
}
}
}
// notify n workers
void notify_n(size_t n) {
if(n >= _waiters.size()) {
notify(true);
}
else {
for(size_t k=0; k<n; ++k) {
notify(false);
}
}
}
size_t size() const {
return _waiters.size();
}
private:
// _state layout:
// - low kStackBits is a stack of waiters that have committed to wait.
// - next kWaiterBits is the count of waiters in the prewait state.
// - next kEpochBits is a modification counter.
static const uint64_t kStackBits = 16;
static const uint64_t kStackMask = (1ull << kStackBits) - 1;
static const uint64_t kWaiterBits = 16;
static const uint64_t kWaiterShift = 16;
static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
<< kWaiterShift;
static const uint64_t kWaiterInc = 1ull << kWaiterBits;
static const uint64_t kEpochBits = 32;
static const uint64_t kEpochShift = 32;
static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
static const uint64_t kEpochInc = 1ull << kEpochShift;
std::atomic<uint64_t> _state;
std::vector<Waiter> _waiters;
void _park(Waiter* w) {
std::unique_lock<std::mutex> lock(w->mu);
while (w->state != Waiter::kSignaled) {
w->state = Waiter::kWaiting;
w->cv.wait(lock);
}
}
void _unpark(Waiter* waiters) {
Waiter* next = nullptr;
for (Waiter* w = waiters; w; w = next) {
next = w->next.load(std::memory_order_relaxed);
unsigned state;
{
std::unique_lock<std::mutex> lock(w->mu);
state = w->state;
w->state = Waiter::kSignaled;
}
// Avoid notifying if it wasn't waiting.
if (state == Waiter::kWaiting) w->cv.notify_one();
}
}
};
} // namespace tf ------------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/topology.hpp | #pragma once
namespace tf {
// ----------------------------------------------------------------------------
// class: TopologyBase
class TopologyBase {
friend class Executor;
friend class Node;
template <typename T>
friend class Future;
protected:
std::atomic<bool> _is_cancelled { false };
};
// ----------------------------------------------------------------------------
// class: AsyncTopology
class AsyncTopology : public TopologyBase {
};
// ----------------------------------------------------------------------------
// class: Topology
class Topology : public TopologyBase {
friend class Executor;
friend class Runtime;
public:
template <typename P, typename C>
Topology(Taskflow&, P&&, C&&);
private:
Taskflow& _taskflow;
std::promise<void> _promise;
SmallVector<Node*> _sources;
std::function<bool()> _pred;
std::function<void()> _call;
std::atomic<size_t> _join_counter {0};
};
// Constructor
template <typename P, typename C>
Topology::Topology(Taskflow& tf, P&& p, C&& c):
_taskflow(tf),
_pred {std::forward<P>(p)},
_call {std::forward<C>(c)} {
}
} // end of namespace tf. ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/semaphore.hpp | #pragma once
#include <vector>
#include <mutex>
#include "declarations.hpp"
/**
@file semaphore.hpp
@brief semaphore include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Semaphore
// ----------------------------------------------------------------------------
/**
@class Semaphore
@brief class to create a semaphore object for building a concurrency constraint
A semaphore creates a constraint that limits the maximum concurrency,
i.e., the number of workers, in a set of tasks.
You can let a task acquire/release one or multiple semaphores before/after
executing its work.
A task can acquire and release a semaphore,
or just acquire or just release it.
A tf::Semaphore object starts with an initial count.
As long as that count is above 0, tasks can acquire the semaphore and do
their work.
If the count is 0 or less, a task trying to acquire the semaphore will not run;
instead, it goes to the waiting list of that semaphore.
When the semaphore is released by another task,
it reschedules all tasks on that waiting list.
@code{.cpp}
tf::Executor executor(8); // create an executor of 8 workers
tf::Taskflow taskflow;
tf::Semaphore semaphore(1); // create a semaphore with initial count 1
std::vector<tf::Task> tasks {
taskflow.emplace([](){ std::cout << "A" << std::endl; }),
taskflow.emplace([](){ std::cout << "B" << std::endl; }),
taskflow.emplace([](){ std::cout << "C" << std::endl; }),
taskflow.emplace([](){ std::cout << "D" << std::endl; }),
taskflow.emplace([](){ std::cout << "E" << std::endl; })
};
for(auto & task : tasks) {  // each task acquires and releases the semaphore
task.acquire(semaphore);
task.release(semaphore);
}
executor.run(taskflow).wait();
@endcode
The above example creates five tasks with no dependencies between them.
Under normal circumstances, the five tasks would be executed concurrently.
However, this example has a semaphore with initial count 1,
and all tasks need to acquire that semaphore before running and release that
semaphore after they are done.
This arrangement limits the number of concurrently running tasks to only one.
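A semaphore can also guard a critical section that spans multiple tasks,
where one task only acquires it and a later task only releases it.
The following is a minimal sketch under that assumption (task names are illustrative):
@code{.cpp}
tf::Semaphore section(1);      // at most one worker inside the section
tf::Task enter = taskflow.emplace([](){ std::cout << "enter\n"; });
tf::Task work  = taskflow.emplace([](){ std::cout << "work\n";  });
tf::Task leave = taskflow.emplace([](){ std::cout << "leave\n"; });
enter.precede(work);
work.precede(leave);
enter.acquire(section);        // acquire only
leave.release(section);        // release only
@endcode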
*/
class Semaphore {
friend class Node;
public:
/**
@brief constructs a semaphore with the given counter
A semaphore creates a constraint that limits the maximum concurrency,
i.e., the number of workers, in a set of tasks.
@code{.cpp}
tf::Semaphore semaphore(4); // concurrency constraint of 4 workers
@endcode
*/
explicit Semaphore(size_t max_workers);
/**
@brief queries the counter value (not thread-safe during the run)
*/
size_t count() const;
private:
std::mutex _mtx;
size_t _counter;
std::vector<Node*> _waiters;
bool _try_acquire_or_wait(Node*);
std::vector<Node*> _release();
};
inline Semaphore::Semaphore(size_t max_workers) :
_counter(max_workers) {
}
inline bool Semaphore::_try_acquire_or_wait(Node* me) {
std::lock_guard<std::mutex> lock(_mtx);
if(_counter > 0) {
--_counter;
return true;
}
else {
_waiters.push_back(me);
return false;
}
}
inline std::vector<Node*> Semaphore::_release() {
std::lock_guard<std::mutex> lock(_mtx);
++_counter;
std::vector<Node*> r{std::move(_waiters)};
return r;
}
inline size_t Semaphore::count() const {
return _counter;
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/taskflow.hpp | #pragma once
#include "flow_builder.hpp"
/**
@file taskflow/core/taskflow.hpp
@brief taskflow include file
*/
namespace tf {
// ----------------------------------------------------------------------------
/**
@class Taskflow
@brief class to create a taskflow object
A %taskflow manages a task dependency graph where each task represents a
callable object (e.g., @std_lambda, @std_function) and an edge represents a
dependency between two tasks. A task is one of the following types:
1. static task : the callable constructible from
@c std::function<void()>
2. dynamic task : the callable constructible from
@c std::function<void(tf::Subflow&)>
3. condition task : the callable constructible from
@c std::function<int()>
4. multi-condition task: the callable constructible from
@c %std::function<tf::SmallVector<int>()>
5. module task : the task constructed from tf::Taskflow::composed_of
6. runtime task : the callable constructible from
@c std::function<void(tf::Runtime&)>
7. %cudaFlow task : the callable constructible from
@c std::function<void(tf::cudaFlow&)> or
@c std::function<void(tf::cudaFlowCapturer&)>
8. %syclFlow task : the callable constructible from
@c std::function<void(tf::syclFlow&)>
Each task is a basic computation unit and is run by one worker thread
from an executor.
The following example creates a simple taskflow graph of four static tasks,
@c A, @c B, @c C, and @c D, where
@c A runs before @c B and @c C and
@c D runs after @c B and @c C.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow("simple");
tf::Task A = taskflow.emplace([](){ std::cout << "TaskA\n"; });
tf::Task B = taskflow.emplace([](){ std::cout << "TaskB\n"; });
tf::Task C = taskflow.emplace([](){ std::cout << "TaskC\n"; });
tf::Task D = taskflow.emplace([](){ std::cout << "TaskD\n"; });
A.precede(B, C); // A runs before B and C
D.succeed(B, C); // D runs after B and C
executor.run(taskflow).wait();
@endcode
The taskflow object itself is NOT thread-safe. You should not
modify the graph while it is running, for example by adding new tasks,
adding new dependencies, or moving the taskflow to another object.
To minimize the overhead of task creation,
our runtime leverages a global object pool to recycle
tasks in a thread-safe manner.
Please refer to @ref Cookbook to learn more about each task type
and how to submit a taskflow to an executor.
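To complement the example above, the following minimal sketch illustrates a
condition task (type 3 in the list above); it assumes the usual rule that the
returned integer indexes the successors in the order given to tf::Task::precede:
@code{.cpp}
tf::Task init = taskflow.emplace([](){ std::cout << "init\n"; });
tf::Task cond = taskflow.emplace([](){ return std::rand() % 2; });  // returns 0 or 1
tf::Task yes  = taskflow.emplace([](){ std::cout << "branch 0\n"; });
tf::Task no   = taskflow.emplace([](){ std::cout << "branch 1\n"; });
init.precede(cond);
cond.precede(yes, no);  // a return of 0 runs yes; a return of 1 runs no
@endcode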
*/
class Taskflow : public FlowBuilder {
friend class Topology;
friend class Executor;
friend class FlowBuilder;
struct Dumper {
size_t id;
std::stack<std::pair<const Node*, const Graph*>> stack;
std::unordered_map<const Graph*, size_t> visited;
};
public:
/**
@brief constructs a taskflow with the given name
@code{.cpp}
tf::Taskflow taskflow("My Taskflow");
std::cout << taskflow.name(); // "My Taskflow"
@endcode
*/
Taskflow(const std::string& name);
/**
@brief constructs a taskflow
*/
Taskflow();
/**
@brief constructs a taskflow from a moved taskflow
Constructing a taskflow @c taskflow1 from a moved taskflow @c taskflow2 will
migrate the graph of @c taskflow2 to @c taskflow1.
After the move, @c taskflow2 will become empty.
@code{.cpp}
tf::Taskflow taskflow1(std::move(taskflow2));
assert(taskflow2.empty());
@endcode
Notice that @c taskflow2 should not be running in an executor
during the move operation, or the behavior is undefined.
*/
Taskflow(Taskflow&& rhs);
/**
@brief move assignment operator
Moving a taskflow @c taskflow2 to another taskflow @c taskflow1 will destroy
the existing graph of @c taskflow1 and assign it the graph of @c taskflow2.
After the move, @c taskflow2 will become empty.
@code{.cpp}
taskflow1 = std::move(taskflow2);
assert(taskflow2.empty());
@endcode
Notice that both @c taskflow1 and @c taskflow2 should not be running
in an executor during the move operation, or the behavior is undefined.
*/
Taskflow& operator = (Taskflow&& rhs);
/**
@brief default destructor
When the destructor is called, all tasks and their associated data
(e.g., captured data) will be destroyed.
It is your responsibility to ensure all submitted execution of this
taskflow have completed before destroying it.
For instance, the following code results in undefined behavior
since the executor may still be running the taskflow while
it is destroyed after the block.
@code{.cpp}
{
tf::Taskflow taskflow;
executor.run(taskflow);
}
@endcode
To fix the problem, we must wait for the execution to complete
before destroying the taskflow.
@code{.cpp}
{
tf::Taskflow taskflow;
executor.run(taskflow).wait();
}
@endcode
*/
~Taskflow() = default;
/**
@brief dumps the taskflow to a DOT format through a std::ostream target
@code{.cpp}
taskflow.dump(std::cout); // dump the graph to the standard output
std::ofstream ofs("output.dot");
taskflow.dump(ofs); // dump the graph to the file output.dot
@endcode
For dynamically spawned tasks, such as module tasks, subflow tasks,
and GPU tasks, you need to run the taskflow first before you can
dump the entire graph.
@code{.cpp}
tf::Task parent = taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){ std::cout << "child\n"; });
});
taskflow.dump(std::cout); // this dumps only the parent tasks
executor.run(taskflow).wait();
taskflow.dump(std::cout); // this dumps both parent and child tasks
@endcode
*/
void dump(std::ostream& ostream) const;
/**
@brief dumps the taskflow to a std::string of DOT format
This method is similar to tf::Taskflow::dump(std::ostream& ostream),
but returning a string of the graph in DOT format.
*/
std::string dump() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief queries the emptiness of the taskflow
An empty taskflow has no tasks; that is, tf::Taskflow::num_tasks returns zero.
*/
bool empty() const;
/**
@brief assigns a name to the taskflow
@code{.cpp}
taskflow.name("assign another name");
@endcode
*/
void name(const std::string&);
/**
@brief queries the name of the taskflow
@code{.cpp}
std::cout << "my name is: " << taskflow.name();
@endcode
*/
const std::string& name() const;
/**
@brief clears the associated task dependency graph
When you clear a taskflow, all tasks and their associated data
(e.g., captured data in task callables) will be destroyed.
The behavior of clearing a running taskflow is undefined.
*/
void clear();
/**
@brief applies a visitor to each task in the taskflow
A visitor is a callable that takes an argument of type tf::Task
and returns nothing. The following example iterates each task in a
taskflow and prints its name:
@code{.cpp}
taskflow.for_each_task([](tf::Task task){
std::cout << task.name() << '\n';
});
@endcode
*/
template <typename V>
void for_each_task(V&& visitor) const;
/**
@brief returns a reference to the underlying graph object
A graph object (of type tf::Graph) is the ultimate storage for the
task dependency graph and should only be used as an opaque
data structure to interact with the executor (e.g., composition).
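For illustration, composition accesses this graph indirectly through
tf::Taskflow::composed_of (a minimal sketch):
@code{.cpp}
tf::Taskflow f1, f2;
f1.emplace([](){ std::cout << "f1\n"; });
// f2 runs f1 as a module task; the executor interacts with f1 through its graph
tf::Task module = f2.composed_of(f1);
@endcode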
*/
Graph& graph();
private:
mutable std::mutex _mutex;
std::string _name;
Graph _graph;
std::queue<std::shared_ptr<Topology>> _topologies;
std::optional<std::list<Taskflow>::iterator> _satellite;
void _dump(std::ostream&, const Graph*) const;
void _dump(std::ostream&, const Node*, Dumper&) const;
void _dump(std::ostream&, const Graph*, Dumper&) const;
};
// Constructor
inline Taskflow::Taskflow(const std::string& name) :
FlowBuilder {_graph},
_name {name} {
}
// Constructor
inline Taskflow::Taskflow() : FlowBuilder{_graph} {
}
// Move constructor
inline Taskflow::Taskflow(Taskflow&& rhs) : FlowBuilder{_graph} {
std::scoped_lock<std::mutex> lock(rhs._mutex);
_name = std::move(rhs._name);
_graph = std::move(rhs._graph);
_topologies = std::move(rhs._topologies);
_satellite = rhs._satellite;
rhs._satellite.reset();
}
// Move assignment
inline Taskflow& Taskflow::operator = (Taskflow&& rhs) {
if(this != &rhs) {
std::scoped_lock<std::mutex, std::mutex> lock(_mutex, rhs._mutex);
_name = std::move(rhs._name);
_graph = std::move(rhs._graph);
_topologies = std::move(rhs._topologies);
_satellite = rhs._satellite;
rhs._satellite.reset();
}
return *this;
}
// Procedure:
inline void Taskflow::clear() {
_graph._clear();
}
// Function: num_tasks
inline size_t Taskflow::num_tasks() const {
return _graph.size();
}
// Function: empty
inline bool Taskflow::empty() const {
return _graph.empty();
}
// Function: name
inline void Taskflow::name(const std::string &name) {
_name = name;
}
// Function: name
inline const std::string& Taskflow::name() const {
return _name;
}
// Function: graph
inline Graph& Taskflow::graph() {
return _graph;
}
// Function: for_each_task
template <typename V>
void Taskflow::for_each_task(V&& visitor) const {
for(size_t i=0; i<_graph._nodes.size(); ++i) {
visitor(Task(_graph._nodes[i]));
}
}
// Procedure: dump
inline std::string Taskflow::dump() const {
std::ostringstream oss;
dump(oss);
return oss.str();
}
// Function: dump
inline void Taskflow::dump(std::ostream& os) const {
os << "digraph Taskflow {\n";
_dump(os, &_graph);
os << "}\n";
}
// Procedure: _dump
inline void Taskflow::_dump(std::ostream& os, const Graph* top) const {
Dumper dumper;
dumper.id = 0;
dumper.stack.push({nullptr, top});
dumper.visited[top] = dumper.id++;
while(!dumper.stack.empty()) {
auto [p, f] = dumper.stack.top();
dumper.stack.pop();
os << "subgraph cluster_p" << f << " {\nlabel=\"";
// n-level module
if(p) {
os << 'm' << dumper.visited[f];
}
// top-level taskflow graph
else {
os << "Taskflow: ";
if(_name.empty()) os << 'p' << this;
else os << _name;
}
os << "\";\n";
_dump(os, f, dumper);
os << "}\n";
}
}
// Procedure: _dump
inline void Taskflow::_dump(
std::ostream& os, const Node* node, Dumper& dumper
) const {
os << 'p' << node << "[label=\"";
if(node->_name.empty()) os << 'p' << node;
else os << node->_name;
os << "\" ";
// shape for node
switch(node->_handle.index()) {
case Node::CONDITION:
case Node::MULTI_CONDITION:
os << "shape=diamond color=black fillcolor=aquamarine style=filled";
break;
case Node::RUNTIME:
os << "shape=component";
break;
case Node::CUDAFLOW:
os << " style=\"filled\""
<< " color=\"black\" fillcolor=\"purple\""
<< " fontcolor=\"white\""
<< " shape=\"folder\"";
break;
case Node::SYCLFLOW:
os << " style=\"filled\""
<< " color=\"black\" fillcolor=\"red\""
<< " fontcolor=\"white\""
<< " shape=\"folder\"";
break;
default:
break;
}
os << "];\n";
for(size_t s=0; s<node->_successors.size(); ++s) {
if(node->_is_conditioner()) {
// case edge is dashed
os << 'p' << node << " -> p" << node->_successors[s]
<< " [style=dashed label=\"" << s << "\"];\n";
} else {
os << 'p' << node << " -> p" << node->_successors[s] << ";\n";
}
}
// subflow join node
if(node->_parent && node->_parent->_handle.index() == Node::DYNAMIC &&
node->_successors.size() == 0
) {
os << 'p' << node << " -> p" << node->_parent << ";\n";
}
// node info
switch(node->_handle.index()) {
case Node::DYNAMIC: {
auto& sbg = std::get_if<Node::Dynamic>(&node->_handle)->subgraph;
if(!sbg.empty()) {
os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: ";
if(node->_name.empty()) os << 'p' << node;
else os << node->_name;
os << "\";\n" << "color=blue\n";
_dump(os, &sbg, dumper);
os << "}\n";
}
}
break;
case Node::CUDAFLOW: {
std::get_if<Node::cudaFlow>(&node->_handle)->graph->dump(
os, node, node->_name
);
}
break;
case Node::SYCLFLOW: {
std::get_if<Node::syclFlow>(&node->_handle)->graph->dump(
os, node, node->_name
);
}
break;
default:
break;
}
}
// Procedure: _dump
inline void Taskflow::_dump(
std::ostream& os, const Graph* graph, Dumper& dumper
) const {
for(const auto& n : graph->_nodes) {
// regular task
if(n->_handle.index() != Node::MODULE) {
_dump(os, n, dumper);
}
// module task
else {
//auto module = &(std::get_if<Node::Module>(&n->_handle)->module);
auto module = &(std::get_if<Node::Module>(&n->_handle)->graph);
os << 'p' << n << "[shape=box3d, color=blue, label=\"";
if(n->_name.empty()) os << 'p' << n;
else os << n->_name;
if(dumper.visited.find(module) == dumper.visited.end()) {
dumper.visited[module] = dumper.id++;
dumper.stack.push({n, module});
}
os << " [m" << dumper.visited[module] << "]\"];\n";
for(const auto s : n->_successors) {
os << 'p' << n << "->" << 'p' << s << ";\n";
}
}
}
}
// ----------------------------------------------------------------------------
// class definition: Future
// ----------------------------------------------------------------------------
/**
@class Future
@brief class to access the result of an execution
tf::Future is a class derived from std::future that will eventually hold the
execution result of a submitted taskflow (tf::Executor::run)
or an asynchronous task (tf::Executor::async, tf::Executor::silent_async).
In addition to the base methods inherited from std::future,
you can call tf::Future::cancel to cancel the execution of the running taskflow
associated with this future object.
The following example cancels a submission of a taskflow that contains
1000 tasks each running one second.
@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
for(int i=0; i<1000; i++) {
taskflow.emplace([](){
std::this_thread::sleep_for(std::chrono::seconds(1));
});
}
// submit the taskflow
tf::Future fu = executor.run(taskflow);
// request to cancel the submitted execution above
fu.cancel();
// wait until the cancellation finishes
fu.get();
@endcode
*/
template <typename T>
class Future : public std::future<T> {
friend class Executor;
friend class Subflow;
using handle_t = std::variant<
std::monostate, std::weak_ptr<Topology>, std::weak_ptr<AsyncTopology>
>;
// variant index
constexpr static auto ASYNC = get_index_v<std::weak_ptr<AsyncTopology>, handle_t>;
constexpr static auto TASKFLOW = get_index_v<std::weak_ptr<Topology>, handle_t>;
public:
/**
@brief default constructor
*/
Future() = default;
/**
@brief disabled copy constructor
*/
Future(const Future&) = delete;
/**
@brief default move constructor
*/
Future(Future&&) = default;
/**
@brief disabled copy assignment
*/
Future& operator = (const Future&) = delete;
/**
@brief default move assignment
*/
Future& operator = (Future&&) = default;
/**
@brief cancels the execution of the running taskflow associated with
this future object
@return @c true if the execution can be cancelled or
@c false if the execution has already completed
When you request a cancellation, the executor will stop scheduling
any further tasks. Tasks that are already running will continue to finish
(cancellation is non-preemptive).
You can call tf::Future::wait to wait for the cancellation to complete.
*/
bool cancel();
private:
handle_t _handle;
template <typename P>
Future(std::future<T>&&, P&&);
};
template <typename T>
template <typename P>
Future<T>::Future(std::future<T>&& fu, P&& p) :
std::future<T> {std::move(fu)},
_handle {std::forward<P>(p)} {
}
// Function: cancel
template <typename T>
bool Future<T>::cancel() {
return std::visit([](auto&& arg){
using P = std::decay_t<decltype(arg)>;
if constexpr(std::is_same_v<P, std::monostate>) {
return false;
}
else {
auto ptr = arg.lock();
if(ptr) {
ptr->_is_cancelled.store(true, std::memory_order_relaxed);
return true;
}
return false;
}
}, _handle);
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/declarations.hpp | #pragma once
namespace tf {
// ----------------------------------------------------------------------------
// taskflow
// ----------------------------------------------------------------------------
class AsyncTopology;
class Node;
class Graph;
class FlowBuilder;
class Semaphore;
class Subflow;
class Runtime;
class Task;
class TaskView;
class Taskflow;
class Topology;
class TopologyBase;
class Executor;
class Worker;
class WorkerView;
class ObserverInterface;
class ChromeTracingObserver;
class TFProfObserver;
class TFProfManager;
template <typename T>
class Future;
template <typename...Fs>
class Pipeline;
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
class cudaNode;
class cudaGraph;
class cudaTask;
class cudaFlow;
class cudaFlowCapturer;
class cudaFlowCapturerBase;
class cudaCapturingBase;
class cudaLinearCapturing;
class cudaSequentialCapturing;
class cudaRoundRobinCapturing;
// ----------------------------------------------------------------------------
// syclFlow
// ----------------------------------------------------------------------------
class syclNode;
class syclGraph;
class syclTask;
class syclFlow;
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/executor-module-opt.hpp | #pragma once
#include "observer.hpp"
#include "taskflow.hpp"
/**
@file executor.hpp
@brief executor include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Executor Definition
// ----------------------------------------------------------------------------
/** @class Executor
@brief class to create an executor for running a taskflow graph
An executor manages a set of worker threads to run one or multiple taskflows
using an efficient work-stealing scheduling algorithm.
@code{.cpp}
// Declare an executor and a taskflow
tf::Executor executor;
tf::Taskflow taskflow;
// Add three tasks into the taskflow
tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; });
tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; });
tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; });
// Build precedence between tasks
A.precede(B, C);
tf::Future<void> fu = executor.run(taskflow);
fu.wait(); // block until the execution completes
executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait();
executor.run_n(taskflow, 4);
executor.wait_for_all(); // block until all associated executions finish
executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait();
executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; });
@endcode
All the @c run methods are @em thread-safe. You can submit multiple
taskflows at the same time to an executor from different threads.
*/
class Executor {
friend class FlowBuilder;
friend class Subflow;
friend class Runtime;
public:
/**
@brief constructs the executor with @c N worker threads
The constructor spawns @c N worker threads to run tasks in a
work-stealing loop. The number of workers must be greater than zero
or an exception will be thrown.
By default, the number of worker threads is equal to the maximum
hardware concurrency returned by std::thread::hardware_concurrency.
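For instance (a minimal sketch):
@code{.cpp}
tf::Executor e1;     // as many workers as std::thread::hardware_concurrency()
tf::Executor e2(4);  // exactly 4 workers
@endcode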
*/
explicit Executor(size_t N = std::thread::hardware_concurrency());
/**
@brief destructs the executor
The destructor calls Executor::wait_for_all to wait for all submitted
taskflows to complete and then notifies all worker threads to stop
and join these threads.
*/
~Executor();
/**
@brief runs a taskflow once
@param taskflow a tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run(Taskflow& taskflow);
/**
@brief runs a moved taskflow once
@param taskflow a moved tf::Taskflow object
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(std::move(taskflow));
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run(Taskflow&& taskflow);
/**
@brief runs a taskflow once and invoke a callback upon completion
@param taskflow a tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run(taskflow, [](){ std::cout << "done"; });
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run(Taskflow& taskflow, C&& callable);
/**
@brief runs a moved taskflow once and invoke a callback upon completion
@param taskflow a moved tf::Taskflow object
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow once and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run(
std::move(taskflow), [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run(Taskflow&& taskflow, C&& callable);
/**
@brief runs a taskflow for @c N times
@param taskflow a tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(taskflow, 2); // run taskflow 2 times
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
tf::Future<void> run_n(Taskflow& taskflow, size_t N);
/**
@brief runs a moved taskflow for @c N times
@param taskflow a moved tf::Taskflow object
@param N number of runs
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and returns a tf::Future
object that eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_n(
std::move(taskflow), 2 // run the moved taskflow 2 times
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
tf::Future<void> run_n(Taskflow&& taskflow, size_t N);
/**
@brief runs a taskflow for @c N times and then invokes a callback
@param taskflow a tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invokes
// the lambda to print "done"
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename C>
tf::Future<void> run_n(Taskflow& taskflow, size_t N, C&& callable);
/**
@brief runs a moved taskflow for @c N times and then invokes a callback
@param taskflow a moved tf::Taskflow
@param N number of runs
@param callable a callable object to be invoked after this run
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow @c N times and invokes the given
callable when the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_n(
// run the moved taskflow 2 times and invoke the lambda to print "done"
std::move(taskflow), 2, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename C>
tf::Future<void> run_n(Taskflow&& taskflow, size_t N, C&& callable);
/**
@brief runs a taskflow multiple times until the predicate becomes true
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred);
/**
@brief runs a moved taskflow and keeps running it
until the predicate becomes true
@param taskflow a moved tf::Taskflow object
@param pred a boolean predicate to return @c true for stop
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow), [](){ return rand()%10 == 0; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred);
/**
@brief runs a taskflow multiple times until the predicate becomes true and
then invokes the callback
@param taskflow a tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes the given taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
@code{.cpp}
tf::Future<void> future = executor.run_until(
taskflow, [](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
@attention
The executor does not own the given taskflow. It is your responsibility to
ensure the taskflow remains alive during its execution.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow& taskflow, P&& pred, C&& callable);
/**
@brief runs a moved taskflow and keeps running
it until the predicate becomes true and then invokes the callback
@param taskflow a moved tf::Taskflow
@param pred a boolean predicate to return @c true for stop
@param callable a callable object to be invoked after this run completes
@return a tf::Future that holds the result of the execution
This member function executes a moved taskflow multiple times until
the predicate returns @c true and then invokes the given callable when
the execution completes.
This member function returns a tf::Future object that
eventually holds the result of the execution.
The executor will take care of the lifetime of the moved taskflow.
@code{.cpp}
tf::Future<void> future = executor.run_until(
std::move(taskflow),
[](){ return rand()%10 == 0; }, [](){ std::cout << "done"; }
);
// do something else
future.wait();
@endcode
This member function is thread-safe.
*/
template<typename P, typename C>
tf::Future<void> run_until(Taskflow&& taskflow, P&& pred, C&& callable);
/**
@brief wait for all tasks to complete
This member function blocks until all submitted tasks
(e.g., taskflows, asynchronous tasks) have finished.
@code{.cpp}
executor.run(taskflow1);
executor.run_n(taskflow2, 10);
executor.run_n(taskflow3, 100);
executor.wait_for_all(); // wait until the above submitted taskflows finish
@endcode
*/
void wait_for_all();
/**
@brief queries the number of worker threads
Each worker represents one unique thread spawned by an executor
at construction time.
@code{.cpp}
tf::Executor executor(4);
std::cout << executor.num_workers(); // 4
@endcode
*/
size_t num_workers() const noexcept;
/**
@brief queries the number of running topologies at the time of this call
When a taskflow is submitted to an executor, a topology is created to store
runtime metadata of the running taskflow.
When the execution of the submitted taskflow finishes,
its corresponding topology will be removed from the executor.
@code{.cpp}
executor.run(taskflow);
std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_topologies() const;
/**
@brief queries the number of running taskflows with moved ownership
@code{.cpp}
executor.run(std::move(taskflow));
std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running)
@endcode
*/
size_t num_taskflows() const;
/**
@brief queries the id of the caller thread in this executor
Each worker has a unique id in the range of @c 0 to @c N-1 associated with
its parent executor.
If the caller thread does not belong to the executor, @c -1 is returned.
@code{.cpp}
tf::Executor executor(4); // 4 workers in the executor
executor.this_worker_id(); // -1 (main thread is not a worker)
taskflow.emplace([&](){
std::cout << executor.this_worker_id(); // 0, 1, 2, or 3
});
executor.run(taskflow);
@endcode
*/
int this_worker_id() const;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will eventually hold the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
Unlike std::async, the return here is a @em tf::Future that holds
an optional of the result.
If the asynchronous task is cancelled before it runs, the optional holds
@c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.async([](){
std::cout << "create an asynchronous task and returns 1\n";
return 1;
});
@endcode
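For illustration, the optional result can be inspected once the future is ready
(a minimal sketch continuing the example above):
@code{.cpp}
if(auto result = future.get(); result.has_value()) {
std::cout << "task returned " << *result << '\n';
}
else {
std::cout << "task was cancelled before it ran\n";
}
@endcode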
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs a given function asynchronously and gives a name to this task
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will eventually hold the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
Naming an asynchronous task is primarily used for profiling and visualizing
the task execution timeline.
Unlike std::async, the return here is a tf::Future that holds
an optional of the result.
If the asynchronous task is cancelled before it runs, the optional holds
@c std::nullopt; otherwise, it holds the value returned by the callable.
@code{.cpp}
tf::Future<std::optional<int>> future = executor.named_async("name", [](){
std::cout << "create an asynchronous task with a name and returns 1\n";
return 1;
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::async but does not return a future object
This member function is more efficient than tf::Executor::async
and is recommended when you do not need the return value.
@code{.cpp}
executor.silent_async([](){
std::cout << "create an asynchronous task with no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Executor::named_async but does not return a future object
This member function is more efficient than tf::Executor::named_async
and is recommended when you do not need the return value.
@code{.cpp}
executor.named_silent_async("name", [](){
std::cout << "create an asynchronous task with a name and no return\n";
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief constructs an observer to inspect the activities of worker threads
@tparam Observer observer type derived from tf::ObserverInterface
@tparam ArgsT argument parameter pack
@param args arguments to forward to the constructor of the observer
@return a shared pointer to the created observer
Each executor manages a list of observers whose ownership is shared with callers.
For each of these observers, the two member functions,
tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit,
will be called before and after the execution of a task.
This member function is not thread-safe.
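For illustration, a minimal sketch of a user-defined observer is shown below;
MyObserver is hypothetical, and WorkerView::id and TaskView::name are assumed accessors:
@code{.cpp}
struct MyObserver : public tf::ObserverInterface {
void set_up(size_t num_workers) override final {
std::cout << "observing " << num_workers << " workers\n";
}
void on_entry(tf::WorkerView wv, tf::TaskView tv) override final {
std::cout << "worker " << wv.id() << " starts " << tv.name() << '\n';
}
void on_exit(tf::WorkerView wv, tf::TaskView tv) override final {
std::cout << "worker " << wv.id() << " ends " << tv.name() << '\n';
}
};
auto observer = executor.make_observer<MyObserver>();
@endcode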
*/
template <typename Observer, typename... ArgsT>
std::shared_ptr<Observer> make_observer(ArgsT&&... args);
/**
@brief removes an observer from the executor
This member function is not thread-safe.
*/
template <typename Observer>
void remove_observer(std::shared_ptr<Observer> observer);
/**
@brief queries the number of observers
*/
size_t num_observers() const noexcept;
private:
std::condition_variable _topology_cv;
std::mutex _taskflow_mutex;
std::mutex _topology_mutex;
std::mutex _wsq_mutex;
size_t _num_topologies {0};
std::unordered_map<std::thread::id, size_t> _wids;
std::vector<Worker> _workers;
std::vector<std::thread> _threads;
std::list<Taskflow> _taskflows;
Notifier _notifier;
TaskQueue<Node*> _wsq;
std::atomic<size_t> _num_actives {0};
std::atomic<size_t> _num_thieves {0};
std::atomic<bool> _done {0};
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
Worker* _this_worker();
bool _wait_for_task(Worker&, Node*&);
void _observer_prologue(Worker&, Node*);
void _observer_epilogue(Worker&, Node*);
void _spawn(size_t);
void _worker_loop(Worker&);
void _exploit_task(Worker&, Node*&);
void _explore_task(Worker&, Node*&);
void _consume_task(Worker&, Node*);
void _schedule(Worker&, Node*);
void _schedule(Node*);
void _schedule(Worker&, const SmallVector<Node*>&);
void _schedule(const SmallVector<Node*>&);
void _set_up_topology(Worker*, Topology*);
void _tear_down_topology(Worker&, Topology*);
void _tear_down_async(Node*);
void _tear_down_invoke(Worker&, Node*);
void _cancel_invoke(Worker&, Node*);
void _increment_topology();
void _decrement_topology();
void _decrement_topology_and_notify();
void _invoke(Worker&, Node*);
void _invoke_static_task(Worker&, Node*);
void _invoke_dynamic_task(Worker&, Node*);
void _invoke_dynamic_task_external(Worker&, Node*, Graph&, bool);
void _invoke_dynamic_task_internal(Worker&, Node*, Graph&);
void _invoke_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_module_task(Worker&, Node*, bool&);
void _invoke_module_task_internal(Worker&, Node*, Graph&, bool&);
void _invoke_async_task(Worker&, Node*);
void _invoke_silent_async_task(Worker&, Node*);
void _invoke_cudaflow_task(Worker&, Node*);
void _invoke_syclflow_task(Worker&, Node*);
void _invoke_runtime_task(Worker&, Node*);
template <typename C,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
void _invoke_cudaflow_task_entry(Node*, C&&);
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
void _invoke_syclflow_task_entry(Node*, C&&, Q&);
};
// Constructor
inline Executor::Executor(size_t N) :
_workers {N},
_notifier {N} {
if(N == 0) {
TF_THROW("no cpu workers to execute taskflows");
}
_spawn(N);
// instantiate the default observer if requested
if(has_env(TF_ENABLE_PROFILER)) {
TFProfManager::get()._manage(make_observer<TFProfObserver>());
}
}
// Destructor
inline Executor::~Executor() {
// wait for all topologies to complete
wait_for_all();
// shut down the scheduler
_done = true;
_notifier.notify(true);
for(auto& t : _threads){
t.join();
}
}
// Function: num_workers
inline size_t Executor::num_workers() const noexcept {
return _workers.size();
}
// Function: num_topologies
inline size_t Executor::num_topologies() const {
return _num_topologies;
}
// Function: num_taskflows
inline size_t Executor::num_taskflows() const {
return _taskflows.size();
}
// Function: _this_worker
inline Worker* Executor::_this_worker() {
auto itr = _wids.find(std::this_thread::get_id());
return itr == _wids.end() ? nullptr : &_workers[itr->second];
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... args) {
_increment_topology();
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else{
_schedule(node);
}
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Executor::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Executor::named_silent_async(
const std::string& name, F&& f, ArgsT&&... args
) {
_increment_topology();
Node* node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
if(auto w = _this_worker(); w) {
_schedule(*w, node);
}
else {
_schedule(node);
}
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Executor::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: this_worker_id
inline int Executor::this_worker_id() const {
auto i = _wids.find(std::this_thread::get_id());
return i == _wids.end() ? -1 : static_cast<int>(_workers[i->second]._id);
}
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
std::mutex mutex;
std::condition_variable cond;
size_t n=0;
for(size_t id=0; id<N; ++id) {
_workers[id]._id = id;
_workers[id]._vtm = id;
_workers[id]._executor = this;
_workers[id]._waiter = &_notifier._waiters[id];
_threads.emplace_back([this] (
Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n
) -> void {
// enables the mapping
{
std::scoped_lock lock(mutex);
_wids[std::this_thread::get_id()] = w._id;
if(n++; n == num_workers()) {
cond.notify_one();
}
}
//this_worker().worker = &w;
Node* t = nullptr;
// must use 1 as condition instead of !done
while(1) {
// execute the tasks.
_exploit_task(w, t);
// wait for tasks
if(_wait_for_task(w, t) == false) {
break;
}
}
}, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n));
}
std::unique_lock<std::mutex> lock(mutex);
cond.wait(lock, [&](){ return n==N; });
}
// Function: _consume_task
inline void Executor::_consume_task(Worker& w, Node* p) {
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
while(p->_join_counter != 0) {
exploit:
if(auto t = w._wsq.pop(); t) {
_invoke(w, t);
}
else {
size_t num_steals = 0;
//size_t num_pauses = 0;
size_t max_steals = ((_workers.size() + 1) << 1);
explore:
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
_invoke(w, t);
goto exploit;
}
else if(p->_join_counter != 0){
if(num_steals++ > max_steals) {
std::this_thread::yield();
}
//std::this_thread::yield();
w._vtm = rdvtm(w._rdgen);
goto explore;
}
else {
break;
}
}
}
}
// Function: _explore_task
inline void Executor::_explore_task(Worker& w, Node*& t) {
//assert(_workers[w].wsq.empty());
//assert(!t);
size_t num_steals = 0;
size_t num_yields = 0;
size_t max_steals = ((_workers.size() + 1) << 1);
std::uniform_int_distribution<size_t> rdvtm(0, _workers.size()-1);
do {
t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal();
if(t) {
break;
}
if(num_steals++ > max_steals) {
std::this_thread::yield();
if(num_yields++ > 100) {
break;
}
}
w._vtm = rdvtm(w._rdgen);
} while(!_done);
}
// Procedure: _exploit_task
inline void Executor::_exploit_task(Worker& w, Node*& t) {
if(t) {
if(_num_actives.fetch_add(1) == 0 && _num_thieves == 0) {
_notifier.notify(false);
}
while(t) {
_invoke(w, t);
t = w._wsq.pop();
}
--_num_actives;
}
}
// Function: _wait_for_task
inline bool Executor::_wait_for_task(Worker& worker, Node*& t) {
wait_for_task:
//assert(!t);
++_num_thieves;
explore_task:
_explore_task(worker, t);
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
_notifier.prepare_wait(worker._waiter);
//if(auto vtm = _find_vtm(me); vtm != _workers.size()) {
if(!_wsq.empty()) {
_notifier.cancel_wait(worker._waiter);
//t = (vtm == me) ? _wsq.steal() : _workers[vtm].wsq.steal();
t = _wsq.steal(); // must steal here
if(t) {
if(_num_thieves.fetch_sub(1) == 1) {
_notifier.notify(false);
}
return true;
}
else {
worker._vtm = worker._id;
goto explore_task;
}
}
if(_done) {
_notifier.cancel_wait(worker._waiter);
_notifier.notify(true);
--_num_thieves;
return false;
}
if(_num_thieves.fetch_sub(1) == 1) {
if(_num_actives) {
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
// check all queues again
for(auto& w : _workers) {
if(!w._wsq.empty()) {
worker._vtm = w._id;
_notifier.cancel_wait(worker._waiter);
goto wait_for_task;
}
}
}
// Now I really need to relinquish myself to others
_notifier.commit_wait(worker._waiter);
return true;
}
// Function: make_observer
template<typename Observer, typename... ArgsT>
std::shared_ptr<Observer> Executor::make_observer(ArgsT&&... args) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
// use a local variable to mimic the constructor
auto ptr = std::make_shared<Observer>(std::forward<ArgsT>(args)...);
ptr->set_up(_workers.size());
_observers.emplace(std::static_pointer_cast<ObserverInterface>(ptr));
return ptr;
}
// Procedure: remove_observer
template <typename Observer>
void Executor::remove_observer(std::shared_ptr<Observer> ptr) {
static_assert(
std::is_base_of_v<ObserverInterface, Observer>,
"Observer must be derived from ObserverInterface"
);
_observers.erase(std::static_pointer_cast<ObserverInterface>(ptr));
}
// Function: num_observers
inline size_t Executor::num_observers() const noexcept {
return _observers.size();
}
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, Node* node) {
node->_state.fetch_or(Node::READY, std::memory_order_release);
// caller is a worker to this pool
if(worker._executor == this) {
worker._wsq.push(node);
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(Node* node) {
node->_state.fetch_or(Node::READY, std::memory_order_release);
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
_wsq.push(node);
}
_notifier.notify(false);
}
// Procedure: _schedule
inline void Executor::_schedule(
Worker& worker, const SmallVector<Node*>& nodes
) {
// We need to cache the node count to avoid accessing the nodes
// vector while the parent topology is removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// make the node ready
for(size_t i=0; i<num_nodes; ++i) {
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
}
if(worker._executor == this) {
for(size_t i=0; i<num_nodes; ++i) {
worker._wsq.push(nodes[i]);
}
return;
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
_wsq.push(nodes[k]);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _schedule
inline void Executor::_schedule(const SmallVector<Node*>& nodes) {
// parent topology may be removed!
const auto num_nodes = nodes.size();
if(num_nodes == 0) {
return;
}
// make the node ready
for(size_t i=0; i<num_nodes; ++i) {
nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release);
}
{
std::lock_guard<std::mutex> lock(_wsq_mutex);
for(size_t k=0; k<num_nodes; ++k) {
_wsq.push(nodes[k]);
}
}
_notifier.notify_n(num_nodes);
}
// Procedure: _invoke
inline void Executor::_invoke(Worker& worker, Node* node) {
int state;
SmallVector<int> conds;
// synchronize all outstanding memory operations caused by reordering
do {
state = node->_state.load(std::memory_order_acquire);
} while(! (state & Node::READY));
// unwind stack for deferred node
if(state & Node::DEFERRED) {
node->_state.fetch_and(~Node::DEFERRED, std::memory_order_relaxed);
goto invoke_epilogue;
}
//while(!(node->_state.load(std::memory_order_acquire) & Node::READY));
invoke_prologue:
// no need to do other things if the topology is cancelled
if(node->_is_cancelled()) {
_cancel_invoke(worker, node);
return;
}
// if acquiring semaphore(s) exists, acquire them first
if(node->_semaphores && !node->_semaphores->to_acquire.empty()) {
SmallVector<Node*> nodes;
if(!node->_acquire_all(nodes)) {
_schedule(worker, nodes);
return;
}
node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release);
}
// condition task
//int cond = -1;
//SmallVector<int> conds = { -1 };
// switch is faster than nested if-else due to jump table
switch(node->_handle.index()) {
// static task
case Node::STATIC:{
_invoke_static_task(worker, node);
}
break;
// dynamic task
case Node::DYNAMIC: {
_invoke_dynamic_task(worker, node);
}
break;
// condition task
case Node::CONDITION: {
_invoke_condition_task(worker, node, conds);
}
break;
// multi-condition task
case Node::MULTI_CONDITION: {
_invoke_multi_condition_task(worker, node, conds);
}
break;
// module task
case Node::MODULE: {
bool deferred = false;
_invoke_module_task(worker, node, deferred);
if(deferred) {
return;
}
}
break;
// async task
case Node::ASYNC: {
_invoke_async_task(worker, node);
_tear_down_async(node);
return ;
}
break;
// silent async task
case Node::SILENT_ASYNC: {
_invoke_silent_async_task(worker, node);
_tear_down_async(node);
return ;
}
break;
// cudaflow task
case Node::CUDAFLOW: {
_invoke_cudaflow_task(worker, node);
}
break;
// syclflow task
case Node::SYCLFLOW: {
_invoke_syclflow_task(worker, node);
}
break;
// runtime task
case Node::RUNTIME: {
_invoke_runtime_task(worker, node);
}
break;
// monostate (placeholder)
default:
break;
}
invoke_epilogue:
// if releasing semaphores exist, release them
if(node->_semaphores && !node->_semaphores->to_release.empty()) {
_schedule(worker, node->_release_all());
}
// We MUST recover the dependency since the graph may have cycles.
// This must be done before scheduling the successors; otherwise, this might cause
// a race condition on _dependents
if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) {
node->_join_counter = node->num_strong_dependents();
}
else {
node->_join_counter = node->num_dependents();
}
// acquire the parent flow counter
auto& j = (node->_parent) ? node->_parent->_join_counter :
node->_topology->_join_counter;
Node* cache {nullptr};
// At this point, the node storage might be destructed (to be verified)
// case 1: non-condition task
switch(node->_handle.index()) {
// condition and multi-condition tasks
case Node::CONDITION:
case Node::MULTI_CONDITION: {
for(auto cond : conds) {
if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) {
auto s = node->_successors[cond];
// zeroing the join counter for invariant
s->_join_counter.store(0, std::memory_order_relaxed);
j.fetch_add(1);
if(cache) {
_schedule(worker, cache);
}
cache = s;
}
}
}
break;
// non-condition task
default: {
for(size_t i=0; i<node->_successors.size(); ++i) {
if(--(node->_successors[i]->_join_counter) == 0) {
j.fetch_add(1);
if(cache) {
_schedule(worker, cache);
}
cache = node->_successors[i];
}
}
}
break;
}
// tear_down the invoke
_tear_down_invoke(worker, node);
// perform tail recursion elimination for the right-most child to reduce
// the number of expensive pop/push operations through the task queue
if(cache) {
node = cache;
//node->_state.fetch_or(Node::READY, std::memory_order_release);
goto invoke_prologue;
}
}
// Procedure: _tear_down_async
inline void Executor::_tear_down_async(Node* node) {
if(node->_parent) {
node->_parent->_join_counter.fetch_sub(1);
}
else {
_decrement_topology_and_notify();
}
node_pool.recycle(node);
}
// Procedure: _tear_down_invoke
inline void Executor::_tear_down_invoke(Worker& worker, Node* node) {
// we must check the parent first before subtracting the join counter,
// or it can introduce a data race
if(auto parent = node->_parent; parent == nullptr) {
if(node->_topology->_join_counter.fetch_sub(1) == 1) {
_tear_down_topology(worker, node->_topology);
}
}
else {
// prefetch the deferred status, as subtracting the join counter can
// immediately cause the other worker to release the subflow
auto deferred = parent->_state.load(std::memory_order_relaxed) & Node::DEFERRED;
if(parent->_join_counter.fetch_sub(1) == 1 && deferred) {
_schedule(worker, parent);
}
}
}
// Procedure: _cancel_invoke
inline void Executor::_cancel_invoke(Worker& worker, Node* node) {
switch(node->_handle.index()) {
// async task needs to carry out the promise
case Node::ASYNC:
std::get_if<Node::Async>(&(node->_handle))->work(true);
_tear_down_async(node);
break;
// silent async doesn't need to carry out the promise
case Node::SILENT_ASYNC:
_tear_down_async(node);
break;
// tear down topology if the node is the last leaf
default: {
_tear_down_invoke(worker, node);
}
break;
}
}
// Procedure: _observer_prologue
inline void Executor::_observer_prologue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_entry(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _observer_epilogue
inline void Executor::_observer_epilogue(Worker& worker, Node* node) {
for(auto& observer : _observers) {
observer->on_exit(WorkerView(worker), TaskView(*node));
}
}
// Procedure: _invoke_static_task
inline void Executor::_invoke_static_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::Static>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_dynamic_task
inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) {
_observer_prologue(w, node);
auto handle = std::get_if<Node::Dynamic>(&node->_handle);
handle->subgraph._clear();
Subflow sf(*this, w, node, handle->subgraph);
handle->work(sf);
if(sf._joinable) {
_invoke_dynamic_task_internal(w, node, handle->subgraph);
}
_observer_epilogue(w, node);
}
// Procedure: _invoke_dynamic_task_external
inline void Executor::_invoke_dynamic_task_external(
Worker& w, Node* p, Graph& g, bool detach
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
if(detach) {
n->_parent = nullptr;
n->_state.fetch_or(Node::DETACHED, std::memory_order_relaxed);
}
else {
n->_parent = p;
}
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
// detach here
if(detach) {
{
std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex);
p->_topology->_taskflow._graph._merge(std::move(g));
}
p->_topology->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// join here
else {
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_consume_task(w, p);
}
}
// Procedure: _invoke_dynamic_task_internal
inline void Executor::_invoke_dynamic_task_internal(
Worker& w, Node* p, Graph& g
) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter == 0) {
return;
}
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
_consume_task(w, p);
}
// Procedure: _invoke_module_task_internal
inline void Executor::_invoke_module_task_internal(
Worker& w, Node* p, Graph& g, bool& deferred
) {
// graph is empty and has no async tasks
if(g.empty()) {
return;
}
// set deferred
deferred = true;
p->_state.fetch_or(Node::DEFERRED, std::memory_order_relaxed);
SmallVector<Node*> src;
for(auto n : g._nodes) {
n->_topology = p->_topology;
n->_state.store(0, std::memory_order_relaxed);
n->_set_up_join_counter();
n->_parent = p;
if(n->num_dependents() == 0) {
src.push_back(n);
}
}
p->_join_counter.fetch_add(src.size());
_schedule(w, src);
}
// Procedure: _invoke_condition_task
inline void Executor::_invoke_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = { std::get_if<Node::Condition>(&node->_handle)->work() };
_observer_epilogue(worker, node);
}
// Procedure: _invoke_multi_condition_task
inline void Executor::_invoke_multi_condition_task(
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
conds = std::get_if<Node::MultiCondition>(&node->_handle)->work();
_observer_epilogue(worker, node);
}
// Procedure: _invoke_cudaflow_task
inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::cudaFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_syclflow_task
inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
std::get_if<Node::syclFlow>(&node->_handle)->work(*this, node);
_observer_epilogue(worker, node);
}
// Procedure: _invoke_module_task
inline void Executor::_invoke_module_task(Worker& w, Node* node, bool& deferred) {
_observer_prologue(w, node);
_invoke_module_task_internal(
w, node, std::get_if<Node::Module>(&node->_handle)->graph, deferred
);
_observer_epilogue(w, node);
}
// Procedure: _invoke_async_task
inline void Executor::_invoke_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::Async>(&node->_handle)->work(false);
_observer_epilogue(w, node);
}
// Procedure: _invoke_silent_async_task
inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) {
_observer_prologue(w, node);
std::get_if<Node::SilentAsync>(&node->_handle)->work();
_observer_epilogue(w, node);
}
// Procedure: _invoke_runtime_task
inline void Executor::_invoke_runtime_task(Worker& w, Node* node) {
_observer_prologue(w, node);
Runtime rt(*this, w, node);
std::get_if<Node::Runtime>(&node->_handle)->work(rt);
_observer_epilogue(w, node);
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow& f) {
return run_n(f, 1, [](){});
}
// Function: run
inline tf::Future<void> Executor::run(Taskflow&& f) {
return run_n(std::move(f), 1, [](){});
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow& f, C&& c) {
return run_n(f, 1, std::forward<C>(c));
}
// Function: run
template <typename C>
tf::Future<void> Executor::run(Taskflow&& f, C&& c) {
return run_n(std::move(f), 1, std::forward<C>(c));
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat) {
return run_n(f, repeat, [](){});
}
// Function: run_n
inline tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat) {
return run_n(std::move(f), repeat, [](){});
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow& f, size_t repeat, C&& c) {
return run_until(
f, [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_n
template <typename C>
tf::Future<void> Executor::run_n(Taskflow&& f, size_t repeat, C&& c) {
return run_until(
std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward<C>(c)
);
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow& f, P&& pred) {
return run_until(f, std::forward<P>(pred), [](){});
}
// Function: run_until
template<typename P>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) {
return run_until(std::move(f), std::forward<P>(pred), [](){});
}
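// A minimal usage sketch (the executor, taskflow, and counter below are
// illustrative and not part of this header): run_until re-runs the taskflow
// until the predicate returns true and then invokes the optional callback.
//
// @code{.cpp}
// tf::Executor executor;
// tf::Taskflow taskflow;
// taskflow.emplace([](){ std::cout << "one round\n"; });
// int rounds = 0;
// executor.run_until(
//   taskflow,
//   [&rounds] () { return ++rounds == 4; },  // stop once the predicate is true
//   [] () { std::cout << "done\n"; }         // invoked after the final round
// );
// executor.wait_for_all();
// @endcode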
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) {
_increment_topology();
// Need to check emptiness under the lock since a dynamic task may
// define detached blocks that modify the taskflow at the same time
bool empty;
{
std::lock_guard<std::mutex> lock(f._mutex);
empty = f.empty();
}
// No need to create a real topology; just return a dummy future
if(empty || p()) {
c();
std::promise<void> promise;
promise.set_value();
_decrement_topology_and_notify();
return tf::Future<void>(promise.get_future(), std::monostate{});
}
// create a topology for this run
auto t = std::make_shared<Topology>(f, std::forward<P>(p), std::forward<C>(c));
// need to create the future before the topology gets torn down quickly
tf::Future<void> future(t->_promise.get_future(), t);
// modifying topology needs to be protected under the lock
{
std::lock_guard<std::mutex> lock(f._mutex);
f._topologies.push(t);
if(f._topologies.size() == 1) {
_set_up_topology(_this_worker(), t.get());
}
}
return future;
}
// Function: run_until
template <typename P, typename C>
tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) {
std::list<Taskflow>::iterator itr;
{
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
itr = _taskflows.emplace(_taskflows.end(), std::move(f));
itr->_satellite = itr;
}
return run_until(*itr, std::forward<P>(pred), std::forward<C>(c));
}
// Procedure: _increment_topology
inline void Executor::_increment_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
++_num_topologies;
}
// Procedure: _decrement_topology_and_notify
inline void Executor::_decrement_topology_and_notify() {
std::lock_guard<std::mutex> lock(_topology_mutex);
if(--_num_topologies == 0) {
_topology_cv.notify_all();
}
}
// Procedure: _decrement_topology
inline void Executor::_decrement_topology() {
std::lock_guard<std::mutex> lock(_topology_mutex);
--_num_topologies;
}
// Procedure: wait_for_all
inline void Executor::wait_for_all() {
std::unique_lock<std::mutex> lock(_topology_mutex);
_topology_cv.wait(lock, [&](){ return _num_topologies == 0; });
}
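// A minimal usage sketch (names are illustrative): wait_for_all blocks the
// caller until every topology submitted through run, run_n, or run_until
// has finished.
//
// @code{.cpp}
// tf::Executor executor;
// tf::Taskflow taskflow;
// taskflow.emplace([](){ std::cout << "task\n"; });
// executor.run(taskflow);       // one run
// executor.run_n(taskflow, 4);  // four more runs
// executor.wait_for_all();      // blocks until all five runs complete
// @endcode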
// Function: _set_up_topology
inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) {
// ---- under taskflow lock ----
tpg->_sources.clear();
tpg->_taskflow._graph._clear_detached();
// scan each node in the graph and build up the links
for(auto node : tpg->_taskflow._graph._nodes) {
node->_topology = tpg;
node->_state.store(0, std::memory_order_relaxed);
if(node->num_dependents() == 0) {
tpg->_sources.push_back(node);
}
node->_set_up_join_counter();
}
tpg->_join_counter = tpg->_sources.size();
if(worker) {
_schedule(*worker, tpg->_sources);
}
else {
_schedule(tpg->_sources);
}
}
// Function: _tear_down_topology
inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) {
auto &f = tpg->_taskflow;
//assert(&tpg == &(f._topologies.front()));
// case 1: we still need to run the topology again
if(!tpg->_is_cancelled && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
tpg->_join_counter = tpg->_sources.size();
_schedule(worker, tpg->_sources);
}
// case 2: the final run of this topology
else {
// TODO: if the topology is cancelled, need to release all semaphores
if(tpg->_call != nullptr) {
tpg->_call();
}
// If there is another run (interleave between lock)
if(std::unique_lock<std::mutex> lock(f._mutex); f._topologies.size()>1) {
//assert(tpg->_join_counter == 0);
// Set the promise
tpg->_promise.set_value();
f._topologies.pop();
tpg = f._topologies.front().get();
// decrement the topology count but, since this is not the last one, we don't notify
_decrement_topology();
// setting up the topology needs to be done under the lock, or it can
// introduce a memory-order error with pop
_set_up_topology(&worker, tpg);
}
else {
//assert(f._topologies.size() == 1);
// Need to back up the promise first here because the taskflow might be
// destroyed soon after calling get
auto p {std::move(tpg->_promise)};
// Back up the lambda capture in case it holds the topology pointer,
// to avoid releasing it on pop_front ahead of _mutex.unlock and
// _promise.set_value. It is released safely when leaving the scope.
auto c {std::move(tpg->_call)};
// Get the satellite if any
auto s {f._satellite};
// Now we remove the topology from this taskflow
f._topologies.pop();
//f._mutex.unlock();
lock.unlock();
// We set the promise in the end in case taskflow leaves the scope.
// After set_value, the caller will return from wait
p.set_value();
_decrement_topology_and_notify();
// remove the taskflow if it is managed by the executor
// TODO: in the future, we may need to synchronize on wait
// (which means the following code should be moved before set_value)
if(s) {
std::scoped_lock<std::mutex> lock(_taskflow_mutex);
_taskflows.erase(*s);
}
}
}
}
// ############################################################################
// Forward Declaration: Subflow
// ############################################################################
inline void Subflow::join() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow not joinable");
}
// only the parent worker can join the subflow
_executor._invoke_dynamic_task_external(_worker, _parent, _graph, false);
_joinable = false;
}
inline void Subflow::detach() {
// assert(this_worker().worker == &_worker);
if(!_joinable) {
TF_THROW("subflow already joined or detached");
}
// only the parent worker can detach the subflow
_executor._invoke_dynamic_task_external(_worker, _parent, _graph, true);
_joinable = false;
}
// Function: named_async
template <typename F, typename... ArgsT>
auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) {
return _named_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: _named_async
template <typename F, typename... ArgsT>
auto Subflow::_named_async(
Worker& w,
const std::string& name,
F&& f,
ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
using T = std::invoke_result_t<F, ArgsT...>;
using R = std::conditional_t<std::is_same_v<T, void>, void, std::optional<T>>;
std::promise<R> p;
auto tpg = std::make_shared<AsyncTopology>();
Future<R> fu(p.get_future(), tpg);
auto node = node_pool.animate(
std::in_place_type_t<Node::Async>{},
[p=make_moc(std::move(p)), f=std::forward<F>(f), args...]
(bool cancel) mutable {
if constexpr(std::is_same_v<R, void>) {
if(!cancel) {
f(args...);
}
p.object.set_value();
}
else {
p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...)));
}
},
std::move(tpg)
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
return fu;
}
// Function: async
template <typename F, typename... ArgsT>
auto Subflow::async(F&& f, ArgsT&&... args) {
return named_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// Function: _named_silent_async
template <typename F, typename... ArgsT>
void Subflow::_named_silent_async(
Worker& w, const std::string& name, F&& f, ArgsT&&... args
) {
_parent->_join_counter.fetch_add(1);
auto node = node_pool.animate(
std::in_place_type_t<Node::SilentAsync>{},
[f=std::forward<F>(f), args...] () mutable {
f(args...);
}
);
node->_name = name;
node->_topology = _parent->_topology;
node->_parent = _parent;
_executor._schedule(w, node);
}
// Function: named_silent_async
template <typename F, typename... ArgsT>
void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) {
_named_silent_async(
*_executor._this_worker(), name, std::forward<F>(f), std::forward<ArgsT>(args)...
);
}
// Function: silent_async
template <typename F, typename... ArgsT>
void Subflow::silent_async(F&& f, ArgsT&&... args) {
named_silent_async("", std::forward<F>(f), std::forward<ArgsT>(args)...);
}
// ############################################################################
// Forward Declaration: Runtime
// ############################################################################
// Procedure: schedule
inline void Runtime::schedule(Task task) {
auto node = task._node;
auto& j = node->_parent ? node->_parent->_join_counter :
node->_topology->_join_counter;
j.fetch_add(1);
_executor._schedule(_worker, node);
}
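// A minimal usage sketch (task names are illustrative): a runtime task can
// use Runtime::schedule to explicitly schedule a task that the scheduler
// would otherwise skip, e.g., a successor not selected by a condition task.
//
// @code{.cpp}
// tf::Taskflow taskflow;
// tf::Task A, B, C;
// A = taskflow.emplace([](){ return 0; });                          // condition task
// C = taskflow.emplace([](){ std::cout << "C\n"; });
// B = taskflow.emplace([&C](tf::Runtime& rt){ rt.schedule(C); });   // runtime task
// A.precede(B, C);  // A returns 0, so only B runs; B then schedules C explicitly
// @endcode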
// Procedure: run
template <typename C>
void Runtime::run(C&& callable) {
// dynamic task (subflow)
if constexpr(is_dynamic_task_v<C>) {
Graph graph;
Subflow sf(_executor, _worker, _parent, graph);
callable(sf);
if(sf._joinable) {
_executor._invoke_dynamic_task_internal(_worker, _parent, graph);
}
}
else {
static_assert(dependent_false_v<C>, "unsupported task callable to run");
}
}
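// A minimal usage sketch (assuming a taskflow built elsewhere): Runtime::run
// spawns and joins a subflow from within a runtime task using the
// dynamic-task machinery above.
//
// @code{.cpp}
// taskflow.emplace([](tf::Runtime& rt){
//   rt.run([](tf::Subflow& sf){
//     sf.emplace([](){ std::cout << "subtask 1\n"; });
//     sf.emplace([](){ std::cout << "subtask 2\n"; });
//   });  // the spawned subflow is joined before rt.run returns
// });
// @endcode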
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/worker.hpp | #pragma once
#include "declarations.hpp"
#include "tsq.hpp"
#include "notifier.hpp"
/**
@file worker.hpp
@brief worker include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// Class Definition: Worker
// ----------------------------------------------------------------------------
/**
@class Worker
@brief class to create a worker in an executor
The class is primarily used by the executor to perform the work-stealing algorithm.
Users can access a worker object and alter its properties
(e.g., changing the thread affinity in a POSIX-like system)
using tf::WorkerInterface.
*/
class Worker {
friend class Executor;
friend class WorkerView;
public:
/**
@brief queries the worker id associated with its parent executor
A worker id is an unsigned integer in the range <tt>[0, N)</tt>,
where @c N is the number of workers spawned at the construction
time of the executor.
*/
inline size_t id() const { return _id; }
/**
@brief acquires a pointer access to the underlying thread
*/
inline std::thread* thread() const { return _thread; }
/**
@brief queries the size of the queue (i.e., number of enqueued tasks to
run) associated with the worker
*/
inline size_t queue_size() const { return _wsq.size(); }
/**
@brief queries the current capacity of the queue
*/
inline size_t queue_capacity() const { return static_cast<size_t>(_wsq.capacity()); }
private:
size_t _id;
size_t _vtm;
Executor* _executor;
std::thread* _thread;
Notifier::Waiter* _waiter;
std::default_random_engine _rdgen { std::random_device{}() };
TaskQueue<Node*> _wsq;
};
// ----------------------------------------------------------------------------
// Class Definition: PerThreadWorker
// ----------------------------------------------------------------------------
/**
@private
*/
//struct PerThreadWorker {
//
// Worker* worker;
//
// PerThreadWorker() : worker {nullptr} {}
//
// PerThreadWorker(const PerThreadWorker&) = delete;
// PerThreadWorker(PerThreadWorker&&) = delete;
//
// PerThreadWorker& operator = (const PerThreadWorker&) = delete;
// PerThreadWorker& operator = (PerThreadWorker&&) = delete;
//};
/**
@private
*/
//inline PerThreadWorker& this_worker() {
// thread_local PerThreadWorker worker;
// return worker;
//}
// ----------------------------------------------------------------------------
// Class Definition: WorkerView
// ----------------------------------------------------------------------------
/**
@class WorkerView
@brief class to create an immutable view of a worker in an executor
An executor keeps a set of internal worker threads to run tasks.
A worker view provides users an immutable interface to observe
when a worker runs a task, and the view object is only accessible
from an observer derived from tf::ObserverInterface.
*/
class WorkerView {
friend class Executor;
public:
/**
@brief queries the worker id associated with its parent executor
A worker id is an unsigned integer in the range <tt>[0, N)</tt>,
where @c N is the number of workers spawned at the construction
time of the executor.
*/
size_t id() const;
/**
@brief queries the size of the queue (i.e., number of pending tasks to
run) associated with the worker
*/
size_t queue_size() const;
/**
@brief queries the current capacity of the queue
*/
size_t queue_capacity() const;
private:
WorkerView(const Worker&);
WorkerView(const WorkerView&) = default;
const Worker& _worker;
};
// Constructor
inline WorkerView::WorkerView(const Worker& w) : _worker{w} {
}
// function: id
inline size_t WorkerView::id() const {
return _worker._id;
}
// Function: queue_size
inline size_t WorkerView::queue_size() const {
return _worker._wsq.size();
}
// Function: queue_capacity
inline size_t WorkerView::queue_capacity() const {
return static_cast<size_t>(_worker._wsq.capacity());
}
// ----------------------------------------------------------------------------
// Class Definition: WorkerInterface
// ----------------------------------------------------------------------------
/**
@class WorkerInterface
@brief class to configure worker behavior in an executor
The tf::WorkerInterface class lets users interact with the executor
to customize the worker behavior,
such as calling custom methods before and after a worker enters and leaves
the loop.
When you create an executor, it spawns a set of workers to run tasks.
The interaction between the executor and its spawned workers looks like
the following:
@code{.cpp}
for(size_t n=0; n<num_workers; n++) {
create_thread([](Worker& worker)
// pre-processing executor-specific worker information
// ...
// enter the scheduling loop
// Here, WorkerInterface::scheduler_prologue is invoked, if any
while(1) {
perform_work_stealing_algorithm();
if(stop) {
break;
}
}
// leaves the scheduling loop and joins this worker thread
// Here, WorkerInterface::scheduler_epilogue is invoked, if any
);
}
@endcode
@note
Methods defined in tf::WorkerInterface are not thread-safe and may be
invoked by multiple workers concurrently.
*/
class WorkerInterface {
public:
/**
@brief default destructor
*/
virtual ~WorkerInterface() = default;
/**
@brief method to call before a worker enters the scheduling loop
@param worker a reference to the worker
The method is called by the constructor of an executor.
*/
virtual void scheduler_prologue(Worker& worker) = 0;
/**
@brief method to call after a worker leaves the scheduling loop
@param worker a reference to the worker
@param ptr a pointer to the exception thrown by the scheduling loop
The method is called by the constructor of an executor.
*/
virtual void scheduler_epilogue(Worker& worker, std::exception_ptr ptr) = 0;
};
/**
@fn make_worker_interface
@brief helper function to create an instance derived from tf::WorkerInterface
@param args arguments to forward to the constructor of @c T
*/
template <typename T, typename... ArgsT>
std::shared_ptr<T> make_worker_interface(ArgsT&&... args) {
static_assert(
std::is_base_of_v<WorkerInterface, T>,
"T must be derived from WorkerInterface"
);
return std::make_shared<T>(std::forward<ArgsT>(args)...);
}
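// A minimal sketch of a user-defined worker interface (the class name is
// illustrative, and how the interface is attached to an executor may differ
// across versions):
//
// @code{.cpp}
// class MyWorkerInterface : public tf::WorkerInterface {
// public:
//   void scheduler_prologue(tf::Worker& w) override {
//     // e.g., pin the worker thread to a core using w.id() and w.thread()
//     std::cout << "worker " << w.id() << " enters the scheduling loop\n";
//   }
//   void scheduler_epilogue(tf::Worker& w, std::exception_ptr) override {
//     std::cout << "worker " << w.id() << " leaves the scheduling loop\n";
//   }
// };
// auto wix = tf::make_worker_interface<MyWorkerInterface>();
// @endcode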
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/core/flow_builder.hpp | #pragma once
#include "task.hpp"
/**
@file flow_builder.hpp
@brief flow builder include file
*/
namespace tf {
/**
@class FlowBuilder
@brief class to build a task dependency graph
The class provides essential methods to construct a task dependency graph
from which tf::Taskflow and tf::Subflow are derived.
*/
class FlowBuilder {
friend class Executor;
public:
/**
@brief constructs a flow builder with a graph
*/
FlowBuilder(Graph& graph);
/**
@brief creates a static task
@tparam C callable type constructible from std::function<void()>
@param callable callable to construct a static task
@return a tf::Task handle
The following example creates a static task.
@code{.cpp}
tf::Task static_task = taskflow.emplace([](){});
@endcode
Please refer to @ref StaticTasking for details.
*/
template <typename C,
std::enable_if_t<is_static_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a dynamic task
@tparam C callable type constructible from std::function<void(tf::Subflow&)>
@param callable callable to construct a dynamic task
@return a tf::Task handle
The following example creates a dynamic task (tf::Subflow)
that spawns two static tasks.
@code{.cpp}
tf::Task dynamic_task = taskflow.emplace([](tf::Subflow& sf){
tf::Task static_task1 = sf.emplace([](){});
tf::Task static_task2 = sf.emplace([](){});
});
@endcode
Please refer to @ref DynamicTasking for details.
*/
template <typename C,
std::enable_if_t<is_dynamic_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a condition task
@tparam C callable type constructible from std::function<int()>
@param callable callable to construct a condition task
@return a tf::Task handle
The following example creates an if-else block using one condition task
and three static tasks.
@code{.cpp}
tf::Taskflow taskflow;
auto [init, cond, yes, no] = taskflow.emplace(
[] () { },
[] () { return 0; },
[] () { std::cout << "yes\n"; },
[] () { std::cout << "no\n"; }
);
// executes yes if cond returns 0, or no if cond returns 1
cond.precede(yes, no);
cond.succeed(init);
@endcode
Please refer to @ref ConditionalTasking for details.
*/
template <typename C,
std::enable_if_t<is_condition_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a multi-condition task
@tparam C callable type constructible from
std::function<tf::SmallVector<int>()>
@param callable callable to construct a multi-condition task
@return a tf::Task handle
The following example creates a multi-condition task that selectively
jumps to two successor tasks.
@code{.cpp}
tf::Taskflow taskflow;
auto [init, cond, branch1, branch2, branch3] = taskflow.emplace(
[] () { },
[] () { return tf::SmallVector{0, 2}; },
[] () { std::cout << "branch1\n"; },
[] () { std::cout << "branch2\n"; },
[] () { std::cout << "branch3\n"; }
);
// executes branch1 and branch3 when cond returns 0 and 2
cond.precede(branch1, branch2, branch3);
cond.succeed(init);
@endcode
Please refer to @ref ConditionalTasking for details.
*/
template <typename C,
std::enable_if_t<is_multi_condition_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates multiple tasks from a list of callable objects
@tparam C callable types
@param callables one or multiple callable objects constructible from each task category
@return a tf::Task handle
The method returns a tuple of tasks each corresponding to the given
callable target. You can use structured binding to get the returned tasks
one by one.
The following example creates four static tasks and assigns them to
@c A, @c B, @c C, and @c D using structured binding.
@code{.cpp}
auto [A, B, C, D] = taskflow.emplace(
[] () { std::cout << "A"; },
[] () { std::cout << "B"; },
[] () { std::cout << "C"; },
[] () { std::cout << "D"; }
);
@endcode
*/
template <typename... C, std::enable_if_t<(sizeof...(C)>1), void>* = nullptr>
auto emplace(C&&... callables);
/**
@brief removes a task from a taskflow
@param task task to remove
Removes a task and its input and output dependencies from the graph
associated with the flow builder.
If the task does not belong to the graph, nothing will happen.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
A.precede(B, C, D);
// erase A from the taskflow and its dependencies to B, C, and D
taskflow.erase(A);
@endcode
*/
void erase(Task task);
/**
@brief creates a module task for the target object
@tparam T target object type
@param object a custom object that defines the method @c T::graph()
@return a tf::Task handle
The example below demonstrates a taskflow composition using
the @c composed_of method.
@code{.cpp}
tf::Taskflow t1, t2;
t1.emplace([](){ std::cout << "t1"; });
// t2 is partially composed of t1
tf::Task comp = t2.composed_of(t1);
tf::Task init = t2.emplace([](){ std::cout << "t2"; });
init.precede(comp);
@endcode
The taskflow object @c t2 is composed of another taskflow object @c t1,
preceded by another static task @c init.
When taskflow @c t2 is submitted to an executor,
@c init will run first and then @c comp, which spawns its definition
in taskflow @c t1.
The target @c object being composed must define the method
<tt>T::graph()</tt> that returns a reference to a graph object of
type tf::Graph such that it can interact with the executor.
For example:
@code{.cpp}
// custom struct
struct MyObj {
tf::Graph _graph;
MyObj() {
tf::FlowBuilder builder(_graph);
tf::Task task = builder.emplace([](){
std::cout << "a task\n"; // static task
});
}
tf::Graph& graph() { return _graph; }
};
MyObj obj;
tf::Task comp = taskflow.composed_of(obj);
@endcode
Please refer to @ref ComposableTasking for details.
*/
template <typename T>
Task composed_of(T& object);
/**
@brief creates a placeholder task
@return a tf::Task handle
A placeholder task maps to a node in the taskflow graph, but
it does not have any callable work assigned yet.
A placeholder task is different from an empty task handle that
does not point to any node in a graph.
@code{.cpp}
// create a placeholder task with no callable target assigned
tf::Task placeholder = taskflow.placeholder();
assert(placeholder.empty() == false && placeholder.has_work() == false);
// create an empty task handle
tf::Task task;
assert(task.empty() == true);
// assign the task handle to the placeholder task
task = placeholder;
assert(task.empty() == false && task.has_work() == false);
@endcode
*/
Task placeholder();
/**
@brief creates a %cudaFlow task on the caller's GPU device context
@tparam C callable type constructible from @c std::function<void(tf::cudaFlow&)>
@return a tf::Task handle
This method is equivalent to calling tf::FlowBuilder::emplace_on(callable, d)
where @c d is the caller's device context.
The following example creates a %cudaFlow of two kernel tasks, @c task1 and
@c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace([&](tf::cudaFlow& cf){
// create two kernel tasks
tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);
// kernel1 runs before kernel2
task1.precede(task2);
});
@endcode
Please refer to @ref GPUTaskingcudaFlow and @ref GPUTaskingcudaFlowCapturer
for details.
*/
template <typename C,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief creates a %cudaFlow task on the given device
@tparam C callable type constructible from std::function<void(tf::cudaFlow&)>
@tparam D device type, either @c int or @c std::ref<int> (stateful)
@return a tf::Task handle
The following example creates a %cudaFlow of two kernel tasks, @c task1 and
@c task2 on GPU @c 2, where @c task1 runs before @c task2
@code{.cpp}
taskflow.emplace_on([&](tf::cudaFlow& cf){
// create two kernel tasks
tf::cudaTask task1 = cf.kernel(grid1, block1, shm1, kernel1, args1);
tf::cudaTask task2 = cf.kernel(grid2, block2, shm2, kernel2, args2);
// kernel1 runs before kernel2
task1.precede(task2);
}, 2);
@endcode
*/
template <typename C, typename D,
std::enable_if_t<is_cudaflow_task_v<C>, void>* = nullptr
>
Task emplace_on(C&& callable, D&& device);
/**
@brief creates a %syclFlow task on the default queue
@tparam C callable type constructible from std::function<void(tf::syclFlow&)>
@param callable a callable that takes a referenced tf::syclFlow object
@return a tf::Task handle
The following example creates a %syclFlow on the default queue to submit
two kernel tasks, @c task1 and @c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace([&](tf::syclFlow& cf){
// create two single-thread kernel tasks
tf::syclTask task1 = cf.single_task([](){});
tf::syclTask task2 = cf.single_task([](){});
// kernel1 runs before kernel2
task1.precede(task2);
});
@endcode
*/
template <typename C, std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr>
Task emplace(C&& callable);
/**
@brief creates a %syclFlow task on the given queue
@tparam C callable type constructible from std::function<void(tf::syclFlow&)>
@tparam Q queue type
@param callable a callable that takes a referenced tf::syclFlow object
@param queue a queue of type sycl::queue
@return a tf::Task handle
The following example creates a %syclFlow on the given queue to submit
two kernel tasks, @c task1 and @c task2, where @c task1 runs before @c task2.
@code{.cpp}
taskflow.emplace_on([&](tf::syclFlow& cf){
// create two single-thread kernel tasks
tf::syclTask task1 = cf.single_task([](){});
tf::syclTask task2 = cf.single_task([](){});
// kernel1 runs before kernel2
task1.precede(task2);
}, queue);
@endcode
*/
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>* = nullptr
>
Task emplace_on(C&& callable, Q&& queue);
/**
@brief creates a runtime task
@tparam C callable type constructible from std::function<void(tf::Runtime&)>
@param callable callable to construct a runtime task
@return a tf::Task handle
The following example creates a runtime task that enables in-task
control over the running executor.
@code{.cpp}
tf::Task runtime_task = taskflow.emplace([](tf::Runtime& rt){
auto& executor = rt.executor();
std::cout << executor.num_workers() << '\n';
});
@endcode
Please refer to @ref RuntimeTasking for details.
*/
template <typename C,
std::enable_if_t<is_runtime_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
/**
@brief adds adjacent dependency links to a linear list of tasks
@param tasks a vector of tasks
This member function creates linear dependencies over a vector of tasks.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
std::vector<tf::Task> tasks {A, B, C, D};
taskflow.linearize(tasks); // A->B->C->D
@endcode
*/
void linearize(std::vector<Task>& tasks);
/**
@brief adds adjacent dependency links to a linear list of tasks
@param tasks an initializer list of tasks
This member function creates linear dependencies over a list of tasks.
@code{.cpp}
tf::Task A = taskflow.emplace([](){ std::cout << "A"; });
tf::Task B = taskflow.emplace([](){ std::cout << "B"; });
tf::Task C = taskflow.emplace([](){ std::cout << "C"; });
tf::Task D = taskflow.emplace([](){ std::cout << "D"; });
taskflow.linearize({A, B, C, D}); // A->B->C->D
@endcode
*/
void linearize(std::initializer_list<Task> tasks);
// ------------------------------------------------------------------------
// parallel iterations
// ------------------------------------------------------------------------
/**
@brief constructs a STL-styled parallel-for task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable a callable object to apply to the dereferenced iterator
@return a tf::Task handle
The task spawns a subflow that applies the callable object to each object
obtained by dereferencing every iterator in the range <tt>[first, last)</tt>.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
callable(*itr);
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of
the dereferenced iterator type.
Please refer to @ref ParallelIterations for details.
*/
template <typename B, typename E, typename C>
Task for_each(B first, E last, C callable);
/**
@brief constructs an index-based parallel-for task
@tparam B beginning index type (must be integral)
@tparam E ending index type (must be integral)
@tparam S step type (must be integral)
@tparam C callable type
@param first index of the beginning (inclusive)
@param last index of the end (exclusive)
@param step step size
@param callable a callable object to apply to each valid index
@return a tf::Task handle
The task spawns a subflow that applies the callable object to each index
in the range <tt>[first, last)</tt> with the step size.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
// case 1: step size is positive
for(auto i=first; i<last; i+=step) {
callable(i);
}
// case 2: step size is negative
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of the integral index type.
Please refer to @ref ParallelIterations for details.
*/
template <typename B, typename E, typename S, typename C>
Task for_each_index(B first, E last, S step, C callable);
// ------------------------------------------------------------------------
// transform
// ------------------------------------------------------------------------
/**
@brief constructs a parallel-transform task
@tparam B beginning input iterator type
@tparam E ending input iterator type
@tparam O output iterator type
@tparam C callable type
@param first1 iterator to the beginning of the first range
@param last1 iterator to the end of the first range
@param d_first iterator to the beginning of the output range
@param c a unary callable to apply to dereferenced input elements
@return a tf::Task handle
The task spawns a subflow that applies the callable object to an
input range and stores the result in another output range.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
while (first1 != last1) {
*d_first++ = c(*first1++);
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take a single argument of the dereferenced
iterator type.
*/
template <typename B, typename E, typename O, typename C>
Task transform(B first1, E last1, O d_first, C c);
/**
@brief constructs a parallel-transform task
@tparam B1 beginning input iterator type for the first input range
@tparam E1 ending input iterator type for the first input range
@tparam B2 beginning input iterator type for the second input range
@tparam O output iterator type
@tparam C callable type
@param first1 iterator to the beginning of the first input range
@param last1 iterator to the end of the first input range
@param first2 iterator to the beginning of the second input range
@param d_first iterator to the beginning of the output range
@param c a binary operator to apply to dereferenced input elements
@return a tf::Task handle
The task spawns a subflow that applies the callable object to two
input ranges and stores the result in another output range.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
while (first1 != last1) {
*d_first++ = c(*first1++, *first2++);
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
The callable needs to take two arguments of dereferenced elements
from the two input ranges.
*/
template <typename B1, typename E1, typename B2, typename O, typename C>
Task transform(B1 first1, E1 last1, B2 first2, O d_first, C c);
// ------------------------------------------------------------------------
// reduction
// ------------------------------------------------------------------------
/**
@brief constructs a STL-styled parallel-reduce task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam T result type
@tparam O binary reducer type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param init initial value of the reduction and the storage for the reduced result
@param bop binary operator that will be applied
@return a tf::Task handle
The task spawns a subflow to perform parallel reduction over @c init
and the elements in the range <tt>[first, last)</tt>.
The reduced result is stored in @c init.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
init = bop(init, *itr);
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelReduction for details.
*/
template <typename B, typename E, typename T, typename O>
Task reduce(B first, E last, T& init, O bop);
// ------------------------------------------------------------------------
// transform and reduction
// ------------------------------------------------------------------------
/**
@brief constructs a STL-styled parallel transform-reduce task
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam T result type
@tparam BOP binary reducer type
@tparam UOP unary transformation type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param init initial value of the reduction and the storage for the reduced result
@param bop binary operator that will be applied in unspecified order to the results of @c uop
@param uop unary operator that will be applied to transform each element in the range to the result type
@return a tf::Task handle
The task spawns a subflow to perform parallel reduction over @c init and
the transformed elements in the range <tt>[first, last)</tt>.
The reduced result is stored in @c init.
This method is equivalent to the parallel execution of the following loop:
@code{.cpp}
for(auto itr=first; itr!=last; itr++) {
init = bop(init, uop(*itr));
}
@endcode
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelReduction for details.
*/
template <typename B, typename E, typename T, typename BOP, typename UOP>
Task transform_reduce(B first, E last, T& init, BOP bop, UOP uop);
// ------------------------------------------------------------------------
// sort
// ------------------------------------------------------------------------
/**
@brief constructs a dynamic task to perform STL-styled parallel sort
@tparam B beginning iterator type (random-accessible)
@tparam E ending iterator type (random-accessible)
@tparam C comparator type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param cmp comparison function object
The task spawns a subflow to sort elements in parallel in the range
<tt>[first, last)</tt>.
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelSort for details.
*/
template <typename B, typename E, typename C>
Task sort(B first, E last, C cmp);
/**
@brief constructs a dynamic task to perform STL-styled parallel sort using
the @c std::less<T> comparator, where @c T is the element type
@tparam B beginning iterator type (random-accessible)
@tparam E ending iterator type (random-accessible)
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
The task spawns a subflow to sort elements in parallel in the range
<tt>[first, last)</tt> using the @c std::less<T> comparator,
where @c T is the dereferenced iterator type.
Arguments are templated to enable stateful range using std::reference_wrapper.
Please refer to @ref ParallelSort for details.
*/
template <typename B, typename E>
Task sort(B first, E last);
protected:
/**
@brief associated graph object
*/
Graph& _graph;
private:
template <typename L>
void _linearize(L&);
};
// Constructor
inline FlowBuilder::FlowBuilder(Graph& graph) :
_graph {graph} {
}
// Function: emplace
template <typename C, std::enable_if_t<is_static_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Static>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_dynamic_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Dynamic>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Condition>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_multi_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::MultiCondition>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_runtime_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back(
std::in_place_type_t<Node::Runtime>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename... C, std::enable_if_t<(sizeof...(C)>1), void>*>
auto FlowBuilder::emplace(C&&... cs) {
return std::make_tuple(emplace(std::forward<C>(cs))...);
}
// Function: erase
inline void FlowBuilder::erase(Task task) {
if (!task._node) {
return;
}
task.for_each_dependent([&] (Task dependent) {
auto& S = dependent._node->_successors;
if(auto I = std::find(S.begin(), S.end(), task._node); I != S.end()) {
S.erase(I);
}
});
task.for_each_successor([&] (Task dependent) {
auto& D = dependent._node->_dependents;
if(auto I = std::find(D.begin(), D.end(), task._node); I != D.end()) {
D.erase(I);
}
});
_graph._erase(task._node);
}
// Function: composed_of
template <typename T>
Task FlowBuilder::composed_of(T& object) {
auto node = _graph._emplace_back(
std::in_place_type_t<Node::Module>{}, object
);
return Task(node);
}
// Function: placeholder
inline Task FlowBuilder::placeholder() {
auto node = _graph._emplace_back();
return Task(node);
}
// Procedure: _linearize
template <typename L>
void FlowBuilder::_linearize(L& keys) {
auto itr = keys.begin();
auto end = keys.end();
if(itr == end) {
return;
}
auto nxt = itr;
for(++nxt; nxt != end; ++nxt, ++itr) {
itr->_node->_precede(nxt->_node);
}
}
// Procedure: linearize
inline void FlowBuilder::linearize(std::vector<Task>& keys) {
_linearize(keys);
}
// Procedure: linearize
inline void FlowBuilder::linearize(std::initializer_list<Task> keys) {
_linearize(keys);
}
// ----------------------------------------------------------------------------
/**
@class Subflow
@brief class to construct a subflow graph from the execution of a dynamic task
By default, a subflow automatically @em joins its parent node.
You may explicitly join or detach a subflow by calling tf::Subflow::join
or tf::Subflow::detach, respectively.
The following example creates a taskflow graph that spawns a subflow from
the execution of task @c B, and the subflow contains three tasks, @c B1,
@c B2, and @c B3, where @c B3 runs after @c B1 and @c B2.
@code{.cpp}
// create three static tasks
tf::Task A = taskflow.emplace([](){}).name("A");
tf::Task C = taskflow.emplace([](){}).name("C");
tf::Task D = taskflow.emplace([](){}).name("D");
// create a subflow graph (dynamic tasking)
tf::Task B = taskflow.emplace([] (tf::Subflow& subflow) {
tf::Task B1 = subflow.emplace([](){}).name("B1");
tf::Task B2 = subflow.emplace([](){}).name("B2");
tf::Task B3 = subflow.emplace([](){}).name("B3");
B1.precede(B3);
B2.precede(B3);
}).name("B");
A.precede(B); // B runs after A
A.precede(C); // C runs after A
B.precede(D); // D runs after B
C.precede(D); // D runs after C
@endcode
*/
class Subflow : public FlowBuilder {
friend class Executor;
friend class FlowBuilder;
friend class Runtime;
public:
/**
@brief enables the subflow to join its parent task
Performs an immediate action to join the subflow. Once the subflow is joined,
it is considered finished and you may not modify the subflow anymore.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
sf.join(); // join the subflow of one task
});
@endcode
Only the worker that spawns this subflow can join it.
*/
void join();
/**
@brief enables the subflow to detach from its parent task
Performs an immediate action to detach the subflow. Once the subflow is detached,
it is considered finished and you may not modify the subflow anymore.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
sf.detach();
});
@endcode
Only the worker that spawns this subflow can detach it.
*/
void detach();
/**
@brief resets the subflow to a joinable state
@param clear_graph specifies whether to clear the associated graph (default @c true)
Clears the underlying task graph depending on the
given variable @c clear_graph (default @c true) and then
updates the subflow to a joinable state.
*/
void reset(bool clear_graph = true);
/**
@brief queries if the subflow is joinable
This member function queries if the subflow is joinable.
When a subflow is joined or detached, it becomes not joinable.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
sf.emplace([](){});
std::cout << sf.joinable() << '\n'; // true
sf.join();
std::cout << sf.joinable() << '\n'; // false
});
@endcode
*/
bool joinable() const noexcept;
/**
@brief runs a given function asynchronously
@tparam F callable type
@tparam ArgsT parameter types
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates an asynchronous task to launch the given
function on the given arguments.
The difference from tf::Executor::async is that the created asynchronous task
pertains to the subflow.
When the subflow joins, all asynchronous tasks created from the subflow
are guaranteed to finish before the join.
For example:
@code{.cpp}
std::atomic<int> counter(0);
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.async([&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This method is thread-safe and can be called by multiple tasks in the
subflow at the same time.
@attention
You cannot create asynchronous tasks from a detached subflow.
Doing this results in undefined behavior.
*/
template <typename F, typename... ArgsT>
auto async(F&& f, ArgsT&&... args);
/**
@brief runs the given function asynchronously and assigns the task a name
@tparam F callable type
@tparam ArgsT parameter types
@param name name of the asynchronous task
@param f callable object to call
@param args parameters to pass to the callable
@return a tf::Future that will hold the result of the execution
The method creates a named asynchronous task to launch the given
function on the given arguments.
The difference from tf::Executor::async is that the created asynchronous task
pertains to the subflow.
When the subflow joins, all asynchronous tasks created from the subflow
are guaranteed to finish before the join.
For example:
@code{.cpp}
std::atomic<int> counter(0);
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.async("name", [&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This method is thread-safe and can be called by multiple tasks in the
subflow at the same time.
@attention
You cannot create named asynchronous tasks from a detached subflow.
Doing this results in undefined behavior.
*/
template <typename F, typename... ArgsT>
auto named_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief similar to tf::Subflow::async but does not return a future object
This member function is more efficient than tf::Subflow::async
and is encouraged when there is no data to return.
@code{.cpp}
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.silent_async([&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void silent_async(F&& f, ArgsT&&... args);
/**
@brief similar to tf::Subflow::named_async but does not return a future object
This member function is more efficient than tf::Subflow::named_async
and is encouraged when there is no data to return.
@code{.cpp}
taskflow.emplace([&](tf::Subflow& sf){
for(int i=0; i<100; i++) {
sf.named_silent_async("name", [&](){ counter++; });
}
sf.join();
assert(counter == 100);
});
@endcode
This member function is thread-safe.
*/
template <typename F, typename... ArgsT>
void named_silent_async(const std::string& name, F&& f, ArgsT&&... args);
/**
@brief returns the executor that runs this subflow
*/
inline Executor& executor();
private:
Executor& _executor;
Worker& _worker;
Node* _parent;
bool _joinable {true};
Subflow(Executor&, Worker&, Node*, Graph&);
template <typename F, typename... ArgsT>
auto _named_async(Worker& w, const std::string& name, F&& f, ArgsT&&... args);
template <typename F, typename... ArgsT>
void _named_silent_async(Worker& w, const std::string& name, F&& f, ArgsT&&... args);
};
// Constructor
inline Subflow::Subflow(
Executor& executor, Worker& worker, Node* parent, Graph& graph
) :
FlowBuilder {graph},
_executor {executor},
_worker {worker},
_parent {parent} {
// assert(_parent != nullptr);
}
// Function: joinable
inline bool Subflow::joinable() const noexcept {
return _joinable;
}
// Function: executor
inline Executor& Subflow::executor() {
return _executor;
}
// Procedure: reset
inline void Subflow::reset(bool clear_graph) {
if(clear_graph) {
_graph._clear();
}
_joinable = true;
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/traits.hpp | #pragma once
#include <type_traits>
#include <iterator>
#include <iostream>
#include <fstream>
#include <mutex>
#include <stack>
#include <queue>
#include <vector>
#include <algorithm>
#include <memory>
#include <atomic>
#include <thread>
#include <future>
#include <functional>
#include <unordered_map>
#include <unordered_set>
#include <sstream>
#include <list>
#include <numeric>
#include <random>
#include <iomanip>
#include <cassert>
#include <cmath>
#include <array>
#include <string>
#include <variant>
#include <optional>
#include "os.hpp"
namespace tf {
//-----------------------------------------------------------------------------
// Traits
//-----------------------------------------------------------------------------
//// Struct: dependent_false
//template <typename... T>
//struct dependent_false {
// static constexpr bool value = false;
//};
//
//template <typename... T>
//constexpr auto dependent_false_v = dependent_false<T...>::value;
template<typename> inline constexpr bool dependent_false_v = false;
// ----------------------------------------------------------------------------
// is_pod
//-----------------------------------------------------------------------------
template <typename T>
struct is_pod {
static const bool value = std::is_trivial_v<T> &&
std::is_standard_layout_v<T>;
};
template <typename T>
constexpr bool is_pod_v = is_pod<T>::value;
//-----------------------------------------------------------------------------
// NoInit
//-----------------------------------------------------------------------------
template <typename T>
struct NoInit {
//static_assert(is_pod_v<T>, "NoInit only supports POD type");
// constructor without initialization
NoInit () noexcept {}
// implicit conversion T -> NoInit<T>
constexpr NoInit (T value) noexcept : v{value} {}
// implicit conversion NoInit<T> -> T
constexpr operator T () const noexcept { return v; }
T v;
};
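// A minimal usage sketch (assuming a trivially destructible element type):
// wrapping a POD type in NoInit skips value-initialization, which avoids
// zeroing cost when resizing a large scratch buffer.
//
// @code{.cpp}
// std::vector<tf::NoInit<int>> scratch;
// scratch.resize(1 << 20);  // elements are left uninitialized (no zeroing)
// scratch[0] = 42;          // implicit conversion from int
// int x = scratch[0];       // implicit conversion back to int
// @endcode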
//-----------------------------------------------------------------------------
// Move-On-Copy
//-----------------------------------------------------------------------------
// Struct: MoveOnCopyWrapper
template <typename T>
struct MoC {
MoC(T&& rhs) : object(std::move(rhs)) {}
MoC(const MoC& other) : object(std::move(other.object)) {}
T& get() { return object; }
mutable T object;
};
template <typename T>
auto make_moc(T&& m) {
return MoC<T>(std::forward<T>(m));
}
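// A minimal usage sketch: MoC lets a move-only object (e.g., std::promise)
// be captured into a lambda that stays copy-constructible, so the lambda can
// be stored in copy-requiring wrappers such as std::function.
//
// @code{.cpp}
// std::promise<int> p;
// auto fut = p.get_future();
// std::function<void()> task(
//   [p=tf::make_moc(std::move(p))] () mutable { p.object.set_value(7); }
// );
// task();
// assert(fut.get() == 7);
// @endcode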
//-----------------------------------------------------------------------------
// Visitors.
//-----------------------------------------------------------------------------
//// Overloaded.
//template <typename... Ts>
//struct Visitors : Ts... {
// using Ts::operator()... ;
//};
//
//template <typename... Ts>
//Visitors(Ts...) -> Visitors<Ts...>;
// ----------------------------------------------------------------------------
// std::variant
// ----------------------------------------------------------------------------
template <typename T, typename>
struct get_index;
template <size_t I, typename... Ts>
struct get_index_impl {};
template <size_t I, typename T, typename... Ts>
struct get_index_impl<I, T, T, Ts...> : std::integral_constant<size_t, I>{};
template <size_t I, typename T, typename U, typename... Ts>
struct get_index_impl<I, T, U, Ts...> : get_index_impl<I+1, T, Ts...>{};
template <typename T, typename... Ts>
struct get_index<T, std::variant<Ts...>> : get_index_impl<0, T, Ts...>{};
template <typename T, typename... Ts>
constexpr auto get_index_v = get_index<T, Ts...>::value;
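// A minimal usage sketch: get_index_v yields the zero-based position of a
// type among the alternatives of a std::variant.
//
// @code{.cpp}
// using V = std::variant<int, double, std::string>;
// static_assert(tf::get_index_v<int, V>    == 0);
// static_assert(tf::get_index_v<double, V> == 1);
// @endcode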
// ----------------------------------------------------------------------------
// unwrap_reference
// ----------------------------------------------------------------------------
template <class T>
struct unwrap_reference { using type = T; };
template <class U>
struct unwrap_reference<std::reference_wrapper<U>> { using type = U&; };
template<class T>
using unwrap_reference_t = typename unwrap_reference<T>::type;
template< class T >
struct unwrap_ref_decay : unwrap_reference<std::decay_t<T>> {};
template<class T>
using unwrap_ref_decay_t = typename unwrap_ref_decay<T>::type;
// ----------------------------------------------------------------------------
// stateful iterators
// ----------------------------------------------------------------------------
// STL-styled iterator
template <typename B, typename E>
struct stateful_iterator {
using TB = std::decay_t<unwrap_ref_decay_t<B>>;
using TE = std::decay_t<unwrap_ref_decay_t<E>>;
static_assert(std::is_same_v<TB, TE>, "decayed iterator types must match");
using type = TB;
};
template <typename B, typename E>
using stateful_iterator_t = typename stateful_iterator<B, E>::type;
// raw integral index
template <typename B, typename E, typename S>
struct stateful_index {
using TB = std::decay_t<unwrap_ref_decay_t<B>>;
using TE = std::decay_t<unwrap_ref_decay_t<E>>;
using TS = std::decay_t<unwrap_ref_decay_t<S>>;
static_assert(
std::is_integral_v<TB>, "decayed beg index must be an integral type"
);
static_assert(
std::is_integral_v<TE>, "decayed end index must be an integral type"
);
static_assert(
std::is_integral_v<TS>, "decayed step must be an integral type"
);
static_assert(
std::is_same_v<TB, TE> && std::is_same_v<TE, TS>,
"decayed index and step types must match"
);
using type = TB;
};
template <typename B, typename E, typename S>
using stateful_index_t = typename stateful_index<B, E, S>::type;
// ----------------------------------------------------------------------------
// visit a tuple with a functor at runtime
// ----------------------------------------------------------------------------
template <typename Func, typename Tuple, size_t N = 0>
void visit_tuple(Func func, Tuple& tup, size_t idx) {
if (N == idx) {
std::invoke(func, std::get<N>(tup));
return;
}
if constexpr (N + 1 < std::tuple_size_v<Tuple>) {
return visit_tuple<Func, Tuple, N + 1>(func, tup, idx);
}
}
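// A minimal usage sketch: visit_tuple applies a functor to the tuple element
// selected by a runtime index; the functor must be invocable with every
// element type.
//
// @code{.cpp}
// std::tuple<int, double, std::string> tup {1, 2.5, "hi"};
// tf::visit_tuple([](const auto& v){ std::cout << v << '\n'; }, tup, 2);  // prints "hi"
// @endcode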
// ----------------------------------------------------------------------------
// unroll loop
// ----------------------------------------------------------------------------
// Template unrolled looping construct.
template<auto beg, auto end, auto step, bool valid = (beg < end)>
struct Unroll {
template<typename F>
static void eval(F f) {
f(beg);
Unroll<beg + step, end, step>::eval(f);
}
};
template<auto beg, auto end, auto step>
struct Unroll<beg, end, step, false> {
template<typename F>
static void eval(F) { }
};
template<auto beg, auto end, auto step, typename F>
void unroll(F f) {
Unroll<beg, end, step>::eval(f);
}
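// A minimal usage sketch: unroll expands the loop body at compile time over
// the sequence beg, beg+step, ... while the index is less than end.
//
// @code{.cpp}
// tf::unroll<0, 8, 2>([](auto i){ std::cout << i << ' '; });  // prints 0 2 4 6
// @endcode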
// ----------------------------------------------------------------------------
// make types of variant unique
// ----------------------------------------------------------------------------
template <typename T, typename... Ts>
struct filter_duplicates { using type = T; };
template <template <typename...> class C, typename... Ts, typename U, typename... Us>
struct filter_duplicates<C<Ts...>, U, Us...>
: std::conditional_t<(std::is_same_v<U, Ts> || ...)
, filter_duplicates<C<Ts...>, Us...>
, filter_duplicates<C<Ts..., U>, Us...>> {};
template <typename T>
struct unique_variant;
template <typename... Ts>
struct unique_variant<std::variant<Ts...>> : filter_duplicates<std::variant<>, Ts...> {};
template <typename T>
using unique_variant_t = typename unique_variant<T>::type;
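// Added example (illustrative only): deduplicate the alternatives of a
// variant type, keeping the first occurrence of each type:
//   static_assert(std::is_same_v<
//     unique_variant_t<std::variant<int, int, double, int>>,
//     std::variant<int, double>
//   >);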
} // end of namespace tf. ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/singleton.hpp | #pragma once
namespace tf {
/** @class Singleton
@brief class template to create a thread-safe singleton object
*/
template <typename T>
class Singleton {
public:
/**
@brief get a reference to the singleton object
*/
inline static T& get() {
static T instance;
return instance;
}
private:
Singleton() = default;
~Singleton() = default;
Singleton(const Singleton&)= delete;
Singleton& operator=(const Singleton&)= delete;
};
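// Added usage sketch (Config is a hypothetical user type, not part of
// Taskflow): obtain the single shared instance anywhere in the program.
// The instance is a function-local static, so its initialization is
// thread-safe and happens on first use.
//
//   struct Config { int verbosity = 0; };
//   auto& cfg = tf::Singleton<Config>::get();
//   cfg.verbosity = 2;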
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/object_pool.hpp | // 2020/03/13 - modified by Tsung-Wei Huang
// - fixed bug in aligning memory
//
// 2020/02/02 - modified by Tsung-Wei Huang
// - new implementation motivated by Hoard
//
// 2019/07/10 - modified by Tsung-Wei Huang
// - replace raw pointer with smart pointer
//
// 2019/06/13 - created by Tsung-Wei Huang
// - implemented an object pool class
#pragma once
#include <thread>
#include <atomic>
#include <mutex>
#include <vector>
#include <cassert>
#include <cstddef>
#include <new>
namespace tf {
#define TF_ENABLE_POOLABLE_ON_THIS \
template <typename T, size_t S> friend class ObjectPool; \
void* _object_pool_block
// Class: ObjectPool
//
// The class implements an efficient thread-safe object pool motivated
// by the Hoard memory allocator algorithm.
// Unlike a general-purpose memory allocator, the object pool allocates
// only one object at a time.
//
// Internally, we use the following variables to maintain blocks and heaps:
// X: size in bytes of an item slot
// M: number of items per block
// F: emptiness threshold
// B: number of bins per local heap (bin[B-1] is the full list)
// W: number of items per bin
// K: shrinkage constant
//
// Example scenario 1:
// M = 30
// F = 4
// W = (30+4-1)/4 = 8
//
// b0: 0, 1, 2, 3, 4, 5, 6, 7
// b1: 8, 9, 10, 11, 12, 13, 14, 15
// b2: 16, 17, 18, 19, 20, 21, 22, 23
// b3: 24, 25, 26, 27, 28, 29
// b4: 30 (anything equal to M)
//
// Example scenario 2:
// M = 32
// F = 4
// W = (32+4-1)/4 = 8
// b0: 0, 1, 2, 3, 4, 5, 6, 7
// b1: 8, 9, 10, 11, 12, 13, 14, 15
// b2: 16, 17, 18, 19, 20, 21, 22, 23
// b3: 24, 25, 26, 27, 28, 29, 30, 31
// b4: 32 (anything equal to M)
//
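// Added usage sketch (Foo is a hypothetical user type, not part of Taskflow):
// the pooled type reserves a back-pointer slot via TF_ENABLE_POOLABLE_ON_THIS
// so recycle() can locate the owning block.
//
//   struct Foo {
//     Foo(int v) : value{v} {}
//     int value;
//     TF_ENABLE_POOLABLE_ON_THIS;
//   };
//
//   tf::ObjectPool<Foo> pool;
//   Foo* f = pool.animate(42);  // placement-constructs Foo{42} inside a block
//   pool.recycle(f);            // destroys it and returns the slot to the pool
//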
template <typename T, size_t S = 65536>
class ObjectPool {
// each data slot must be large enough to hold a freelist pointer
constexpr static size_t X = (std::max)(sizeof(T*), sizeof(T));
//constexpr static size_t X = sizeof(long double) + std::max(sizeof(T*), sizeof(T));
//constexpr static size_t M = (S - offsetof(Block, data)) / X;
constexpr static size_t M = S / X;
constexpr static size_t F = 4;
constexpr static size_t B = F + 1;
constexpr static size_t W = (M + F - 1) / F;
constexpr static size_t K = 4;
static_assert(
S && (!(S & (S-1))), "block size S must be a power of two"
);
static_assert(
M >= 128, "block size S must be large enough to pool at least 128 objects"
);
struct Blocklist {
Blocklist* prev;
Blocklist* next;
};
struct GlobalHeap {
std::mutex mutex;
Blocklist list;
};
struct LocalHeap {
std::mutex mutex;
Blocklist lists[B];
size_t u {0};
size_t a {0};
};
struct Block {
std::atomic<LocalHeap*> heap;
Blocklist list_node;
size_t i;
size_t u;
T* top;
// long double padding;
char data[S];
};
public:
/**
@brief constructs an object pool from a number of anticipated threads
*/
explicit ObjectPool(unsigned = std::thread::hardware_concurrency());
/**
@brief destructs the object pool
*/
~ObjectPool();
/**
@brief acquires a pointer to an object constructed from a given argument list
*/
template <typename... ArgsT>
T* animate(ArgsT&&... args);
/**
@brief recycles the object pointed to by @c ptr and destroys it
*/
void recycle(T* ptr);
size_t num_bins_per_local_heap() const;
size_t num_objects_per_bin() const;
size_t num_objects_per_block() const;
size_t num_available_objects() const;
size_t num_allocated_objects() const;
size_t capacity() const;
size_t num_local_heaps() const;
size_t num_global_heaps() const;
size_t num_heaps() const;
float emptiness_threshold() const;
private:
const size_t _lheap_mask;
GlobalHeap _gheap;
std::vector<LocalHeap> _lheaps;
LocalHeap& _this_heap();
constexpr unsigned _next_pow2(unsigned n) const;
template <class P, class Q>
constexpr size_t _offset_in_class(const Q P::*member) const;
template <class P, class Q>
constexpr P* _parent_class_of(Q*, const Q P::*member);
template <class P, class Q>
constexpr P* _parent_class_of(const Q*, const Q P::*member) const;
constexpr Block* _block_of(Blocklist*);
constexpr Block* _block_of(const Blocklist*) const;
size_t _bin(size_t) const;
T* _allocate(Block*);
void _deallocate(Block*, T*);
void _blocklist_init_head(Blocklist*);
void _blocklist_add_impl(Blocklist*, Blocklist*, Blocklist*);
void _blocklist_push_front(Blocklist*, Blocklist*);
void _blocklist_push_back(Blocklist*, Blocklist*);
void _blocklist_del_impl(Blocklist*, Blocklist*);
void _blocklist_del(Blocklist*);
void _blocklist_replace(Blocklist*, Blocklist*);
void _blocklist_move_front(Blocklist*, Blocklist*);
void _blocklist_move_back(Blocklist*, Blocklist*);
bool _blocklist_is_first(const Blocklist*, const Blocklist*);
bool _blocklist_is_last(const Blocklist*, const Blocklist*);
bool _blocklist_is_empty(const Blocklist*);
bool _blocklist_is_singular(const Blocklist*);
template <typename C>
void _for_each_block_safe(Blocklist*, C&&);
template <typename C>
void _for_each_block(Blocklist*, C&&);
};
// ----------------------------------------------------------------------------
// ObjectPool definition
// ----------------------------------------------------------------------------
// Constructor
template <typename T, size_t S>
ObjectPool<T, S>::ObjectPool(unsigned t) :
//_heap_mask {(_next_pow2(t) << 1) - 1u},
//_heap_mask { _next_pow2(t<<1) - 1u },
//_heap_mask {(t << 1) - 1},
_lheap_mask { _next_pow2((t+1) << 1) - 1 },
_lheaps { _lheap_mask + 1 } {
_blocklist_init_head(&_gheap.list);
for(auto& h : _lheaps) {
for(size_t i=0; i<B; ++i) {
_blocklist_init_head(&h.lists[i]);
}
}
}
// Destructor
template <typename T, size_t S>
ObjectPool<T, S>::~ObjectPool() {
// clear local heaps
for(auto& h : _lheaps) {
for(size_t i=0; i<B; ++i) {
_for_each_block_safe(&h.lists[i], [] (Block* b) {
//std::free(b);
delete b;
});
}
}
// clear global heap
_for_each_block_safe(&_gheap.list, [] (Block* b) {
//std::free(b);
delete b;
});
}
// Function: num_bins_per_local_heap
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_bins_per_local_heap() const {
return B;
}
// Function: num_objects_per_bin
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_objects_per_bin() const {
return W;
}
// Function: num_objects_per_block
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_objects_per_block() const {
return M;
}
// Function: emptiness_threshold
template <typename T, size_t S>
float ObjectPool<T, S>::emptiness_threshold() const {
return 1.0f/F;
}
// Function: num_global_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_global_heaps() const {
return 1;
}
// Function: num_local_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_local_heaps() const {
return _lheaps.size();
}
// Function: num_heaps
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_heaps() const {
return _lheaps.size() + 1;
}
// Function: capacity
template <typename T, size_t S>
size_t ObjectPool<T, S>::capacity() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += M;
};
// local heap
for(auto& h : _lheaps) {
n += h.a;
}
return n;
}
// Function: num_available_objects
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_available_objects() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += (M - _block_of(p)->u);
};
// local heap
for(auto& h : _lheaps) {
n += (h.a - h.u);
}
return n;
}
// Function: num_allocated_objects
template <typename T, size_t S>
size_t ObjectPool<T, S>::num_allocated_objects() const {
size_t n = 0;
// global heap
for(auto p=_gheap.list.next; p!=&_gheap.list; p=p->next) {
n += _block_of(p)->u;
};
// local heap
for(auto& h : _lheaps) {
n += h.u;
}
return n;
}
// Function: _bin
template <typename T, size_t S>
size_t ObjectPool<T, S>::_bin(size_t u) const {
return u == M ? F : u/W;
}
// Function: _offset_in_class
template <typename T, size_t S>
template <class P, class Q>
constexpr size_t ObjectPool<T, S>::_offset_in_class(
const Q P::*member) const {
return (size_t) &( reinterpret_cast<P*>(0)->*member);
}
// C macro: parent_class_of(list_pointer, Block, list)
// C++: parent_class_of(list_pointer, &Block::list)
template <typename T, size_t S>
template <class P, class Q>
constexpr P* ObjectPool<T, S>::_parent_class_of(
Q* ptr, const Q P::*member
) {
return (P*)( (char*)ptr - _offset_in_class(member));
}
// Function: _parent_class_of
template <typename T, size_t S>
template <class P, class Q>
constexpr P* ObjectPool<T, S>::_parent_class_of(
const Q* ptr, const Q P::*member
) const {
return (P*)( (char*)ptr - _offset_in_class(member));
}
// Function: _block_of
template <typename T, size_t S>
constexpr typename ObjectPool<T, S>::Block*
ObjectPool<T, S>::_block_of(Blocklist* list) {
return _parent_class_of(list, &Block::list_node);
}
// Function: _block_of
template <typename T, size_t S>
constexpr typename ObjectPool<T, S>::Block*
ObjectPool<T, S>::_block_of(const Blocklist* list) const {
return _parent_class_of(list, &Block::list_node);
}
// Procedure: initialize a list head
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_init_head(Blocklist *list) {
list->next = list;
list->prev = list;
}
// Procedure: _blocklist_add_impl
// Insert a new entry between two known consecutive entries.
//
// This is only for internal list manipulation where we know
// the prev/next entries already!
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_add_impl(
Blocklist *curr, Blocklist *prev, Blocklist *next
) {
next->prev = curr;
curr->next = next;
curr->prev = prev;
prev->next = curr;
}
// list_push_front - add a new entry
// @curr: curr entry to be added
// @head: list head to add it after
//
// Insert a new entry after the specified head.
// This is good for implementing stacks.
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_push_front(
Blocklist *curr, Blocklist *head
) {
_blocklist_add_impl(curr, head, head->next);
}
// list_add_tail - add a new entry
// @curr: curr entry to be added
// @head: list head to add it before
//
// Insert a new entry before the specified head.
// This is useful for implementing queues.
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_push_back(
Blocklist *curr, Blocklist *head
) {
_blocklist_add_impl(curr, head->prev, head);
}
// Delete a list entry by making the prev/next entries
// point to each other.
//
// This is only for internal list manipulation where we know
// the prev/next entries already!
//
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_del_impl(
Blocklist * prev, Blocklist * next
) {
next->prev = prev;
prev->next = next;
}
// _blocklist_del - deletes entry from list.
// @entry: the element to delete from the list.
// Note: _blocklist_is_empty() on the entry does not return true after this;
// the entry is in an undefined state.
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_del(Blocklist *entry) {
_blocklist_del_impl(entry->prev, entry->next);
entry->next = nullptr;
entry->prev = nullptr;
}
// list_replace - replace old entry by new one
// @old : the element to be replaced
// @curr : the new element to insert
//
// If @old was empty, it will be overwritten.
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_replace(
Blocklist *old, Blocklist *curr
) {
curr->next = old->next;
curr->next->prev = curr;
curr->prev = old->prev;
curr->prev->next = curr;
}
// list_move - delete from one list and add as another's head
// @list: the entry to move
// @head: the head that will precede our entry
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_move_front(
Blocklist *list, Blocklist *head
) {
_blocklist_del_impl(list->prev, list->next);
_blocklist_push_front(list, head);
}
// list_move_tail - delete from one list and add as another's tail
// @list: the entry to move
// @head: the head that will follow our entry
template <typename T, size_t S>
void ObjectPool<T, S>::_blocklist_move_back(
Blocklist *list, Blocklist *head
) {
_blocklist_del_impl(list->prev, list->next);
_blocklist_push_back(list, head);
}
// list_is_first - tests whether @list is the first entry in list @head
// @list: the entry to test
// @head: the head of the list
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_first(
const Blocklist *list, const Blocklist *head
) {
return list->prev == head;
}
// list_is_last - tests whether @list is the last entry in list @head
// @list: the entry to test
// @head: the head of the list
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_last(
const Blocklist *list, const Blocklist *head
) {
return list->next == head;
}
// list_empty - tests whether a list is empty
// @head: the list to test.
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_empty(const Blocklist *head) {
return head->next == head;
}
// list_is_singular - tests whether a list has just one entry.
// @head: the list to test.
template <typename T, size_t S>
bool ObjectPool<T, S>::_blocklist_is_singular(
const Blocklist *head
) {
return !_blocklist_is_empty(head) && (head->next == head->prev);
}
// Procedure: _for_each_block
template <typename T, size_t S>
template <typename C>
void ObjectPool<T, S>::_for_each_block(Blocklist* head, C&& c) {
Blocklist* p;
for(p=head->next; p!=head; p=p->next) {
c(_block_of(p));
}
}
// Procedure: _for_each_block_safe
// Iterate each item of a list - safe to free
template <typename T, size_t S>
template <typename C>
void ObjectPool<T, S>::_for_each_block_safe(Blocklist* head, C&& c) {
Blocklist* p;
Blocklist* t;
for(p=head->next, t=p->next; p!=head; p=t, t=p->next) {
c(_block_of(p));
}
}
// Function: _allocate
// allocate a spot from the block
template <typename T, size_t S>
T* ObjectPool<T, S>::_allocate(Block* s) {
if(s->top == nullptr) {
return reinterpret_cast<T*>(s->data + s->i++ * X);
}
else {
T* retval = s->top;
s->top = *(reinterpret_cast<T**>(s->top));
return retval;
}
}
// Procedure: _deallocate
template <typename T, size_t S>
void ObjectPool<T, S>::_deallocate(Block* s, T* ptr) {
*(reinterpret_cast<T**>(ptr)) = s->top;
s->top = ptr;
}
// Function: animate
template <typename T, size_t S>
template <typename... ArgsT>
T* ObjectPool<T, S>::animate(ArgsT&&... args) {
//std::cout << "construct a new item\n";
// my logically mapped heap
LocalHeap& h = _this_heap();
Block* s {nullptr};
h.mutex.lock();
// scan the list of superblocks from the most full to the least full
int f = static_cast<int>(F-1);
for(; f>=0; f--) {
if(!_blocklist_is_empty(&h.lists[f])) {
s = _block_of(h.lists[f].next);
break;
}
}
// no superblock found
if(f == -1) {
// check the global heap for a superblock
_gheap.mutex.lock();
if(!_blocklist_is_empty(&_gheap.list)) {
s = _block_of(_gheap.list.next);
//printf("get a superblock from global heap %lu\n", s->u);
assert(s->u < M && s->heap == nullptr);
f = static_cast<int>(_bin(s->u + 1));
_blocklist_move_front(&s->list_node, &h.lists[f]);
s->heap = &h; // must be within the global heap lock
_gheap.mutex.unlock();
h.u = h.u + s->u;
h.a = h.a + M;
}
// create a new block
else {
//printf("create a new superblock\n");
_gheap.mutex.unlock();
f = 0;
//s = static_cast<Block*>(std::malloc(sizeof(Block)));
s = new Block();
if(s == nullptr) {
throw std::bad_alloc();
}
s->heap = &h;
s->i = 0;
s->u = 0;
s->top = nullptr;
_blocklist_push_front(&s->list_node, &h.lists[f]);
h.a = h.a + M;
}
}
// the superblock must have at least one space
//assert(s->u < M);
//printf("%lu %lu %lu\n", h.u, h.a, s->u);
//assert(h.u < h.a);
h.u = h.u + 1;
s->u = s->u + 1;
// take one item from the superblock
T* mem = _allocate(s);
int b = static_cast<int>(_bin(s->u));
if(b != f) {
//printf("move superblock from list[%d] to list[%d]\n", f, b);
_blocklist_move_front(&s->list_node, &h.lists[b]);
}
//std::cout << "s.i " << s->i << '\n'
// << "s.u " << s->u << '\n'
// << "h.u " << h.u << '\n'
// << "h.a " << h.a << '\n';
h.mutex.unlock();
//printf("allocate %p (s=%p)\n", mem, s);
new (mem) T(std::forward<ArgsT>(args)...);
mem->_object_pool_block = s;
return mem;
}
// Function: recycle
template <typename T, size_t S>
void ObjectPool<T, S>::recycle(T* mem) {
//Block* s = *reinterpret_cast<Block**>(
// reinterpret_cast<char*>(mem) - sizeof(Block**)
//);
//Block* s= *(reinterpret_cast<Block**>(mem) - O); // (mem) - 1
Block* s = static_cast<Block*>(mem->_object_pool_block);
mem->~T();
//printf("deallocate %p (s=%p) M=%lu W=%lu X=%lu\n", mem, s, M, W, X);
// here we need a loop because when we lock the heap,
// other threads may have moved the superblock to another heap
bool sync = false;
do {
LocalHeap* h = s->heap.load(std::memory_order_relaxed);
// the block is in global heap
if(h == nullptr) {
std::lock_guard<std::mutex> glock(_gheap.mutex);
if(s->heap == h) {
sync = true;
_deallocate(s, mem);
s->u = s->u - 1;
}
}
else {
std::lock_guard<std::mutex> llock(h->mutex);
if(s->heap == h) {
sync = true;
// deallocate the item from the superblock
size_t f = _bin(s->u);
_deallocate(s, mem);
s->u = s->u - 1;
h->u = h->u - 1;
size_t b = _bin(s->u);
if(b != f) {
//printf("move superblock from list[%d] to list[%d]\n", f, b);
_blocklist_move_front(&s->list_node, &h->lists[b]);
}
// transfer a mostly-empty superblock to global heap
if((h->u + K*M < h->a) && (h->u < ((F-1) * h->a / F))) {
for(size_t i=0; i<F; i++) {
if(!_blocklist_is_empty(&h->lists[i])) {
Block* x = _block_of(h->lists[i].next);
//printf("transfer a block (x.u=%lu/x.i=%lu) to the global heap\n", x->u, x->i);
assert(h->u > x->u && h->a > M);
h->u = h->u - x->u;
h->a = h->a - M;
x->heap = nullptr;
std::lock_guard<std::mutex> glock(_gheap.mutex);
_blocklist_move_front(&x->list_node, &_gheap.list);
break;
}
}
}
}
}
} while(!sync);
//std::cout << "s.i " << s->i << '\n'
// << "s.u " << s->u << '\n';
}
// Function: _this_heap
template <typename T, size_t S>
typename ObjectPool<T, S>::LocalHeap&
ObjectPool<T, S>::_this_heap() {
// here we don't use thread local since object pool might be
// created and destroyed multiple times
//thread_local auto hv = std::hash<std::thread::id>()(std::this_thread::get_id());
//return _lheaps[hv & _lheap_mask];
return _lheaps[
std::hash<std::thread::id>()(std::this_thread::get_id()) & _lheap_mask
];
}
// Function: _next_pow2
template <typename T, size_t S>
constexpr unsigned ObjectPool<T, S>::_next_pow2(unsigned n) const {
if(n == 0) return 1;
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n++;
return n;
}
} // end namespace tf --------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/macros.hpp | #pragma once
#if defined(_MSC_VER)
#define TF_FORCE_INLINE __forceinline
#elif defined(__GNUC__) && __GNUC__ > 3
#define TF_FORCE_INLINE __attribute__((__always_inline__)) inline
#else
#define TF_FORCE_INLINE inline
#endif
#if defined(_MSC_VER)
#define TF_NO_INLINE __declspec(noinline)
#elif defined(__GNUC__) && __GNUC__ > 3
#define TF_NO_INLINE __attribute__((__noinline__))
#else
#define TF_NO_INLINE
#endif
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/stream.hpp | #pragma once
#include <iostream>
#include <string>
#include <sstream>
namespace tf {
// Procedure: ostreamize
template <typename T>
void ostreamize(std::ostream& os, T&& token) {
os << std::forward<T>(token);
}
// Procedure: ostreamize
template <typename T, typename... Rest>
void ostreamize(std::ostream& os, T&& token, Rest&&... rest) {
os << std::forward<T>(token);
ostreamize(os, std::forward<Rest>(rest)...);
}
// Function: stringify
template <typename... ArgsT>
std::string stringify(ArgsT&&... args) {
std::ostringstream oss;
ostreamize(oss, std::forward<ArgsT>(args)...);
return oss.str();
}
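// Added example (illustrative only): build a message from heterogeneous tokens:
//   std::string msg = stringify("worker ", 3, " finished in ", 1.5, "s");
//   // msg == "worker 3 finished in 1.5s"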
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/uuid.hpp | #pragma once
#include <iostream>
#include <string>
#include <cstring>
#include <cstdint>
#include <cstddef>
#include <limits>
#include <random>
#include <chrono>
namespace tf {
// Class: UUID
//
// A universally unique identifier (UUID) is an identifier standard used in software
// construction. A UUID is simply a 128-bit value. The meaning of each bit is defined
// by any of several variants.
// For human-readable display, many systems use a canonical format using hexadecimal
// text with inserted hyphen characters.
//
// For example: 123e4567-e89b-12d3-a456-426655440000
//
// The intent of UUIDs is to enable distributed systems to uniquely identify information
// without significant central coordination.
//
// Copyright 2006 Andy Tompkins.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
struct UUID {
using value_type = uint8_t;
using reference = uint8_t&;
using const_reference = const uint8_t&;
using iterator = uint8_t*;
using const_iterator = const uint8_t*;
using size_type = size_t;
using difference_type = ptrdiff_t;
inline UUID();
UUID(const UUID&) = default;
UUID(UUID&&) = default;
UUID& operator = (const UUID&) = default;
UUID& operator = (UUID&&) = default;
inline static size_type size();
inline iterator begin();
inline const_iterator begin() const;
inline iterator end();
inline const_iterator end() const;
inline bool is_nil() const;
inline void swap(UUID& rhs);
inline size_t hash_value() const;
inline bool operator == (const UUID&) const;
inline bool operator < (const UUID&) const;
inline bool operator > (const UUID&) const;
inline bool operator != (const UUID&) const;
inline bool operator >= (const UUID&) const;
inline bool operator <= (const UUID&) const;
uint8_t data[16] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
inline std::string to_string() const;
};
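// Added usage sketch (illustrative only): generate and print a random
// version-4 UUID.
//
//   tf::UUID id;              // randomly generated on construction
//   std::cout << id << '\n';  // canonical form, e.g. xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
//
// UUIDs support the usual relational operators and can be used as keys of
// unordered containers through the std::hash<tf::UUID> specialization at the
// end of this header.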
// Constructor
inline UUID::UUID() {
static thread_local std::default_random_engine engine {
std::random_device{}()
};
std::uniform_int_distribution<unsigned long> distribution(
(std::numeric_limits<unsigned long>::min)(),
(std::numeric_limits<unsigned long>::max)()
);
int i = 0;
auto random_value = distribution(engine);
for (auto it=begin(); it!=end(); ++it, ++i) {
if (i == sizeof(unsigned long)) {
random_value = distribution(engine);
i = 0;
}
*it = static_cast<UUID::value_type>((random_value >> (i*8)) & 0xFF);
}
// set variant: must be 0b10xxxxxx
*(begin()+8) &= 0xBF;
*(begin()+8) |= 0x80;
// set version: must be 0b0100xxxx
*(begin()+6) &= 0x4F; //0b01001111
*(begin()+6) |= 0x40; //0b01000000
}
// Function: size
inline typename UUID::size_type UUID::size() {
return 16;
}
// Function: begin
inline typename UUID::iterator UUID::begin() {
return data;
}
// Function: begin
inline typename UUID::const_iterator UUID::begin() const {
return data;
}
// Function: end
inline typename UUID::iterator UUID::end() {
return data+size();
}
// Function: end
inline typename UUID::const_iterator UUID::end() const {
return data+size();
}
// Function: is_nil
inline bool UUID::is_nil() const {
for (std::size_t i = 0; i < sizeof(this->data); ++i) {
if (this->data[i] != 0U) {
return false;
}
}
return true;
}
// Procedure: swap
inline void UUID::swap(UUID& rhs) {
UUID tmp = *this;
*this = rhs;
rhs = tmp;
}
// Function: hash_value
inline size_t UUID::hash_value() const {
size_t seed = 0;
for(auto i=begin(); i != end(); ++i) {
seed ^= static_cast<size_t>(*i) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
return seed;
}
// Operator: ==
inline bool UUID::operator == (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) == 0;
}
// Operator: !=
inline bool UUID::operator != (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) != 0;
}
// Operator: <
inline bool UUID::operator < (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) < 0;
}
// Operator: >
inline bool UUID::operator > (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) > 0;
}
// Operator: <=
inline bool UUID::operator <= (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) <= 0;
}
// Operator: >=
inline bool UUID::operator >= (const UUID& rhs) const {
return std::memcmp(data, rhs.data, sizeof(data)) >= 0;
}
// Function: to_string
inline std::string UUID::to_string() const {
auto to_char = [](size_t i) {
if (i <= 9) return static_cast<char>('0' + i);
return static_cast<char>('a' + (i-10));
};
std::string result;
result.reserve(36);
std::size_t i=0;
for (auto it = begin(); it!=end(); ++it, ++i) {
const size_t hi = ((*it) >> 4) & 0x0F;
result += to_char(hi);
const size_t lo = (*it) & 0x0F;
result += to_char(lo);
if (i == 3 || i == 5 || i == 7 || i == 9) {
result += '-';
}
}
return result;
}
// Procedure: swap
inline void swap(UUID& lhs, UUID& rhs) {
lhs.swap(rhs);
}
// ostream
inline std::ostream& operator << (std::ostream& os, const UUID& rhs) {
os << rhs.to_string();
return os;
}
} // End of namespace tf. ----------------------------------------------------
//-----------------------------------------------------------------------------
namespace std {
// Partial specialization: hash<tf::UUID>
template <>
struct hash<tf::UUID> {
size_t operator()(const tf::UUID& rhs) const { return rhs.hash_value(); }
};
} // End of namespace std. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/math.hpp | #pragma once
#include <atomic>
#include <algorithm>
#include <iterator>
#include <type_traits>
namespace tf {
// rounds the given 64-bit unsigned integer to the nearest power of 2
template <typename T, std::enable_if_t<
(std::is_unsigned_v<std::decay_t<T>> && sizeof(T) == 8) , void
>* = nullptr>
constexpr T next_pow2(T x) {
if(x == 0) return 1;
x--;
x |= x>>1;
x |= x>>2;
x |= x>>4;
x |= x>>8;
x |= x>>16;
x |= x>>32;
x++;
return x;
}
// rounds the given 32-bit unsigned integer to the nearest power of 2
template <typename T, std::enable_if_t<
(std::is_unsigned_v<std::decay_t<T>> && sizeof(T) == 4), void
>* = nullptr>
constexpr T next_pow2(T x) {
if(x == 0) return 1;
x--;
x |= x>>1;
x |= x>>2;
x |= x>>4;
x |= x>>8;
x |= x>>16;
x++;
return x;
}
// checks if the given number is a power of 2
template <typename T, std::enable_if_t<
std::is_integral_v<std::decay_t<T>>, void>* = nullptr
>
constexpr bool is_pow2(const T& x) {
return x && (!(x&(x-1)));
}
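// Added example values (assuming a 32-bit unsigned int):
//   next_pow2(0u) == 1u, next_pow2(5u) == 8u, next_pow2(64u) == 64u
//   is_pow2(64) == true, is_pow2(0) == false, is_pow2(6) == false
// Note that next_pow2 only accepts unsigned arguments of exactly 4 or 8 bytes.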
//// finds the ceil of x divided by b
//template <typename T, std::enable_if_t<
// std::is_integral_v<std::decay_t<T>>, void>* = nullptr
//>
//constexpr T ceil(const T& x, const T& y) {
// //return (x + y - 1) / y;
// return (x-1) / y + 1;
//}
/**
@brief returns floor(log2(n)), assumes n > 0
*/
template<typename T>
constexpr int log2(T n) {
int log = 0;
while (n >>= 1) {
++log;
}
return log;
}
/**
@brief finds the median of the three values referenced by the given
iterators, using the given comparator
*/
template <typename RandItr, typename C>
RandItr median_of_three(RandItr l, RandItr m, RandItr r, C cmp) {
return cmp(*l, *m) ? (cmp(*m, *r) ? m : (cmp(*l, *r) ? r : l ))
: (cmp(*r, *m) ? m : (cmp(*r, *l) ? r : l ));
}
/**
@brief finds the pseudo-median of a range of items by sampling nine
evenly spread elements
*/
template <typename RandItr, typename C>
RandItr pseudo_median_of_nine(RandItr beg, RandItr end, C cmp) {
size_t N = std::distance(beg, end);
size_t offset = N >> 3;
return median_of_three(
median_of_three(beg, beg+offset, beg+(offset*2), cmp),
median_of_three(beg+(offset*3), beg+(offset*4), beg+(offset*5), cmp),
median_of_three(beg+(offset*6), beg+(offset*7), end-1, cmp),
cmp
);
}
/**
@brief sorts two elements of dereferenced iterators using the given
comparison function
*/
template<typename Iter, typename Compare>
void sort2(Iter a, Iter b, Compare comp) {
if (comp(*b, *a)) std::iter_swap(a, b);
}
/**
@brief sorts three elements of dereferenced iterators using the given
comparison function
*/
template<typename Iter, typename Compare>
void sort3(Iter a, Iter b, Iter c, Compare comp) {
sort2(a, b, comp);
sort2(b, c, comp);
sort2(a, b, comp);
}
/**
@brief generates a program-wide unique id of the given type (thread-safe)
*/
template <typename T, std::enable_if_t<std::is_integral_v<T>, void>* = nullptr>
T unique_id() {
static std::atomic<T> counter{0};
return counter.fetch_add(1, std::memory_order_relaxed);
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/iterator.hpp | #pragma once
#include <cstddef>
#include <type_traits>
namespace tf {
template <typename T>
constexpr std::enable_if_t<std::is_integral<std::decay_t<T>>::value, size_t>
distance(T beg, T end, T step) {
return (end - beg + step + (step > 0 ? -1 : 1)) / step;
}
template <typename T>
constexpr std::enable_if_t<std::is_integral<std::decay_t<T>>::value, bool>
is_range_invalid(T beg, T end, T step) {
return ((step == 0 && beg != end) ||
(beg < end && step <= 0) ||
(beg > end && step >= 0));
}
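// Added example values (illustrative only):
//   distance(0, 10, 3) == 4             // visits 0, 3, 6, 9
//   distance(10, 0, -2) == 5            // visits 10, 8, 6, 4, 2
//   is_range_invalid(0, 10, -1) == true // ascending range with a non-positive step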
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/serializer.hpp | #pragma once
#include <type_traits>
#include <iterator>
#include <iostream>
#include <fstream>
#include <stack>
#include <queue>
#include <vector>
#include <deque>
#include <chrono>
#include <tuple>
#include <algorithm>
#include <memory>
#include <functional>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <sstream>
#include <list>
#include <forward_list>
#include <numeric>
#include <iomanip>
#include <cassert>
#include <cmath>
#include <array>
#include <string>
#include <variant>
#include <optional>
namespace tf {
// ----------------------------------------------------------------------------
// Supported C++ STL type
// ----------------------------------------------------------------------------
// std::basic_string
template <typename T>
struct is_std_basic_string : std::false_type {};
template <typename... ArgsT>
struct is_std_basic_string <std::basic_string<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_basic_string_v = is_std_basic_string<T>::value;
// std::array
template <typename T>
struct is_std_array : std::false_type {};
template <typename T, size_t N>
struct is_std_array <std::array<T, N>> : std::true_type {};
template <typename T>
constexpr bool is_std_array_v = is_std_array<T>::value;
// std::vector
template <typename T>
struct is_std_vector : std::false_type {};
template <typename... ArgsT>
struct is_std_vector <std::vector<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_vector_v = is_std_vector<T>::value;
// std::deque
template <typename T>
struct is_std_deque : std::false_type {};
template <typename... ArgsT>
struct is_std_deque <std::deque<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_deque_v = is_std_deque<T>::value;
// std::list
template <typename T>
struct is_std_list : std::false_type {};
template <typename... ArgsT>
struct is_std_list <std::list<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_list_v = is_std_list<T>::value;
// std::forward_list
template <typename T>
struct is_std_forward_list : std::false_type {};
template <typename... ArgsT>
struct is_std_forward_list <std::forward_list<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_forward_list_v = is_std_forward_list<T>::value;
// std::map
template <typename T>
struct is_std_map : std::false_type {};
template <typename... ArgsT>
struct is_std_map <std::map<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_map_v = is_std_map<T>::value;
// std::unordered_map
template <typename T>
struct is_std_unordered_map : std::false_type {};
template <typename... ArgsT>
struct is_std_unordered_map <std::unordered_map<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unordered_map_v = is_std_unordered_map<T>::value;
// std::set
template <typename T>
struct is_std_set : std::false_type {};
template <typename... ArgsT>
struct is_std_set <std::set<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_set_v = is_std_set<T>::value;
// std::unordered_set
template <typename T>
struct is_std_unordered_set : std::false_type {};
template <typename... ArgsT>
struct is_std_unordered_set <std::unordered_set<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unordered_set_v = is_std_unordered_set<T>::value;
// std::variant
template <typename T>
struct is_std_variant : std::false_type {};
template <typename... ArgsT>
struct is_std_variant <std::variant<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_variant_v = is_std_variant<T>::value;
// std::optional
template <typename T>
struct is_std_optional : std::false_type {};
template <typename... ArgsT>
struct is_std_optional <std::optional<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_optional_v = is_std_optional<T>::value;
// std::unique_ptr
template <typename T>
struct is_std_unique_ptr : std::false_type {};
template <typename... ArgsT>
struct is_std_unique_ptr <std::unique_ptr<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_unique_ptr_v = is_std_unique_ptr<T>::value;
// std::shared_ptr
template <typename T>
struct is_std_shared_ptr : std::false_type {};
template <typename... ArgsT>
struct is_std_shared_ptr <std::shared_ptr<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_shared_ptr_v = is_std_shared_ptr<T>::value;
// std::duration
template <typename T> struct is_std_duration : std::false_type {};
template <typename... ArgsT>
struct is_std_duration<std::chrono::duration<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_duration_v = is_std_duration<T>::value;
// std::time_point
template <typename T>
struct is_std_time_point : std::false_type {};
template <typename... ArgsT>
struct is_std_time_point<std::chrono::time_point<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_time_point_v = is_std_time_point<T>::value;
// std::tuple
template <typename T>
struct is_std_tuple : std::false_type {};
template <typename... ArgsT>
struct is_std_tuple<std::tuple<ArgsT...>> : std::true_type {};
template <typename T>
constexpr bool is_std_tuple_v = is_std_tuple<T>::value;
//-----------------------------------------------------------------------------
// Type extraction.
//-----------------------------------------------------------------------------
// ExtractType: forward declaration
template <size_t, typename>
struct ExtractType;
// ExtractType_t: alias interface
template <size_t idx, typename C>
using ExtractType_t = typename ExtractType<idx, C>::type;
// ExtractType: base
template <template <typename...> typename C, typename T, typename... RestT>
struct ExtractType <0, C<T, RestT...>> {
using type = T;
};
// ExtractType: base
template <typename T>
struct ExtractType <0, T> {
using type = T;
};
// ExtractType: recursive definition.
template <size_t idx, template <typename...> typename C, typename T, typename... RestT>
struct ExtractType <idx, C<T, RestT...>> : ExtractType<idx-1, C<RestT...>> {
};
// ----------------------------------------------------------------------------
// Size Wrapper
// ----------------------------------------------------------------------------
// Struct: SizeTag
// Class that wraps a given size item which can be customized.
template <typename T>
class SizeTag {
public:
using type = std::conditional_t<std::is_lvalue_reference_v<T>, T, std::decay_t<T>>;
SizeTag(T&& item) : _item(std::forward<T>(item)) {}
SizeTag& operator = (const SizeTag&) = delete;
inline const T& get() const {return _item;}
template <typename ArchiverT>
auto save(ArchiverT & ar) const { return ar(_item); }
template <typename ArchiverT>
auto load(ArchiverT & ar) { return ar(_item); }
private:
type _item;
};
// Function: make_size_tag
template <typename T>
SizeTag<T> make_size_tag(T&& t) {
return { std::forward<T>(t) };
}
// ----------------------------------------------------------------------------
// Map Item
// ----------------------------------------------------------------------------
// Class: MapItem
template <typename KeyT, typename ValueT>
class MapItem {
public:
using KeyType = std::conditional_t <std::is_lvalue_reference_v<KeyT>, KeyT, std::decay_t<KeyT>>;
using ValueType = std::conditional_t <std::is_lvalue_reference_v<ValueT>, ValueT, std::decay_t<ValueT>>;
MapItem(KeyT&& k, ValueT&& v) : _key(std::forward<KeyT>(k)), _value(std::forward<ValueT>(v)) {}
MapItem& operator = (const MapItem&) = delete;
inline const KeyT& key() const { return _key; }
inline const ValueT& value() const { return _value; }
template <typename ArchiverT>
auto save(ArchiverT & ar) const { return ar(_key, _value); }
template <typename ArchiverT>
auto load(ArchiverT & ar) { return ar(_key, _value); }
private:
KeyType _key;
ValueType _value;
};
// Function: make_kv_pair
template <typename KeyT, typename ValueT>
MapItem<KeyT, ValueT> make_kv_pair(KeyT&& k, ValueT&& v) {
return { std::forward<KeyT>(k), std::forward<ValueT>(v) };
}
// ----------------------------------------------------------------------------
// Serializer Definition
// ----------------------------------------------------------------------------
template <typename T>
constexpr auto is_default_serializable_v = (
std::is_arithmetic_v<T> ||
std::is_enum_v<T> ||
is_std_basic_string_v<T> ||
is_std_vector_v<T> ||
is_std_deque_v<T> ||
is_std_list_v<T> ||
is_std_forward_list_v<T> ||
is_std_map_v<T> ||
is_std_unordered_map_v<T> ||
is_std_set_v<T> ||
is_std_unordered_set_v<T> ||
is_std_duration_v<T> ||
is_std_time_point_v<T> ||
is_std_variant_v<T> ||
is_std_optional_v<T> ||
is_std_tuple_v<T> ||
is_std_array_v<T>
);
// Class: Serializer
template <typename Stream, typename SizeType = std::streamsize>
class Serializer {
public:
Serializer(Stream& stream);
template <typename... T>
SizeType operator()(T&&... items);
private:
Stream& _stream;
template <typename T,
std::enable_if_t<!is_default_serializable_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_forward_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_map_v<std::decay_t<T>> ||
is_std_unordered_map_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<
is_std_set_v<std::decay_t<T>> ||
is_std_unordered_set_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _save(T&&);
};
// Constructor
template <typename Stream, typename SizeType>
Serializer<Stream, SizeType>::Serializer(Stream& stream) : _stream(stream) {
}
// Operator ()
template <typename Stream, typename SizeType>
template <typename... T>
SizeType Serializer<Stream, SizeType>::operator() (T&&... items) {
return (_save(std::forward<T>(items)) + ...);
}
// arithmetic data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
_stream.write(reinterpret_cast<const char*>(std::addressof(t)), sizeof(t));
return sizeof(t);
}
// std::basic_string
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
auto sz = _save(make_size_tag(t.size()));
_stream.write(
reinterpret_cast<const char*>(t.data()),
t.size()*sizeof(typename U::value_type)
);
return sz + t.size()*sizeof(typename U::value_type);
}
// std::vector
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
auto sz = _save(make_size_tag(t.size()));
if constexpr (std::is_arithmetic_v<typename U::value_type>) {
_stream.write(
reinterpret_cast<const char*>(t.data()),
t.size() * sizeof(typename U::value_type)
);
sz += t.size() * sizeof(typename U::value_type);
} else {
for(auto&& item : t) {
sz += _save(item);
}
}
return sz;
}
// std::list and std::deque
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// std::forward_list
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_forward_list_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(std::distance(t.begin(), t.end())));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// std::map and std::unordered_map
template <typename Stream, typename SizeType>
template <typename T, std::enable_if_t<
is_std_map_v<std::decay_t<T>> ||
is_std_unordered_map_v<std::decay_t<T>>,
void
>*>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& [k, v] : t) {
sz += _save(make_kv_pair(k, v));
}
return sz;
}
// std::set and std::unordered_set
template <typename Stream, typename SizeType>
template <typename T, std::enable_if_t<
is_std_set_v<std::decay_t<T>> ||
is_std_unordered_set_v<std::decay_t<T>>,
void
>*>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
auto sz = _save(make_size_tag(t.size()));
for(auto&& item : t) {
sz += _save(item);
}
return sz;
}
// enum data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
return _save(static_cast<std::underlying_type_t<U>>(t));
}
// duration data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.count());
}
// time point data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.time_since_epoch());
}
// optional data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
if(bool flag = t.has_value(); flag) {
return _save(flag) + _save(*t);
}
else {
return _save(flag);
}
}
// variant type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return _save(t.index()) +
std::visit([&] (auto&& arg){ return _save(arg);}, t);
}
// tuple type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return std::apply(
[&] (auto&&... args) {
return (_save(std::forward<decltype(args)>(args)) + ... + 0);
},
std::forward<T>(t)
);
}
// array
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
using U = std::decay_t<T>;
static_assert(std::tuple_size<U>::value > 0, "Array size can't be zero");
SizeType sz;
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
_stream.write(reinterpret_cast<const char*>(t.data()), sizeof(t));
sz = sizeof(t);
}
else {
sz = 0;
for(auto&& item : t) {
sz += _save(item);
}
}
return sz;
}
// custom save method
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<!is_default_serializable_v<std::decay_t<T>>, void>*
>
SizeType Serializer<Stream, SizeType>::_save(T&& t) {
return t.save(*this);
}
// ----------------------------------------------------------------------------
// DeSerializer Definition
// ----------------------------------------------------------------------------
template <typename T>
constexpr auto is_default_deserializable_v =
std::is_arithmetic_v<T> ||
std::is_enum_v<T> ||
is_std_basic_string_v<T> ||
is_std_vector_v<T> ||
is_std_deque_v<T> ||
is_std_list_v<T> ||
is_std_forward_list_v<T> ||
is_std_map_v<T> ||
is_std_unordered_map_v<T> ||
is_std_set_v<T> ||
is_std_unordered_set_v<T> ||
is_std_duration_v<T> ||
is_std_time_point_v<T> ||
is_std_variant_v<T> ||
is_std_optional_v<T> ||
is_std_tuple_v<T> ||
is_std_array_v<T>;
// Class: Deserializer
template <typename Stream, typename SizeType = std::streamsize>
class Deserializer {
public:
Deserializer(Stream& stream);
template <typename... T>
SizeType operator()(T&&... items);
private:
Stream& _stream;
// Function: _variant_helper
template <
size_t I = 0, typename... ArgsT,
std::enable_if_t<I==sizeof...(ArgsT)>* = nullptr
>
SizeType _variant_helper(size_t, std::variant<ArgsT...>&);
// Function: _variant_helper
template <
size_t I = 0, typename... ArgsT,
std::enable_if_t<I<sizeof...(ArgsT)>* = nullptr
>
SizeType _variant_helper(size_t, std::variant<ArgsT...>&);
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<
is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>> ||
is_std_forward_list_v<std::decay_t<T>>,
void
>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_map_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_unordered_map_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_set_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_unordered_set_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
template <typename T,
std::enable_if_t<!is_default_deserializable_v<std::decay_t<T>>, void>* = nullptr
>
SizeType _load(T&&);
};
// Constructor
template <typename Stream, typename SizeType>
Deserializer<Stream, SizeType>::Deserializer(Stream& stream) : _stream(stream) {
}
// Operator ()
template <typename Stream, typename SizeType>
template <typename... T>
SizeType Deserializer<Stream, SizeType>::operator() (T&&... items) {
return (_load(std::forward<T>(items)) + ...);
}
// Function: _variant_helper
template <typename Stream, typename SizeType>
template <size_t I, typename... ArgsT, std::enable_if_t<I==sizeof...(ArgsT)>*>
SizeType Deserializer<Stream, SizeType>::_variant_helper(size_t, std::variant<ArgsT...>&) {
return 0;
}
// Function: _variant_helper
template <typename Stream, typename SizeType>
template <size_t I, typename... ArgsT, std::enable_if_t<I<sizeof...(ArgsT)>*>
SizeType Deserializer<Stream, SizeType>::_variant_helper(size_t i, std::variant<ArgsT...>& v) {
if(i == 0) {
using type = ExtractType_t<I, std::variant<ArgsT...>>;
if(v.index() != I) {
static_assert(
std::is_default_constructible<type>::value,
"Failed to archive variant (type should be default constructible T())"
);
v = type();
}
return _load(*std::get_if<type>(&v));
}
return _variant_helper<I+1, ArgsT...>(i-1, v);
}
// arithmetic data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_arithmetic_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
_stream.read(reinterpret_cast<char*>(std::addressof(t)), sizeof(t));
return sizeof(t);
}
// std::basic_string
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_basic_string_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_chars;
auto sz = _load(make_size_tag(num_chars));
t.resize(num_chars);
_stream.read(reinterpret_cast<char*>(t.data()), num_chars*sizeof(typename U::value_type));
return sz + num_chars*sizeof(typename U::value_type);
}
// std::vector
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_vector_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
t.resize(num_data);
_stream.read(reinterpret_cast<char*>(t.data()), num_data * sizeof(typename U::value_type));
sz += num_data * sizeof(typename U::value_type);
}
else {
t.resize(num_data);
for(auto && v : t) {
sz += _load(v);
}
}
return sz;
}
// std::list and std::deque
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_deque_v<std::decay_t<T>> ||
is_std_list_v<std::decay_t<T>> ||
is_std_forward_list_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.resize(num_data);
for(auto && v : t) {
sz += _load(v);
}
return sz;
}
// std::map
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_map_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
auto hint = t.begin();
typename U::key_type k;
typename U::mapped_type v;
for(size_t i=0; i<num_data; ++i) {
sz += _load(make_kv_pair(k, v));
hint = t.emplace_hint(hint, std::move(k), std::move(v));
}
return sz;
}
// std::unordered_map
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_unordered_map_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
t.reserve(num_data);
typename U::key_type k;
typename U::mapped_type v;
for(size_t i=0; i<num_data; ++i) {
sz += _load(make_kv_pair(k, v));
t.emplace(std::move(k), std::move(v));
}
return sz;
}
// std::set
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_set_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
auto hint = t.begin();
typename U::key_type k;
for(size_t i=0; i<num_data; ++i) {
sz += _load(k);
hint = t.emplace_hint(hint, std::move(k));
}
return sz;
}
// std::unordered_set
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_unordered_set_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::size_type num_data;
auto sz = _load(make_size_tag(num_data));
t.clear();
t.reserve(num_data);
typename U::key_type k;
for(size_t i=0; i<num_data; ++i) {
sz += _load(k);
t.emplace(std::move(k));
}
return sz;
}
// enum data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<std::is_enum_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
std::underlying_type_t<U> k;
auto sz = _load(k);
t = static_cast<U>(k);
return sz;
}
// duration data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_duration_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::rep count;
auto s = _load(count);
t = U{count};
return s;
}
// time point data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_time_point_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
typename U::duration elapsed;
auto s = _load(elapsed);
t = U{elapsed};
return s;
}
// optional data type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_optional_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
bool has_value;
auto s = _load(has_value);
if(has_value) {
if(!t) {
t = typename U::value_type();
}
s += _load(*t);
}
else {
t.reset();
}
return s;
}
// variant type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_variant_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
std::decay_t<decltype(t.index())> idx;
auto s = _load(idx);
return s + _variant_helper(idx, t);
}
// tuple type
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_tuple_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
return std::apply(
[&] (auto&&... args) {
return (_load(std::forward<decltype(args)>(args)) + ... + 0);
},
std::forward<T>(t)
);
}
// array
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<is_std_array_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
using U = std::decay_t<T>;
static_assert(std::tuple_size<U>::value > 0, "Array size can't be zero");
SizeType sz;
if constexpr(std::is_arithmetic_v<typename U::value_type>) {
_stream.read(reinterpret_cast<char*>(t.data()), sizeof(t));
sz = sizeof(t);
}
else {
sz = 0;
for(auto && v : t) {
sz += _load(v);
}
}
return sz;
}
// custom load method
template <typename Stream, typename SizeType>
template <typename T,
std::enable_if_t<!is_default_deserializable_v<std::decay_t<T>>, void>*
>
SizeType Deserializer<Stream, SizeType>::_load(T&& t) {
return t.load(*this);
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/os.hpp | #pragma once
#include <cstdlib>
#include <cstdio>
#include <string>
#define TF_OS_LINUX 0
#define TF_OS_DRAGONFLY 0
#define TF_OS_FREEBSD 0
#define TF_OS_NETBSD 0
#define TF_OS_OPENBSD 0
#define TF_OS_DARWIN 0
#define TF_OS_WINDOWS 0
#define TF_OS_CNK 0
#define TF_OS_HURD 0
#define TF_OS_SOLARIS 0
#define TF_OS_UNIX 0
#ifdef _WIN32
#undef TF_OS_WINDOWS
#define TF_OS_WINDOWS 1
#endif
#ifdef __CYGWIN__
#undef TF_OS_WINDOWS
#define TF_OS_WINDOWS 1
#endif
#if (defined __APPLE__ && defined __MACH__)
#undef TF_OS_DARWIN
#define TF_OS_DARWIN 1
#endif
// in some ppc64 linux installations, only the second condition is met
#if (defined __linux)
#undef TF_OS_LINUX
#define TF_OS_LINUX 1
#elif (defined __linux__)
#undef TF_OS_LINUX
#define TF_OS_LINUX 1
#else
#endif
#if (defined __DragonFly__)
#undef TF_OS_DRAGONFLY
#define TF_OS_DRAGONFLY 1
#endif
#if (defined __FreeBSD__)
#undef TF_OS_FREEBSD
#define TF_OS_FREEBSD 1
#endif
#if (defined __NetBSD__)
#undef TF_OS_NETBSD
#define TF_OS_NETBSD 1
#endif
#if (defined __OpenBSD__)
#undef TF_OS_OPENBSD
#define TF_OS_OPENBSD 1
#endif
#if (defined __bgq__)
#undef TF_OS_CNK
#define TF_OS_CNK 1
#endif
#if (defined __GNU__)
#undef TF_OS_HURD
#define TF_OS_HURD 1
#endif
#if (defined __sun)
#undef TF_OS_SOLARIS
#define TF_OS_SOLARIS 1
#endif
#if (1 != \
TF_OS_LINUX + TF_OS_DRAGONFLY + TF_OS_FREEBSD + TF_OS_NETBSD + \
TF_OS_OPENBSD + TF_OS_DARWIN + TF_OS_WINDOWS + TF_OS_HURD + \
TF_OS_SOLARIS)
#define TF_OS_UNKNOWN 1
#endif
#if TF_OS_LINUX || TF_OS_DRAGONFLY || TF_OS_FREEBSD || TF_OS_NETBSD || \
TF_OS_OPENBSD || TF_OS_DARWIN || TF_OS_HURD || TF_OS_SOLARIS
#undef TF_OS_UNIX
#define TF_OS_UNIX 1
#endif
//-----------------------------------------------------------------------------
// Cache line alignment
//-----------------------------------------------------------------------------
#if defined(__i386__) || defined(__x86_64__)
#define TF_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
// TODO
// This is the L1 D-cache line size of our Power7 machines.
// Need to check if this is appropriate for other PowerPC64 systems.
#define TF_CACHELINE_SIZE 128
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures.
// There are even implementations with cache line sizes configurable
// at boot time.
#if defined(__ARM_ARCH_5T__)
#define TF_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define TF_CACHELINE_SIZE 64
#endif
#endif
#ifndef TF_CACHELINE_SIZE
// A reasonable default guess. Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define TF_CACHELINE_SIZE 64
#endif
//-----------------------------------------------------------------------------
// pause
//-----------------------------------------------------------------------------
//#if __has_include (<immintrin.h>)
// #define TF_HAS_MM_PAUSE 1
// #include <immintrin.h>
//#endif
namespace tf {
// Struct: CachelineAligned
// Due to prefetch, we typically do 2x cacheline for the alignment.
template <typename T>
struct CachelineAligned {
alignas (2*TF_CACHELINE_SIZE) T data;
};
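// A minimal usage sketch (illustrative only; num_workers and w are
// placeholders, and the snippet assumes <vector> and <atomic> are included):
// padding each per-worker counter to two cache lines keeps concurrent
// updates from different threads off the same cache line (false sharing).
//
//   std::vector<CachelineAligned<std::atomic<size_t>>> counters(num_workers);
//   counters[w].data.fetch_add(1, std::memory_order_relaxed);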
// Function: get_env
inline std::string get_env(const std::string& str) {
#ifdef _MSC_VER
char *ptr = nullptr;
size_t len = 0;
if(_dupenv_s(&ptr, &len, str.c_str()) == 0 && ptr != nullptr) {
std::string res(ptr, len);
std::free(ptr);
return res;
}
return "";
#else
auto ptr = std::getenv(str.c_str());
return ptr ? ptr : "";
#endif
}
// Function: has_env
inline bool has_env(const std::string& str) {
#ifdef _MSC_VER
char *ptr = nullptr;
size_t len = 0;
if(_dupenv_s(&ptr, &len, str.c_str()) == 0 && ptr != nullptr) {
std::string res(ptr, len);
std::free(ptr);
return true;
}
return false;
#else
auto ptr = std::getenv(str.c_str());
return ptr ? true : false;
#endif
}
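// Example usage (illustrative only; the environment variable name is a
// placeholder, not one defined by this library):
//
//   if(tf::has_env("MY_APP_NUM_THREADS")) {
//     auto n = std::stoul(tf::get_env("MY_APP_NUM_THREADS"));
//     // ... use n to configure the application ...
//   }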
// Procedure: relax_cpu
//inline void relax_cpu() {
//#ifdef TF_HAS_MM_PAUSE
// _mm_pause();
//#endif
//}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/utility/small_vector.hpp | // small vector modified from llvm
#pragma once
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
#if defined(__GNUC__)
#define TF_LIKELY(x) (__builtin_expect((x), 1))
#define TF_UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define TF_LIKELY(x) (x)
#define TF_UNLIKELY(x) (x)
#endif
/**
@file small_vector.hpp
@brief small vector include file
*/
namespace tf { namespace detail {
/**
@private
@brief NextCapacity - Returns the next power of two (in 64-bits)
that is strictly greater than A. Returns zero on overflow.
This function assumes A to be positive.
*/
inline uint64_t NextCapacity(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
A |= (A >> 4);
A |= (A >> 8);
A |= (A >> 16);
A |= (A >> 32);
return A + 1;
}
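// For example, NextCapacity(3) == 4, NextCapacity(4) == 8, and
// NextCapacity(48) == 64: the shifts above smear the highest set bit into
// every lower bit, so adding one yields the next power of two strictly
// greater than A.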
}} // end of namespace tf::detail --------------------------------------------
namespace tf {
/**
@private
*/
template <typename T>
struct IsPod : std::integral_constant<bool, std::is_standard_layout<T>::value &&
std::is_trivial<T>::value> {};
/**
@private
*/
class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
protected:
SmallVectorBase(void *FirstEl, size_t Size)
: BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
/// This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize){
size_t CurSizeBytes = size_in_bytes();
size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow.
if (NewCapacityInBytes < MinSizeInBytes) {
NewCapacityInBytes = MinSizeInBytes;
}
void *NewElts;
if (BeginX == FirstEl) {
NewElts = std::malloc(NewCapacityInBytes);
// Copy the elements over. No need to run dtors on PODs.
memcpy(NewElts, this->BeginX, CurSizeBytes);
} else {
// If this wasn't grown from the inline copy, grow the allocated space.
NewElts = realloc(this->BeginX, NewCapacityInBytes);
}
//assert(NewElts && "Out of memory");
this->EndX = (char*)NewElts+CurSizeBytes;
this->BeginX = NewElts;
this->CapacityX = (char*)this->BeginX + NewCapacityInBytes;
}
public:
/// This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
bool empty() const { return BeginX == EndX; }
};
/**
@private
*/
template <typename T, unsigned N> struct SmallVectorStorage;
/**
@private
*/
template <typename T, typename = void>
class SmallVectorTemplateCommon : public SmallVectorBase {
private:
template <typename, unsigned> friend struct SmallVectorStorage;
template <typename X>
struct AlignedUnionType {
alignas(X) std::byte buff[std::max(sizeof(std::byte), sizeof(X))];
};
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. Use an array of char of sufficient alignment.
// deprecated in c++23
//typedef typename std::aligned_union<1, T>::type U;
typedef AlignedUnionType<T> U;
U FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
void grow_pod(size_t MinSizeInBytes, size_t TSize) {
SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
}
/// Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
/// Put this vector in a state of being small.
void resetToSmall() {
BeginX = EndX = CapacityX = &FirstEl;
}
void setEnd(T *P) { this->EndX = P; }
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
// forward iterator creation methods.
inline iterator begin() { return (iterator)this->BeginX; }
inline const_iterator begin() const { return (const_iterator)this->BeginX; }
inline iterator end() { return (iterator)this->EndX; }
inline const_iterator end() const { return (const_iterator)this->EndX; }
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
inline size_type size() const { return end()-begin(); }
inline size_type max_size() const { return size_type(-1) / sizeof(T); }
/// Return the total number of elements in the currently allocated buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
/// Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
inline reference operator[](size_type idx) {
//assert(idx < size());
return begin()[idx];
}
inline const_reference operator[](size_type idx) const {
//assert(idx < size());
return begin()[idx];
}
reference front() {
//assert(!empty());
return begin()[0];
}
const_reference front() const {
//assert(!empty());
return begin()[0];
}
reference back() {
//assert(!empty());
return end()[-1];
}
const_reference back() const {
//assert(!empty());
return end()[-1];
}
};
/**
@private
*/
template <typename T, bool isPodLike>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
while (S != E) {
--E;
E->~T();
}
}
/// Move the range [I, E) into the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(std::make_move_iterator(I),
std::make_move_iterator(E), Dest);
}
/// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
/// Grow the allocated memory (without initializing new elements), doubling
/// the size of the allocated memory. Guarantees space for at least one more
/// element, or MinSize more elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
}
void push_back(T &&Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(::std::move(Elt));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
};
/**
@private
*/
template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
// Always grow, even from zero.
size_t NewCapacity = size_t(tf::detail::NextCapacity(CurCapacity+2));
if (NewCapacity < MinSize)
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(std::malloc(NewCapacity*sizeof(T)));
// Move the elements over.
this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
std::free(this->begin());
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
/**
@private
*/
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
/// Move the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
// Just do a copy.
uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
// Arbitrary iterator types; just use the basic implementation.
std::uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template <typename T1, typename T2>
static void uninitialized_copy(
T1 *I, T1 *E, T2 *Dest,
typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
T2>::value>::type * = nullptr) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here. Note that I and E are iterators and thus might be
// invalid for memcpy if they are equal.
if (I != E)
memcpy(Dest, I, (E - I) * sizeof(T));
}
/// Double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
public:
void push_back(const T &Elt) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
}
};
/**
@private
*/
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, IsPod<T>::value> {
typedef SmallVectorTemplateBase<T, IsPod<T>::value> SuperClass;
SmallVectorImpl(const SmallVectorImpl&) = delete;
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::const_iterator const_iterator;
typedef typename SuperClass::size_type size_type;
protected:
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, IsPod<T>::value>(N*sizeof(T)) {
}
public:
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
std::free(this->begin());
}
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
}
void resize(size_type N) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
new (&*I) T();
this->setEnd(this->begin()+N);
}
}
void resize(size_type N, const T &NV) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
std::uninitialized_fill(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
}
}
void reserve(size_type N) {
if (this->capacity() < N)
this->grow(N);
}
T pop_back_val() {
T Result = ::std::move(this->back());
this->pop_back();
return Result;
}
void swap(SmallVectorImpl &RHS);
/// Add the specified range to the end of the SmallVector.
template<typename in_iter>
void append(in_iter in_start, in_iter in_end) {
size_type NumInputs = std::distance(in_start, in_end);
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
this->uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
/// Append @c NumInputs copies of @c Elt to the end of the SmallVector.
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
void append(std::initializer_list<T> IL) {
append(IL.begin(), IL.end());
}
void assign(size_type NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
std::uninitialized_fill(this->begin(), this->end(), Elt);
}
void assign(std::initializer_list<T> IL) {
clear();
append(IL);
}
iterator erase(const_iterator CI) {
// Just cast away constness because this is a non-const member function.
iterator I = const_cast<iterator>(CI);
//assert(I >= this->begin() && "Iterator to erase is out of bounds.");
//assert(I < this->end() && "Erasing at past-the-end iterator.");
iterator N = I;
// Shift all elts down one.
std::move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return(N);
}
iterator erase(const_iterator CS, const_iterator CE) {
// Just cast away constness because this is a non-const member function.
iterator S = const_cast<iterator>(CS);
iterator E = const_cast<iterator>(CE);
//assert(S >= this->begin() && "Range to erase is out of bounds.");
//assert(S <= E && "Trying to erase invalid range.");
//assert(E <= this->end() && "Trying to erase past the end.");
iterator N = S;
// Shift all elts down.
iterator I = std::move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
iterator insert(iterator I, T &&Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(::std::move(Elt));
return this->end()-1;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(::std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = ::std::move(*EltPtr);
return I;
}
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
const T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = *EltPtr;
return I;
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->begin()+InsertElt;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->begin()+InsertElt;
}
//assert(I >= this->begin() && "Insertion iterator is out of bounds.");
//assert(I <= this->end() && "Inserting past the end of the vector.");
size_t NumToInsert = std::distance(From, To);
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
*J = *From;
++J; ++From;
}
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
void insert(iterator I, std::initializer_list<T> IL) {
insert(I, IL.begin(), IL.end());
}
template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
if (TF_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
this->setEnd(this->end() + 1);
}
SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
}
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(size_type N) {
//assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
};
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this == &RHS) return;
// We can only avoid copying elements if neither vector is small.
if (!this->isSmall() && !RHS.isSmall()) {
std::swap(this->BeginX, RHS.BeginX);
std::swap(this->EndX, RHS.EndX);
std::swap(this->CapacityX, RHS.CapacityX);
return;
}
if (RHS.size() > this->capacity())
this->grow(RHS.size());
if (this->size() > RHS.capacity())
RHS.grow(this->size());
// Swap the shared elements.
size_t NumShared = this->size();
if (NumShared > RHS.size()) NumShared = RHS.size();
for (size_type i = 0; i != NumShared; ++i)
std::swap((*this)[i], RHS[i]);
// Copy over the extra elts.
if (this->size() > RHS.size()) {
size_t EltDiff = this->size() - RHS.size();
this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
RHS.setEnd(RHS.end()+EltDiff);
this->destroy_range(this->begin()+NumShared, this->end());
this->setEnd(this->begin()+NumShared);
} else if (RHS.size() > this->size()) {
size_t EltDiff = RHS.size() - this->size();
this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
this->setEnd(this->end() + EltDiff);
this->destroy_range(RHS.begin()+NumShared, RHS.end());
RHS.setEnd(RHS.begin()+NumShared);
}
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd;
if (RHSSize)
NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
else
NewEnd = this->begin();
// Destroy excess elements.
this->destroy_range(NewEnd, this->end());
// Trim.
this->setEnd(NewEnd);
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Copy construct the new elements in place.
this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
return *this;
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If the RHS isn't small, clear this vector and then steal its buffer.
if (!RHS.isSmall()) {
this->destroy_range(this->begin(), this->end());
if (!this->isSmall()) std::free(this->begin());
this->BeginX = RHS.BeginX;
this->EndX = RHS.EndX;
this->CapacityX = RHS.CapacityX;
RHS.resetToSmall();
return *this;
}
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd = this->begin();
if (RHSSize)
NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
// Destroy excess elements and trim the bounds.
this->destroy_range(NewEnd, this->end());
this->setEnd(NewEnd);
// Clear the RHS.
RHS.clear();
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: this may not actually make any sense if we can efficiently move
// elements.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Move-construct the new elements in place.
this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
RHS.clear();
return *this;
}
/**
@private
*/
template <typename T, unsigned N>
struct SmallVectorStorage {
/**
@private
*/
typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
};
/**
@private
*/
template <typename T> struct SmallVectorStorage<T, 1> {};
/**
@private
*/
template <typename T> struct SmallVectorStorage<T, 0> {};
/**
@brief class to define a vector optimized for small array
@tparam T data type
@tparam N threshold of the number of elements in the initial storage
The class defines a C++ STL-styled vector (a variable-sized array)
optimized for the case when the array is small.
It contains some number of elements in-place,
which allows it to avoid heap allocation when the actual number of
elements is below that threshold. This allows normal @em small cases to be
fast without losing generality for large inputs.
All the methods in [std::vector](https://en.cppreference.com/w/cpp/container/vector)
apply to this class as well.
The class is adapted from the LLVM codebase.
*/
template <typename T, unsigned N = 2>
class SmallVector : public SmallVectorImpl<T> {
/// Inline space for elements which aren't stored in the base class.
SmallVectorStorage<T, N> Storage;
public:
/**
@brief constructs an empty vector
*/
SmallVector() : SmallVectorImpl<T>(N) {
}
/**
@brief constructs a vector with @c Size copies of elements with value @c value
*/
explicit SmallVector(size_t Size, const T &Value = T())
: SmallVectorImpl<T>(N) {
this->assign(Size, Value);
}
/**
@brief constructs a vector with the contents of the range
<tt>[S, E)</tt>
*/
template<typename ItTy>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
this->append(S, E);
}
//template <typename RangeTy>
//explicit SmallVector(const tf::iterator_range<RangeTy> &R)
// : SmallVectorImpl<T>(N) {
// this->append(R.begin(), R.end());
//}
/**
@brief constructs a vector with the contents of the initializer list @c IL
*/
SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
this->assign(IL);
}
/**
@brief constructs the vector with the copy of the contents of @c RHS
*/
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
/**
@brief constructs the vector with the contents of @c RHS using move semantics
*/
SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
/**
@brief replaces the contents with a copy of the contents of @c RHS
*/
const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
/**
@brief replaces the contents with the contents of @c RHS using move semantics
*/
const SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
/**
@brief constructs a vector with the contents of @c RHS using move semantics
*/
SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
/**
@brief replaces the contents with the contents of @c RHS using move semantics
*/
const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
/**
@brief replaces the contents with the copy of the contents of an initializer list @c IL
*/
const SmallVector &operator=(std::initializer_list<T> IL) {
this->assign(IL);
return *this;
}
};
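// A minimal usage sketch (illustrative only): the first N elements live in
// the in-place storage, so no heap allocation happens until the size
// exceeds N.
//
//   tf::SmallVector<int, 4> v {1, 2, 3};  // stored in-place
//   v.push_back(4);                       // still in-place (size == N)
//   v.push_back(5);                       // grows into heap storage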
template<typename T, unsigned N>
static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
} // end tf namespace ---------------------------------------------------------
namespace std {
/// Implement std::swap in terms of SmallVector swap.
template<typename T>
inline void
swap(tf::SmallVectorImpl<T> &LHS, tf::SmallVectorImpl<T> &RHS) {
LHS.swap(RHS);
}
/// Implement std::swap in terms of SmallVector swap.
template<typename T, unsigned N>
inline void
swap(tf::SmallVector<T, N> &LHS, tf::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
} // end of namespace std ----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/sycl_graph.hpp | #pragma once
#include <CL/sycl.hpp>
#include "sycl_meta.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// syclGraph class
// ----------------------------------------------------------------------------
// class: syclGraph
class syclGraph : public CustomGraphBase {
friend class syclNode;
friend class syclTask;
friend class syclFlow;
friend class Taskflow;
friend class Executor;
constexpr static int OFFLOADED = 0x01;
constexpr static int TOPOLOGY_CHANGED = 0x02;
public:
syclGraph() = default;
~syclGraph() = default;
syclGraph(const syclGraph&) = delete;
syclGraph(syclGraph&&);
syclGraph& operator = (const syclGraph&) = delete;
syclGraph& operator = (syclGraph&&);
template <typename... ArgsT>
syclNode* emplace_back(ArgsT&&...);
bool empty() const;
void clear();
void dump(std::ostream&, const void*, const std::string&) const override final;
private:
int _state {0};
std::vector<std::unique_ptr<syclNode>> _nodes;
};
// ----------------------------------------------------------------------------
// syclNode definitions
// ----------------------------------------------------------------------------
// class: syclNode
class syclNode {
friend class syclGraph;
friend class syclTask;
friend class syclFlow;
friend class Taskflow;
friend class Executor;
struct Empty {
};
struct CGH {
std::function<void(sycl::handler&)> work;
template <typename F>
CGH(F&& func) : work {std::forward<F>(func)} {}
};
using handle_t = std::variant<
Empty,
CGH
>;
public:
// variant index
constexpr static auto EMPTY = get_index_v<Empty, handle_t>;
constexpr static auto COMMAND_GROUP_HANDLER = get_index_v<CGH, handle_t>;
syclNode() = delete;
template <typename... ArgsT>
syclNode(syclGraph&, ArgsT&&...);
private:
syclGraph& _graph;
std::string _name;
int _level;
sycl::event _event;
handle_t _handle;
SmallVector<syclNode*> _successors;
SmallVector<syclNode*> _dependents;
void _precede(syclNode*);
};
// ----------------------------------------------------------------------------
// syclNode definitions
// ----------------------------------------------------------------------------
// Constructor
template <typename... ArgsT>
syclNode::syclNode(syclGraph& g, ArgsT&&... args) :
_graph {g},
_handle {std::forward<ArgsT>(args)...} {
}
// Procedure: _precede
inline void syclNode::_precede(syclNode* v) {
_graph._state |= syclGraph::TOPOLOGY_CHANGED;
_successors.push_back(v);
v->_dependents.push_back(this);
}
// ----------------------------------------------------------------------------
// syclGraph definitions
// ----------------------------------------------------------------------------
// Move constructor
inline syclGraph::syclGraph(syclGraph&& g) :
_nodes {std::move(g._nodes)} {
assert(g._nodes.empty());
}
// Move assignment
inline syclGraph& syclGraph::operator = (syclGraph&& rhs) {
// lhs
_nodes = std::move(rhs._nodes);
assert(rhs._nodes.empty());
return *this;
}
// Function: empty
inline bool syclGraph::empty() const {
return _nodes.empty();
}
// Procedure: clear
inline void syclGraph::clear() {
_state = syclGraph::TOPOLOGY_CHANGED;
_nodes.clear();
}
// Function: emplace_back
template <typename... ArgsT>
syclNode* syclGraph::emplace_back(ArgsT&&... args) {
_state |= syclGraph::TOPOLOGY_CHANGED;
auto node = std::make_unique<syclNode>(std::forward<ArgsT>(args)...);
_nodes.emplace_back(std::move(node));
return _nodes.back().get();
// TODO: object pool
//auto node = new syclNode(std::forward<ArgsT>(args)...);
//_nodes.push_back(node);
//return node;
}
// Procedure: dump the graph to a DOT format
inline void syclGraph::dump(
std::ostream& os, const void* root, const std::string& root_name
) const {
// recursive dump with stack
std::stack<std::tuple<const syclGraph*, const syclNode*, int>> stack;
stack.push(std::make_tuple(this, nullptr, 1));
int pl = 0;
while(!stack.empty()) {
auto [graph, parent, l] = stack.top();
stack.pop();
for(int i=0; i<pl-l+1; i++) {
os << "}\n";
}
if(parent == nullptr) {
if(root) {
os << "subgraph cluster_p" << root << " {\nlabel=\"syclFlow: ";
if(root_name.empty()) os << 'p' << root;
else os << root_name;
os << "\";\n" << "color=\"red\"\n";
}
else {
os << "digraph syclFlow {\n";
}
}
else {
os << "subgraph cluster_p" << parent << " {\nlabel=\"syclSubflow: ";
if(parent->_name.empty()) os << 'p' << parent;
else os << parent->_name;
os << "\";\n" << "color=\"purple\"\n";
}
for(auto& v : graph->_nodes) {
os << 'p' << v.get() << "[label=\"";
if(v->_name.empty()) {
os << 'p' << v.get() << "\"";
}
else {
os << v->_name << "\"";
}
os << "];\n";
for(const auto s : v->_successors) {
os << 'p' << v.get() << " -> " << 'p' << s << ";\n";
}
if(v->_successors.size() == 0) {
if(parent == nullptr) {
if(root) {
os << 'p' << v.get() << " -> p" << root << ";\n";
}
}
else {
os << 'p' << v.get() << " -> p" << parent << ";\n";
}
}
}
// set the previous level
pl = l;
}
for(int i=0; i<pl; i++) {
os << "}\n";
}
}
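// For a standalone graph with two named nodes where A precedes B, the DOT
// output produced above looks roughly like this (node pointers abbreviated
// as pA/pB for readability; illustrative only):
//
//   digraph syclFlow {
//   pA[label="A"];
//   pA -> pB;
//   pB[label="B"];
//   }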
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/sycl_task.hpp | #pragma once
#include "sycl_graph.hpp"
/**
@file sycl_task.hpp
@brief syclTask include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// syclTask
// ----------------------------------------------------------------------------
/**
@class syclTask
@brief handle to a node of the internal SYCL graph
*/
class syclTask {
friend class syclFlow;
friend std::ostream& operator << (std::ostream&, const syclTask&);
public:
/**
@brief constructs an empty syclTask
*/
syclTask() = default;
/**
@brief copy-constructs a syclTask
*/
syclTask(const syclTask&) = default;
/**
@brief copy-assigns a syclTask
*/
syclTask& operator = (const syclTask&) = default;
/**
@brief adds precedence links from this to other tasks
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
syclTask& precede(Ts&&... tasks);
/**
@brief adds precedence links from other tasks to this
@tparam Ts parameter pack
@param tasks one or multiple tasks
@return @c *this
*/
template <typename... Ts>
syclTask& succeed(Ts&&... tasks);
/**
@brief assigns a name to the task
@param name a @std_string acceptable string
@return @c *this
*/
syclTask& name(const std::string& name);
/**
@brief queries the name of the task
*/
const std::string& name() const;
/**
@brief queries the number of successors
*/
size_t num_successors() const;
/**
@brief queries the number of dependents
*/
size_t num_dependents() const;
/**
@brief queries if the task is associated with a syclNode
*/
bool empty() const;
/**
@brief dumps the task through an output stream
@tparam T output stream type with insertion operator (<<) defined
@param ostream an output stream target
*/
template <typename T>
void dump(T& ostream) const;
/**
@brief applies a visitor callable to each successor of the task
*/
template <typename V>
void for_each_successor(V&& visitor) const;
/**
@brief applies a visitor callable to each dependent of the task
*/
template <typename V>
void for_each_dependent(V&& visitor) const;
private:
syclTask(syclNode*);
syclNode* _node {nullptr};
};
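// A minimal usage sketch (illustrative only; the two tasks are assumed to
// have been created through a tf::syclFlow, whose creation calls are
// omitted here):
//
//   // tf::syclTask A = ...;
//   // tf::syclTask B = ...;
//   A.name("A").precede(B);   // equivalent to B.succeed(A)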
// Constructor
inline syclTask::syclTask(syclNode* node) : _node {node} {
}
// Function: precede
template <typename... Ts>
syclTask& syclTask::precede(Ts&&... tasks) {
(_node->_precede(tasks._node), ...);
return *this;
}
// Function: succeed
template <typename... Ts>
syclTask& syclTask::succeed(Ts&&... tasks) {
(tasks._node->_precede(_node), ...);
return *this;
}
// Function: empty
inline bool syclTask::empty() const {
return _node == nullptr;
}
// Function: name
inline syclTask& syclTask::name(const std::string& name) {
_node->_name = name;
return *this;
}
// Function: name
inline const std::string& syclTask::name() const {
return _node->_name;
}
// Function: num_successors
inline size_t syclTask::num_successors() const {
return _node->_successors.size();
}
// Function: num_dependents
inline size_t syclTask::num_dependents() const {
return _node->_dependents.size();
}
// Procedure: dump
template <typename T>
void syclTask::dump(T& os) const {
os << "syclTask ";
if(_node->_name.empty()) os << _node;
else os << _node->_name;
}
// Function: for_each_successor
template <typename V>
void syclTask::for_each_successor(V&& visitor) const {
for(size_t i=0; i<_node->_successors.size(); ++i) {
visitor(syclTask(_node->_successors[i]));
}
}
// Function: for_each_dependent
template <typename V>
void syclTask::for_each_dependent(V&& visitor) const {
for(size_t i=0; i<_node->_dependents.size(); ++i) {
visitor(syclTask(_node->_dependents[i]));
}
}
// ----------------------------------------------------------------------------
// global ostream
// ----------------------------------------------------------------------------
/**
@brief overload of ostream inserter operator for syclTask
*/
inline std::ostream& operator << (std::ostream& os, const syclTask& ct) {
ct.dump(os);
return os;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/sycl_execution_policy.hpp | #pragma once
/**
@file sycl_execution_policy.hpp
@brief SYCL execution policy include file
*/
namespace tf {
/**
@class syclExecutionPolicy
@brief class to define execution policy for SYCL standard algorithms
@tparam NT number of threads per block
@tparam VT number of work units per thread
Execution policy configures the kernel execution parameters in SYCL algorithms.
The first template argument, @c NT, the number of threads per block, must
always be a power of two.
The second template argument, @c VT, the number of work units per thread,
is recommended to be an odd number to avoid bank conflicts.
For more details, refer to @ref SYCLSTDExecutionPolicy.
*/
template<unsigned NT, unsigned VT>
class syclExecutionPolicy {
static_assert(is_pow2(NT), "max # threads per block must be a power of two");
public:
/** @brief static constant for getting the number of threads per block */
const static unsigned nt = NT;
/** @brief static constant for getting the number of work units per thread */
const static unsigned vt = VT;
/** @brief static constant for getting the number of elements to process per block */
const static unsigned nv = NT*VT;
/**
@brief constructs an execution policy object with the given queue
*/
syclExecutionPolicy(sycl::queue& queue) : _queue{queue} {}
/**
@brief returns a mutable reference to the associated queue
*/
sycl::queue& queue() noexcept { return _queue; };
/**
@brief returns an immutable reference to the associated queue
*/
const sycl::queue& queue() const noexcept { return _queue; }
private:
sycl::queue& _queue;
};
/**
@brief default execution policy
*/
using syclDefaultExecutionPolicy = syclExecutionPolicy<512, 9>;
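// For example, the default policy above uses 512 threads per block with
// 9 work units per thread, i.e., nv = 512 * 9 = 4608 elements per block.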
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/sycl_meta.hpp | #pragma once
#include "sycl_execution_policy.hpp"
namespace tf {
// default warp size
inline constexpr unsigned SYCL_WARP_SIZE = 32;
// empty type
struct syclEmpty { };
// ----------------------------------------------------------------------------
// iterator unrolling
// ----------------------------------------------------------------------------
// Template unrolled looping construct.
template<unsigned i, unsigned count, bool valid = (i < count)>
struct syclIterate {
template<typename F>
static void eval(F f) {
f(i);
syclIterate<i + 1, count>::eval(f);
}
};
template<unsigned i, unsigned count>
struct syclIterate<i, count, false> {
template<typename F>
static void eval(F) { }
};
template<unsigned begin, unsigned end, typename F>
void sycl_iterate(F f) {
syclIterate<begin, end>::eval(f);
}
template<unsigned count, typename F>
void sycl_iterate(F f) {
sycl_iterate<0, count>(f);
}
template<unsigned count, typename T>
T reduce(const T(&x)[count]) {
T y;
sycl_iterate<count>([&](auto i) { y = i ? x[i] + y : x[i]; });
return y;
}
template<unsigned count, typename T>
void fill(T(&x)[count], T val) {
sycl_iterate<count>([&](auto i) { x[i] = val; });
}
// Invoke unconditionally.
template<unsigned nt, unsigned vt, typename F>
void sycl_strided_iterate(F f, unsigned tid) {
sycl_iterate<vt>([=](auto i) { f(i, nt * i + tid); });
}
// Check range.
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename F>
void sycl_strided_iterate(F f, unsigned tid, unsigned count) {
// Unroll the first vt0 elements of each thread.
if(vt0 > 1 && count >= nt * vt0) {
sycl_strided_iterate<nt, vt0>(f, tid); // No checking
} else {
sycl_iterate<vt0>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
// TODO: seems dummy when vt0 == vt
sycl_iterate<vt0, vt>([=](auto i) {
auto j = nt * i + tid;
if(j < count) f(i, j);
});
}
template<unsigned vt, typename F>
void sycl_thread_iterate(F f, unsigned tid) {
sycl_iterate<vt>([=](auto i) { f(i, vt * tid + i); });
}
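// A small worked example of the two indexing schemes above (illustrative
// only), with nt = 4 threads and vt = 2 work units per thread:
//
//   sycl_strided_iterate<4, 2>(f, tid) calls f(i, j) with j = tid, tid + 4
//     (thread 0 touches elements 0 and 4, thread 1 touches 1 and 5, ...)
//   sycl_thread_iterate<2>(f, tid)     calls f(i, j) with j = 2*tid, 2*tid + 1
//     (thread 0 touches elements 0 and 1, thread 1 touches 2 and 3, ...)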
// ----------------------------------------------------------------------------
// syclRange
// ----------------------------------------------------------------------------
// syclRange
struct syclRange {
unsigned begin, end;
unsigned size() const { return end - begin; }
unsigned count() const { return size(); }
bool valid() const { return end > begin; }
};
inline syclRange sycl_get_tile(unsigned b, unsigned nv, unsigned count) {
return syclRange { nv * b, std::min(count, nv * (b + 1)) };
}
// ----------------------------------------------------------------------------
// syclArray
// ----------------------------------------------------------------------------
template<typename T, unsigned size>
struct syclArray {
T data[size];
T operator[](unsigned i) const { return data[i]; }
T& operator[](unsigned i) { return data[i]; }
syclArray() = default;
syclArray(const syclArray&) = default;
syclArray& operator=(const syclArray&) = default;
// Fill the array with x.
syclArray(T x) {
sycl_iterate<size>([&](unsigned i) { data[i] = x; });
}
};
template<typename T>
struct syclArray<T, 0> {
T operator[](unsigned) const { return T(); }
T& operator[](unsigned) { return *(T*)nullptr; }
};
template<typename T, typename V, unsigned size>
struct syclKVArray {
syclArray<T, size> keys;
syclArray<V, size> vals;
};
// ----------------------------------------------------------------------------
// thread reg <-> global mem
// ----------------------------------------------------------------------------
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I>
auto sycl_mem_to_reg_strided(I mem, unsigned tid, unsigned count) {
using T = typename std::iterator_traits<I>::value_type;
syclArray<T, vt> x;
sycl_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = mem[j]; }, tid, count
);
return x;
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t>
void sycl_reg_to_mem_strided(
syclArray<T, vt> x, unsigned tid, unsigned count, it_t mem) {
sycl_strided_iterate<nt, vt, vt0>(
[=](auto i, auto j) { mem[j] = x[i]; }, tid, count
);
}
template<unsigned nt, unsigned vt, unsigned vt0 = vt, typename I, typename O>
auto sycl_transform_mem_to_reg_strided(
I mem, unsigned tid, unsigned count, O op
) {
using T = std::invoke_result_t<O, typename std::iterator_traits<I>::value_type>;
syclArray<T, vt> x;
sycl_strided_iterate<nt, vt, vt0>(
[&](auto i, auto j) { x[i] = op(mem[j]); }, tid, count
);
return x;
}
// ----------------------------------------------------------------------------
// thread reg <-> shared
// ----------------------------------------------------------------------------
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//void sycl_reg_to_shared_thread(
// syclArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_thread must have at least nt * vt storage");
//
// sycl_thread_iterate<vt>([&](auto i, auto j) { shared[j] = x[i]; }, tid);
//
// if(sync) __syncthreads();
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//auto sycl_shared_to_reg_thread(
// const T (&shared)[shared_size], unsigned tid, bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_thread must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_thread_iterate<vt>([&](auto i, auto j) {
// x[i] = shared[j];
// }, tid);
//
// if(sync) __syncthreads();
//
// return x;
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//void sycl_reg_to_shared_strided(
// syclArray<T, vt> x, unsigned tid, T (&shared)[shared_size], bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "reg_to_shared_strided must have at least nt * vt storage");
//
// sycl_strided_iterate<nt, vt>(
// [&](auto i, auto j) { shared[j] = x[i]; }, tid
// );
//
// if(sync) __syncthreads();
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned shared_size>
//auto sycl_shared_to_reg_strided(
// const T (&shared)[shared_size], unsigned tid, bool sync = true
//) {
//
// static_assert(shared_size >= nt * vt,
// "shared_to_reg_strided must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_strided_iterate<nt, vt>([&](auto i, auto j) { x[i] = shared[j]; }, tid);
// if(sync) __syncthreads();
//
// return x;
//}
//
//template<
// unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
// unsigned shared_size
//>
//auto sycl_reg_to_mem_thread(
// syclArray<T, vt> x, unsigned tid,
// unsigned count, it_t mem, T (&shared)[shared_size]
//) {
// sycl_reg_to_shared_thread<nt>(x, tid, shared);
// auto y = sycl_shared_to_reg_strided<nt, vt>(shared, tid);
// sycl_reg_to_mem_strided<nt, vt, vt0>(y, tid, count, mem);
//}
//
//template<
// unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t,
// unsigned shared_size
//>
//auto sycl_mem_to_reg_thread(
// it_t mem, unsigned tid, unsigned count, T (&shared)[shared_size]
//) {
//
// auto x = sycl_mem_to_reg_strided<nt, vt, vt0>(mem, tid, count);
// sycl_reg_to_shared_strided<nt, vt>(x, tid, shared);
// auto y = sycl_shared_to_reg_thread<nt, vt>(shared, tid);
// return y;
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_shared_gather(
// const T(&data)[S], syclArray<unsigned, vt> indices, bool sync = true
//) {
//
// static_assert(S >= nt * vt,
// "shared_gather must have at least nt * vt storage");
//
// syclArray<T, vt> x;
// sycl_iterate<vt>([&](auto i) { x[i] = data[indices[i]]; });
//
// if(sync) __syncthreads();
//
// return x;
//}
//
//
//
//// ----------------------------------------------------------------------------
//// reg<->reg
//// ----------------------------------------------------------------------------
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_reg_thread_to_strided(
// syclArray<T, vt> x, unsigned tid, T (&shared)[S]
//) {
// sycl_reg_to_shared_thread<nt>(x, tid, shared);
// return sycl_shared_to_reg_strided<nt, vt>(shared, tid);
//}
//
//template<unsigned nt, unsigned vt, typename T, unsigned S>
//auto sycl_reg_strided_to_thread(
// syclArray<T, vt> x, unsigned tid, T (&shared)[S]
//) {
// sycl_reg_to_shared_strided<nt>(x, tid, shared);
// return sycl_shared_to_reg_thread<nt, vt>(shared, tid);
//}
// ----------------------------------------------------------------------------
// syclLoadStoreIterator
// ----------------------------------------------------------------------------
template<typename L, typename S, typename T, typename I>
struct syclLoadStoreIterator : std::iterator_traits<const T*> {
L load;
S store;
I base;
syclLoadStoreIterator(L load_, S store_, I base_) :
load(load_), store(store_), base(base_) { }
struct assign_t {
L load;
S store;
I index;
assign_t& operator=(T rhs) {
static_assert(!std::is_same<S, syclEmpty>::value,
"load_iterator is being stored to.");
store(rhs, index);
return *this;
}
operator T() const {
static_assert(!std::is_same<L, syclEmpty>::value,
"store_iterator is being loaded from.");
return load(index);
}
};
assign_t operator[](I index) const {
return assign_t { load, store, base + index };
}
assign_t operator*() const {
return assign_t { load, store, base };
}
syclLoadStoreIterator operator+(I offset) const {
syclLoadStoreIterator cp = *this;
cp += offset;
return cp;
}
syclLoadStoreIterator& operator+=(I offset) {
base += offset;
return *this;
}
syclLoadStoreIterator operator-(I offset) const {
syclLoadStoreIterator cp = *this;
cp -= offset;
return cp;
}
syclLoadStoreIterator& operator-=(I offset) {
base -= offset;
return *this;
}
};
//template<typename T>
//struct trivial_load_functor {
// template<typename I>
// T operator()(I index) const {
// return T();
// }
//};
//template<typename T>
//struct trivial_store_functor {
// template<typename I>
// void operator()(T v, I index) const { }
//};
template <typename T, typename I = int, typename L, typename S>
auto sycl_make_load_store_iterator(L load, S store, I base = 0) {
return syclLoadStoreIterator<L, S, T, I>(load, store, base);
}
template <typename T, typename I = int, typename L>
auto sycl_make_load_iterator(L load, I base = 0) {
return sycl_make_load_store_iterator<T>(load, syclEmpty(), base);
}
template <typename T, typename I = int, typename S>
auto sycl_make_store_iterator(S store, I base = 0) {
return sycl_make_load_store_iterator<T>(syclEmpty(), store, base);
}
// ----------------------------------------------------------------------------
// swap
// ----------------------------------------------------------------------------
template<typename T>
void sycl_swap(T& a, T& b) {
auto c = a;
a = b;
b = c;
}
// ----------------------------------------------------------------------------
// launch kernel
// ----------------------------------------------------------------------------
//template<typename F, typename... args_t>
//__global__ void sycl_kernel(F f, args_t... args) {
// f(threadIdx.x, blockIdx.x, args...);
//}
// ----------------------------------------------------------------------------
// operators
// ----------------------------------------------------------------------------
template <typename T>
struct sycl_plus {
T operator()(T a, T b) const { return a + b; }
};
template <typename T>
struct sycl_minus {
T operator()(T a, T b) const { return a - b; }
};
template <typename T>
struct sycl_multiplies {
T operator()(T a, T b) const { return a * b; }
};
template <typename T>
struct sycl_maximum {
T operator()(T a, T b) const { return a > b ? a : b; }
};
template <typename T>
struct sycl_minimum {
T operator()(T a, T b) const { return a < b ? a : b; }
};
template <typename T>
struct sycl_less {
T operator()(T a, T b) const { return a < b; }
};
template <typename T>
struct sycl_greater {
T operator()(T a, T b) const { return a > b; }
};
// ----------------------------------------------------------------------------
// Memory Object
// ----------------------------------------------------------------------------
/**
@private
*/
template <typename T>
class syclScopedDeviceMemory {
public:
syclScopedDeviceMemory() = delete;
syclScopedDeviceMemory(size_t N, sycl::queue& queue) :
_queue {queue},
_N {N} {
if(N) {
_data = sycl::malloc_device<T>(N, _queue);
}
}
syclScopedDeviceMemory(syclScopedDeviceMemory&& rhs) :
_queue{std::move(rhs._queue)}, _data{rhs._data}, _N {rhs._N} {
rhs._data = nullptr;
rhs._N = 0;
}
~syclScopedDeviceMemory() {
if(_data) {
sycl::free(_data, _queue);
}
}
  // note: _queue is a reference and cannot be reseated; the assignment below
  // replaces the queue object it is bound to with rhs's queue (sycl::queue
  // has shared, reference-counted semantics)
  syclScopedDeviceMemory& operator = (syclScopedDeviceMemory&& rhs) {
if(_data) {
sycl::free(_data, _queue);
}
_queue = std::move(rhs._queue);
_data = rhs._data;
_N = rhs._N;
rhs._data = nullptr;
rhs._N = 0;
return *this;
}
size_t size() const { return _N; }
T* data() { return _data; }
const T* data() const { return _data; }
syclScopedDeviceMemory(const syclScopedDeviceMemory&) = delete;
syclScopedDeviceMemory& operator = (const syclScopedDeviceMemory&) = delete;
private:
sycl::queue& _queue;
T* _data {nullptr};
size_t _N {0};
};
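// A brief illustrative sketch (not part of the original header): allocate
// 1024 integers of USM device memory bound to a queue; the memory is freed
// automatically when the object goes out of scope.
//
//   sycl::queue queue;
//   tf::syclScopedDeviceMemory<int> dmem(1024, queue);
//   queue.fill(dmem.data(), 0, dmem.size()).wait();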
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/syclflow.hpp | #pragma once
#include "../taskflow.hpp"
#include "sycl_task.hpp"
/**
@file syclflow.hpp
@brief main syclFlow include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// class definition: syclFlow
// ----------------------------------------------------------------------------
/**
@class syclFlow
@brief class for building a SYCL task dependency graph
*/
class syclFlow {
friend class Executor;
struct External {
syclGraph graph;
};
struct Internal {
Executor& executor;
Internal(Executor& e) : executor {e} {}
};
using handle_t = std::variant<External, Internal>;
public:
/**
@brief constructs a standalone %syclFlow from the given queue
A standalone %syclFlow does not go through any taskflow and
can be run by the caller thread using explicit offload methods
(e.g., tf::syclFlow::offload).
*/
syclFlow(sycl::queue& queue);
/**
@brief destroys the %syclFlow
*/
~syclFlow() = default;
/**
@brief queries the emptiness of the graph
*/
bool empty() const;
/**
@brief queries the number of tasks
*/
size_t num_tasks() const;
/**
@brief dumps the %syclFlow graph into a DOT format through an
output stream
*/
void dump(std::ostream& os) const;
/**
@brief clear the associated graph
*/
void clear();
// ------------------------------------------------------------------------
// Generic device operations
// ------------------------------------------------------------------------
/**
@brief creates a task that launches the given command group function object
@tparam F type of command group function object
@param func function object that is constructible from
std::function<void(sycl::handler&)>
Creates a task associated with the given command group.
In SYCL, each command group function object is given a unique
command group handler object to perform all the necessary work
required to correctly process data on a device using a kernel.
*/
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>* = nullptr
>
syclTask on(F&& func);
/**
@brief updates the task to the given command group function object
Similar to tf::syclFlow::on but operates on an existing task.
*/
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>* = nullptr
>
void on(syclTask task, F&& func);
/**
@brief creates a memcpy task that copies untyped data in bytes
@param tgt pointer to the target memory block
@param src pointer to the source memory block
@param bytes bytes to copy
@return a tf::syclTask handle
A memcpy task transfers @c bytes of data from a source location @c src
to a target location @c tgt. Both @c src and @c tgt may be either host
or USM pointers.
*/
syclTask memcpy(void* tgt, const void* src, size_t bytes);
/**
@brief creates a memset task that fills untyped data with a byte value
@param ptr pointer to the destination device memory area
@param value value to set for each byte of specified memory
@param bytes number of bytes to set
@return a tf::syclTask handle
Fills @c bytes of memory beginning at address @c ptr with @c value.
@c ptr must be a USM allocation.
@c value is interpreted as an unsigned char.
*/
syclTask memset(void* ptr, int value, size_t bytes);
/**
@brief creates a fill task that fills typed data with the given value
@tparam T trivially copyable value type
@param ptr pointer to the memory to fill
@param pattern pattern value to fill into the memory
@param count number of items to fill with the given value
Creates a task that fills the specified memory with the
specified value.
*/
template <typename T>
syclTask fill(void* ptr, const T& pattern, size_t count);
/**
@brief creates a copy task that copies typed data from a source to a target
memory block
@tparam T trivially copyable value type
@param target pointer to the beginning of the target memory block
@param source pointer to the beginning of the source memory block
@param count number of items to copy
Creates a task that copies @c count items of type @c T from a source memory
location to a target memory location.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
syclTask copy(T* target, const T* source, size_t count);
/**
@brief creates a kernel task
@tparam ArgsT arguments types
@param args arguments to forward to the parallel_for methods defined
in the handler object
Creates a kernel task from a parallel_for method through the handler
object associated with a command group.
*/
template <typename...ArgsT>
syclTask parallel_for(ArgsT&&... args);
// ------------------------------------------------------------------------
// algorithms
// ------------------------------------------------------------------------
/**
@brief invokes a SYCL kernel function using only one thread
@tparam F kernel function type
@param func kernel function
Creates a task that launches the given function object using only one
kernel thread.
*/
template <typename F>
syclTask single_task(F&& func);
/**
@brief applies a callable to each dereferenced element of the data array
@tparam I iterator type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable a callable object to apply to the dereferenced iterator
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
for(auto itr = first; itr != last; itr++) {
callable(*itr);
}
@endcode
*/
template <typename I, typename C>
syclTask for_each(I first, I last, C&& callable);
/**
@brief applies a callable to each index in the range with the step size
@tparam I index type
@tparam C callable type
@param first beginning index
@param last last index
@param step step size
@param callable the callable to apply to each index in the range
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop on a GPU:
@code{.cpp}
// step is positive [first, last)
for(auto i=first; i<last; i+=step) {
callable(i);
}
// step is negative [first, last)
for(auto i=first; i>last; i+=step) {
callable(i);
}
@endcode
*/
template <typename I, typename C>
syclTask for_each_index(I first, I last, I step, C&& callable);
/**
@brief applies a callable to a source range and stores the result in a target range
@tparam I iterator type
@tparam C callable type
@tparam S source types
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param callable the callable to apply to the corresponding elements of the source ranges
@param srcs iterators to the source ranges
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following
loop on a SYCL device:
@code{.cpp}
while (first != last) {
*first++ = callable(*src1++, *src2++, *src3++, ...);
}
@endcode
*/
template <typename I, typename C, typename... S>
syclTask transform(I first, I last, C&& callable, S... srcs);
/**
@brief performs parallel reduction over a range of items
@tparam I input iterator type
@tparam T value type
@tparam C callable type
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
@param result pointer to the result with an initialized value
@param op binary reduction operator
@return a tf::syclTask handle
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
syclTask reduce(I first, I last, T* result, C&& op);
/**
@brief similar to tf::syclFlow::reduce but does not assume any initial
value to reduce
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
*result = *first++;  // no initial value participates in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template <typename I, typename T, typename C>
syclTask uninitialized_reduce(I first, I last, T* result, C&& op);
// ------------------------------------------------------------------------
// offload methods
// ------------------------------------------------------------------------
/**
@brief offloads the %syclFlow onto a GPU and repeatedly runs it until
the predicate becomes true
@tparam P predicate type (a callable that takes no arguments and returns bool)
@param predicate a predicate callable (returns @c true to stop offloading)
Repetitively executes the present %syclFlow through the given queue object
until the predicate returns @c true.
By default, if users do not offload the %syclFlow,
the executor will offload it once.
*/
template <typename P>
void offload_until(P&& predicate);
/**
@brief offloads the %syclFlow and executes it by the given times
@param N number of executions
*/
void offload_n(size_t N);
/**
@brief offloads the %syclFlow and executes it once
*/
void offload();
// ------------------------------------------------------------------------
// update methods
// ------------------------------------------------------------------------
/**
@brief rebinds the task to a memcpy task
Similar to tf::syclFlow::memcpy but operates on an existing task.
*/
void memcpy(syclTask task, void* tgt, const void* src, size_t bytes);
/**
@brief rebinds the task to a memset task
Similar to tf::syclFlow::memset but operates on an existing task.
*/
void memset(syclTask task, void* ptr, int value, size_t bytes);
/**
@brief rebinds the task to a fill task
Similar to tf::syclFlow::fill but operates on an existing task.
*/
template <typename T>
void fill(syclTask task, void* ptr, const T& pattern, size_t count);
/**
@brief rebinds the task to a copy task
Similar to tf::syclFlow::copy but operates on an existing task.
*/
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
>
void copy(syclTask task, T* target, const T* source, size_t count);
/**
@brief rebinds the task to a parallel-for kernel task
Similar to tf::syclFlow::parallel_for but operates on an existing task.
*/
template <typename...ArgsT>
void parallel_for(syclTask task, ArgsT&&... args);
/**
@brief rebinds the task to a single-threaded kernel task
Similar to tf::syclFlow::single_task but operates on an existing task.
*/
template <typename F>
void single_task(syclTask task, F&& func);
private:
syclFlow(Executor&, syclGraph&, sycl::queue&);
sycl::queue& _queue;
handle_t _handle;
syclGraph& _graph;
std::vector<syclNode*> _tpg;
std::queue<syclNode*> _bfs;
};
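// A minimal end-to-end sketch (illustrative only; "N" and "data" are
// placeholders, and tf::syclTask is assumed to provide precede()/succeed()
// like other taskflow task handles):
//
//   sycl::queue queue;
//   tf::syclFlow flow(queue);
//   int* data = sycl::malloc_shared<int>(N, queue);
//   tf::syclTask init = flow.fill(data, 0, N);
//   tf::syclTask add  = flow.parallel_for(
//     sycl::range<1>(N), [=](sycl::id<1> i){ data[i] += 1; }
//   );
//   init.precede(add);  // run the fill task before the kernel task
//   flow.offload();     // submit the graph once and wait for completion
//   sycl::free(data, queue);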
// constructor
inline syclFlow::syclFlow(sycl::queue& queue) :
_queue {queue},
_handle {std::in_place_type_t<External>{}},
_graph {std::get_if<External>(&_handle)->graph} {
}
// Construct the syclFlow from executor (internal graph)
inline syclFlow::syclFlow(Executor& e, syclGraph& g, sycl::queue& queue) :
_queue {queue},
_handle {std::in_place_type_t<Internal>{}, e},
_graph {g} {
}
// Function: empty
inline bool syclFlow::empty() const {
return _graph._nodes.empty();
}
// Function: num_tasks
inline size_t syclFlow::num_tasks() const {
return _graph._nodes.size();
}
// Procedure: dump
inline void syclFlow::dump(std::ostream& os) const {
_graph.dump(os, nullptr, "");
}
// Procedure: clear
inline void syclFlow::clear() {
_graph.clear();
}
// Function: memcpy
inline syclTask syclFlow::memcpy(void* tgt, const void* src, size_t bytes) {
return on([=](sycl::handler& h){ h.memcpy(tgt, src, bytes); });
}
// Function: memset
inline syclTask syclFlow::memset(void* ptr, int value, size_t bytes) {
return on([=](sycl::handler& h){ h.memset(ptr, value, bytes); });
}
// Function: fill
template <typename T>
syclTask syclFlow::fill(void* ptr, const T& pattern, size_t count) {
return on([=](sycl::handler& h){ h.fill(ptr, pattern, count); });
}
// Function: copy
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
syclTask syclFlow::copy(T* target, const T* source, size_t count) {
return on([=](sycl::handler& h){ h.memcpy(target, source, count*sizeof(T)); });
}
// Function: on
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>*
>
syclTask syclFlow::on(F&& f) {
auto node = _graph.emplace_back(_graph,
std::in_place_type_t<syclNode::CGH>{}, std::forward<F>(f)
);
return syclTask(node);
}
// Function: single_task
template <typename F>
syclTask syclFlow::single_task(F&& func) {
return on([f=std::forward<F>(func)] (sycl::handler& h) {
h.single_task(f);
});
}
// Function: parallel_for
template <typename...ArgsT>
syclTask syclFlow::parallel_for(ArgsT&&... args) {
return on([args...] (sycl::handler& h) { h.parallel_for(args...); });
}
// Procedure: offload_until
template <typename P>
void syclFlow::offload_until(P&& predicate) {
if(!(_graph._state & syclGraph::TOPOLOGY_CHANGED)) {
goto offload;
}
// levelize the graph
_tpg.clear();
// insert the first level of nodes into the queue
for(auto& u : _graph._nodes) {
u->_level = u->_dependents.size();
if(u->_level == 0) {
_bfs.push(u.get());
}
}
while(!_bfs.empty()) {
auto u = _bfs.front();
_bfs.pop();
_tpg.push_back(u);
for(auto v : u->_successors) {
if(--(v->_level) == 0) {
v->_level = u->_level + 1;
_bfs.push(v);
}
}
}
offload:
// offload the syclFlow graph
bool in_order = _queue.is_in_order();
while(!predicate()) {
// traverse node in a topological order
for(auto u : _tpg) {
switch(u->_handle.index()) {
// task type 1: command group handler
case syclNode::COMMAND_GROUP_HANDLER:
u->_event = _queue.submit([u, in_order](sycl::handler& h){
// wait on all predecessors
if(!in_order) {
for(auto p : u->_dependents) {
h.depends_on(p->_event);
}
}
std::get_if<syclNode::CGH>(&u->_handle)->work(h);
});
break;
}
}
// synchronize the execution
_queue.wait();
}
_graph._state = syclGraph::OFFLOADED;
}
// Procedure: offload_n
inline void syclFlow::offload_n(size_t n) {
offload_until([repeat=n] () mutable { return repeat-- == 0; });
}
// Procedure: offload
inline void syclFlow::offload() {
offload_until([repeat=1] () mutable { return repeat-- == 0; });
}
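// Illustrative usage of the offload methods (assumes "flow" is an
// already-constructed tf::syclFlow with tasks in it):
//
//   flow.offload();      // run the captured graph once
//   flow.offload_n(10);  // run it ten more times
//   int round = 0;
//   flow.offload_until([&round](){ return ++round == 3; });  // two more runs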
// Function: on
template <typename F, std::enable_if_t<
std::is_invocable_r_v<void, F, sycl::handler&>, void>*
>
void syclFlow::on(syclTask task, F&& f) {
std::get_if<syclNode::CGH>(&task._node->_handle)->work =
std::forward<F>(f);
}
// Function: memcpy
inline void syclFlow::memcpy(
syclTask task, void* tgt, const void* src, size_t bytes
) {
on(task, [=](sycl::handler& h){ h.memcpy(tgt, src, bytes); });
}
// Function: memset
inline void syclFlow::memset(
syclTask task, void* ptr, int value, size_t bytes
) {
on(task, [=](sycl::handler& h){ h.memset(ptr, value, bytes); });
}
// Function: fill
template <typename T>
void syclFlow::fill(
syclTask task, void* ptr, const T& pattern, size_t count
) {
on(task, [=](sycl::handler& h){ h.fill(ptr, pattern, count); });
}
// Function: copy
template <typename T,
std::enable_if_t<!std::is_same_v<T, void>, void>*
>
void syclFlow::copy(
syclTask task, T* target, const T* source, size_t count
) {
on(task, [=](sycl::handler& h){
h.memcpy(target, source, count*sizeof(T));}
);
}
// Function: parallel_for
template <typename...ArgsT>
void syclFlow::parallel_for(syclTask task, ArgsT&&... args) {
on(task, [args...] (sycl::handler& h) { h.parallel_for(args...); });
}
// Function: single_task
template <typename F>
void syclFlow::single_task(syclTask task, F&& func) {
on(task, [f=std::forward<F>(func)] (sycl::handler& h) { h.single_task(f); });
}
// ############################################################################
// Forward declaration: FlowBuilder
// ############################################################################
// FlowBuilder::emplace_on
template <typename C, typename Q, std::enable_if_t<is_syclflow_task_v<C>, void>*>
Task FlowBuilder::emplace_on(C&& callable, Q&& q) {
auto n = _graph._emplace_back(
std::in_place_type_t<Node::syclFlow>{},
[c=std::forward<C>(callable), queue=std::forward<Q>(q)]
(Executor& e, Node* p) mutable {
e._invoke_syclflow_task_entry(p, c, queue);
},
std::make_unique<syclGraph>()
);
return Task(n);
}
// FlowBuilder::emplace
template <typename C, std::enable_if_t<is_syclflow_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& callable) {
return emplace_on(std::forward<C>(callable), sycl::queue{});
}
// ############################################################################
// Forward declaration: Executor
// ############################################################################
// Procedure: _invoke_syclflow_task_entry (syclFlow)
template <typename C, typename Q,
std::enable_if_t<is_syclflow_task_v<C>, void>*
>
void Executor::_invoke_syclflow_task_entry(Node* node, C&& c, Q& queue) {
auto h = std::get_if<Node::syclFlow>(&node->_handle);
syclGraph* g = dynamic_cast<syclGraph*>(h->graph.get());
g->clear();
syclFlow sf(*this, *g, queue);
c(sf);
if(!(g->_state & syclGraph::OFFLOADED)) {
sf.offload();
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/algorithm/sycl_transform.hpp | #pragma once
#include "../sycl_flow.hpp"
namespace tf {
// Function: _transform_cgh
template <typename I, typename C, typename... S>
auto syclFlow::_transform_cgh(I first, I last, C&& op, S... srcs) {
// TODO: special case N == 0?
size_t N = std::distance(first, last);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
*(first + i) = op(*(srcs + i)...);
}
}
);
};
}
// Function: transform
template <typename I, typename C, typename... S>
syclTask syclFlow::transform(I first, I last, C&& op, S... srcs) {
return on(_transform_cgh(first, last, std::forward<C>(op), srcs...));
}
// Procedure: transform
template <typename I, typename C, typename... S>
void syclFlow::transform(
syclTask task, I first, I last, C&& op, S... srcs
) {
on(task, _transform_cgh(first, last, std::forward<C>(op), srcs...));
}
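// A brief illustrative sketch (not part of the original header): element-wise
// addition of two USM arrays "x" and "y" into "z" (all placeholders of
// length N) through a syclFlow transform task.
//
//   tf::syclFlow flow(queue);
//   flow.transform(z, z + N, [](int a, int b){ return a + b; }, x, y);
//   flow.offload();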
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/algorithm/reduce.hpp | #pragma once
#include "../syclflow.hpp"
namespace tf::detail {
// ----------------------------------------------------------------------------
// reduction helper functions
// ----------------------------------------------------------------------------
/** @private */
template<unsigned nt, typename T>
struct syclBlockReduce {
static const unsigned group_size = std::min(nt, SYCL_WARP_SIZE);
static const unsigned shm_size = std::max(nt, 2* group_size);
static const unsigned num_passes = log2(group_size);
static const unsigned num_items = nt / group_size;
static_assert(
nt && (0 == nt % SYCL_WARP_SIZE),
"syclBlockReduce requires num threads to be a multiple of warp_size (32)"
);
using shm_t = sycl::accessor<
T, 1, sycl::access::mode::read_write, sycl::access::target::local
>;
template<typename op_t>
T operator()(
sycl::nd_item<1>&, T, const shm_t&, unsigned, op_t, bool = true
) const;
};
// function: reduce to be called from a block
template<unsigned nt, typename T>
template<typename op_t>
T syclBlockReduce<nt, T>::operator ()(
sycl::nd_item<1>& item,
T x,
const shm_t& shm,
unsigned count,
op_t op,
bool ret
) const {
auto tid = item.get_local_id(0);
// Store your data into shared memory.
shm[tid] = x;
item.barrier(sycl::access::fence_space::local_space);
if(tid < group_size) {
// Each thread scans within its lane.
sycl_strided_iterate<group_size, num_items>([&](auto i, auto j) {
if(i > 0) {
x = op(x, shm[j]);
}
}, tid, count);
shm[tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
auto count2 = count < group_size ? count : group_size;
auto first = (1 & num_passes) ? group_size : 0;
if(tid < group_size) {
shm[first + tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
sycl_iterate<num_passes>([&](auto pass) {
if(tid < group_size) {
if(auto offset = 1 << pass; tid + offset < count2) {
x = op(x, shm[first + offset + tid]);
}
first = group_size - first;
shm[first + tid] = x;
}
item.barrier(sycl::access::fence_space::local_space);
});
if(ret) {
x = shm[0];
item.barrier(sycl::access::fence_space::local_space);
}
return x;
}
/** @private */
template <typename P, typename I, typename T, typename O>
sycl::event sycl_reduce_loop(
P&& p,
I input,
unsigned count,
T* res,
O op,
bool incl,
void* ptr,
std::vector<sycl::event> evs
) {
using E = std::decay_t<P>;
using R = syclBlockReduce<E::nt, T>;
auto buf = static_cast<T*>(ptr);
auto B = (count + E::nv - 1) / E::nv;
auto e = p.queue().submit([=, evs=std::move(evs)](sycl::handler& h) {
h.depends_on(evs);
// create a shared memory
typename R::shm_t shm(sycl::range<1>(R::shm_size), h);
h.parallel_for(
sycl::nd_range<1>{sycl::range<1>(B*E::nt), sycl::range<1>(E::nt)},
[=](sycl::nd_item<1> item) {
auto tid = item.get_local_id(0);
auto bid = item.get_group(0);
// get the tile of this group
auto tile = sycl_get_tile(bid, E::nv, count);
// load data from input to register
auto x = sycl_mem_to_reg_strided<E::nt, E::vt>(
input + tile.begin, tid, tile.count()
);
// reduce multiple values per thread into a scalar.
T s;
sycl_strided_iterate<E::nt, E::vt>(
[&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count()
);
// reduce to a scalar per block.
s = R()(
item, s, shm, (tile.count()<E::nt ? tile.count() : E::nt), op, false
);
if(!tid) {
(1 == B) ? *res = (incl ? op(*res, s) : s) : buf[bid] = s;
}
}
);
});
if(B > 1) {
return sycl_reduce_loop(p, buf, B, res, op, incl, buf+B, {e});
}
else {
return e;
}
}
} // end of namespace detail -------------------------------------------------
namespace tf {
/**
@brief queries the buffer size in bytes needed to call reduce kernels
@tparam P execution policy type
@tparam T value type
@param count number of elements to reduce
The function is used to allocate a buffer for calling asynchronous reduce.
Please refer to @ref SYCLSTDReduce for details.
*/
template <typename P, typename T>
unsigned sycl_reduce_buffer_size(unsigned count) {
using E = std::decay_t<P>;
unsigned B = (count + E::nv - 1) / E::nv;
unsigned n = 0;
for(auto b=B; b>1; n += (b=(b+E::nv-1)/E::nv));
return n*sizeof(T);
}
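// A brief illustrative sketch (not part of the original header) of pairing the
// buffer-size query with the asynchronous reduce; "policy" is assumed to be a
// syclDefaultExecutionPolicy over a queue, and "first", "last", "res" are USM
// pointers with *res holding an initialized value.
//
//   auto bytes = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, int>(
//     std::distance(first, last)
//   );
//   void* buf = sycl::malloc_device(bytes, policy.queue());
//   sycl_reduce_async(
//     policy, first, last, res, [](int a, int b){ return a + b; }, buf, {}
//   ).wait();
//   sycl::free(buf, policy.queue());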
//// sycl reduction
//template <typename I, typename T, typename C, bool uninitialized>
//auto syclFlow::_reduce_cgh(I first, I last, T* res, C&& op) {
//
// // TODO: special case N == 0?
// size_t N = std::distance(first, last);
// size_t B = _default_group_size(N);
//
// return [=, op=std::forward<C>(op)](sycl::handler& handler) mutable {
//
// // create a shared memory
// sycl::accessor<
// T, 1, sycl::access::mode::read_write, sycl::access::target::local
// > shm(sycl::range<1>(B), handler);
//
// // perform parallel reduction
// handler.parallel_for(
// sycl::nd_range<1>{sycl::range<1>(B), sycl::range<1>(B)},
// [=] (sycl::nd_item<1> item) {
//
// size_t tid = item.get_global_id(0);
//
// if(tid >= N) {
// return;
// }
//
// shm[tid] = *(first+tid);
//
// for(size_t i=tid+B; i<N; i+=B) {
// shm[tid] = op(shm[tid], *(first+i));
// }
//
// item.barrier(sycl::access::fence_space::local_space);
//
// for(size_t s = B / 2; s > 0; s >>= 1) {
// if(tid < s && tid + s < N) {
// shm[tid] = op(shm[tid], shm[tid+s]);
// }
// item.barrier(sycl::access::fence_space::local_space);
// }
//
// if(tid == 0) {
// if constexpr (uninitialized) {
// *res = shm[0];
// }
// else {
// *res = op(*res, shm[0]);
// }
// }
// });
// };
//}
// ----------------------------------------------------------------------------
// SYCL standard reduce algorithms
// ----------------------------------------------------------------------------
/**
@brief performs parallel reduction over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template<typename P, typename I, typename T, typename O>
void sycl_reduce(P&& p, I first, I last, T* res, O op) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// allocate temporary buffer
auto tmp = sycl::malloc_device(
sycl_reduce_buffer_size<P, T>(count), p.queue()
);
// reduction loop
detail::sycl_reduce_loop(p, first, count, res, op, true, tmp, {}).wait();
// deallocate the temporary buffer
sycl::free(tmp, p.queue());
}
/**
@brief performs asynchronous parallel reduction over a range of items
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
@return an SYCL event
Please refer to @ref SYCLSTDReduce for details.
*/
template<typename P, typename I, typename T, typename O>
sycl::event sycl_reduce_async(
P&& p, I first, I last, T* res, O op, void* buf, std::vector<sycl::event> dep
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return {};
}
// reduction loop
return detail::sycl_reduce_loop(
p, first, count, res, op, true, buf, std::move(dep)
);
}
/**
@brief performs parallel reduction over a range of items
without an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
This method is equivalent to the parallel execution of the following loop
on a SYCL device:
@code{.cpp}
*result = *first++; // no initial value participates in the loop
while (first != last) {
*result = op(*result, *first++);
}
@endcode
*/
template<typename P, typename I, typename T, typename O>
void sycl_uninitialized_reduce(P&& p, I first, I last, T* res, O op) {
unsigned count = std::distance(first, last);
if(count == 0) {
return;
}
// allocate temporary buffer
auto tmp = sycl::malloc_device(
sycl_reduce_buffer_size<P, T>(count), p.queue()
);
// reduction loop
detail::sycl_reduce_loop(p, first, count, res, op, false, tmp, {}).wait();
// deallocate the temporary buffer
sycl::free(tmp, p.queue());
}
/**
@brief performs asynchronous parallel reduction over a range of items
without an initial value
@tparam P execution policy type
@tparam I input iterator type
@tparam T value type
@tparam O binary operator type
@param p execution policy
@param first iterator to the beginning of the range
@param last iterator to the end of the range
@param res pointer to the result
@param op binary operator to apply to reduce elements
@param buf pointer to the temporary buffer
@return an SYCL event
Please refer to @ref SYCLSTDReduce for details.
*/
template<typename P, typename I, typename T, typename O>
sycl::event sycl_uninitialized_reduce_async(
P&& p, I first, I last, T* res, O op, void* buf, std::vector<sycl::event> dep
) {
unsigned count = std::distance(first, last);
if(count == 0) {
return {};
}
// reduction loop
return detail::sycl_reduce_loop(
p, first, count, res, op, false, buf, std::move(dep)
);
}
// ----------------------------------------------------------------------------
// syclFlow reduce
// ----------------------------------------------------------------------------
// Function: reduce
template <typename I, typename T, typename C>
syclTask syclFlow::reduce(I first, I last, T* res, C&& op) {
//return on(_reduce_cgh<I, T, C, false>(first, last, res, std::forward<C>(op)));
auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
(sycl::queue& queue, std::vector<sycl::event> events) mutable {
syclDefaultExecutionPolicy p(queue);
return sycl_reduce_async(
p, first, last, res, op, buf.get().data(), std::move(events)
);
});
}
// Function: uninitialized_reduce
template <typename I, typename T, typename C>
syclTask syclFlow::uninitialized_reduce(I first, I last, T* res, C&& op) {
//return on(_reduce_cgh<I, T, C, true>(first, last, res, std::forward<C>(op)));
auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
std::distance(first, last)
);
return on([=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
(sycl::queue& queue, std::vector<sycl::event> events) mutable {
syclDefaultExecutionPolicy p(queue);
return sycl_uninitialized_reduce_async(
p, first, last, res, op, buf.get().data(), std::move(events)
);
});
}
// ----------------------------------------------------------------------------
// rebind methods
// ----------------------------------------------------------------------------
//// Function: reduce
//template <typename I, typename T, typename C>
//void syclFlow::reduce(syclTask task, I first, I last, T* res, C&& op) {
// //on(task, _reduce_cgh<I, T, C, false>(
// // first, last, res, std::forward<C>(op)
// //));
//
// auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
// std::distance(first, last)
// );
//
// on(task, [=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
// (sycl::queue& queue, std::vector<sycl::event> events) mutable {
// syclDefaultExecutionPolicy p(queue);
// return sycl_reduce_async(
// p, first, last, res, op, buf.get().data(), std::move(events)
// );
// });
//}
//
//// Function: uninitialized_reduce
//template <typename I, typename T, typename C>
//void syclFlow::uninitialized_reduce(
// syclTask task, I first, I last, T* res, C&& op
//) {
// //on(task, _reduce_cgh<I, T, C, true>(
// // first, last, res, std::forward<C>(op)
// //));
// auto bufsz = sycl_reduce_buffer_size<syclDefaultExecutionPolicy, T>(
// std::distance(first, last)
// );
//
// on(task, [=, buf=MoC{syclScopedDeviceMemory<std::byte>(bufsz, _queue)}]
// (sycl::queue& queue, std::vector<sycl::event> events) mutable {
// syclDefaultExecutionPolicy p(queue);
// return sycl_uninitialized_reduce_async(
// p, first, last, res, op, buf.get().data(), std::move(events)
// );
// });
//}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/sycl/algorithm/sycl_for_each.hpp | #pragma once
#include "../sycl_flow.hpp"
namespace tf {
// command group function object of for_each
template <typename I, typename C>
auto syclFlow::_for_each_cgh(I first, I last, C&& op) {
// TODO: special case N == 0?
size_t N = std::distance(first, last);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
op(*(first + i));
}
}
);
};
}
// command group function object of for_each_index
template <typename I, typename C>
auto syclFlow::_for_each_index_cgh(I first, I last, I step, C&& op) {
if(is_range_invalid(first, last, step)) {
TF_THROW("invalid range [", first, ", ", last, ") with step size ", step);
}
// TODO: special case when N is 0?
size_t N = distance(first, last, step);
size_t B = _default_group_size(N);
return [=, op=std::forward<C>(op)] (sycl::handler& handler) mutable {
size_t _N = (N % B == 0) ? N : (N + B - N % B);
handler.parallel_for(
sycl::nd_range<1>{sycl::range<1>(_N), sycl::range<1>(B)},
[=] (sycl::nd_item<1> item) {
size_t i = item.get_global_id(0);
if(i < N) {
op(static_cast<I>(i)*step + first);
}
}
);
};
}
// ----------------------------------------------------------------------------
// for_each and for_each_index algorithms
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
syclTask syclFlow::for_each(I first, I last, C&& op) {
return on(_for_each_cgh(first, last, std::forward<C>(op)));
}
// Function: for_each_index
template <typename I, typename C>
syclTask syclFlow::for_each_index(I beg, I end, I inc, C&& op) {
return on(_for_each_index_cgh(beg, end, inc, std::forward<C>(op)));
}
// ----------------------------------------------------------------------------
// rebind
// ----------------------------------------------------------------------------
// Function: for_each
template <typename I, typename C>
void syclFlow::for_each(syclTask task, I first, I last, C&& op) {
on(task, _for_each_cgh(first, last, std::forward<C>(op)));
}
// Function: for_each_index
template <typename I, typename C>
void syclFlow::for_each_index(syclTask task, I beg, I end, I inc, C&& op) {
on(task, _for_each_index_cgh(beg, end, inc, std::forward<C>(op)));
}
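// A brief illustrative sketch (not part of the original header): zero every
// even index of a USM array "data" (a placeholder) of length N.
//
//   tf::syclFlow flow(queue);
//   flow.for_each_index(0, static_cast<int>(N), 2, [=](int i){ data[i] = 0; });
//   flow.offload();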
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/algorithm/sort.hpp | #pragma once
#include "../core/executor.hpp"
namespace tf {
// threshold whether or not to perform parallel sort
template <typename I>
constexpr size_t parallel_sort_cutoff() {
//using value_type = std::decay_t<decltype(*std::declval<I>())>;
using value_type = typename std::iterator_traits<I>::value_type;
constexpr size_t object_size = sizeof(value_type);
if constexpr(std::is_same_v<value_type, std::string>) {
return 65536 / sizeof(std::string);
}
else {
if constexpr(object_size < 16) return 4096;
else if constexpr(object_size < 32) return 2048;
else if constexpr(object_size < 64) return 1024;
else if constexpr(object_size < 128) return 768;
else if constexpr(object_size < 256) return 512;
else if constexpr(object_size < 512) return 256;
else return 128;
}
}
// ----------------------------------------------------------------------------
// pattern-defeating quick sort (pdqsort)
// ----------------------------------------------------------------------------
// Sorts [begin, end) using insertion sort with the given comparison function.
template<typename RandItr, typename Compare>
void insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
if (begin == end) {
return;
}
for (RandItr cur = begin + 1; cur != end; ++cur) {
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first to avoid 2 moves for an element
// already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
      } while (shift != begin && comp(tmp, *--shift_1));
*shift = std::move(tmp);
}
}
}
// Sorts [begin, end) using insertion sort with the given comparison function.
// Assumes *(begin - 1) is an element smaller than or equal to any element
// in [begin, end).
template<typename RandItr, typename Compare>
void unguarded_insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
if (begin == end) {
return;
}
for (RandItr cur = begin + 1; cur != end; ++cur) {
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first so we can avoid 2 moves
// for an element already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
      } while (comp(tmp, *--shift_1));
*shift = std::move(tmp);
}
}
}
// Attempts to use insertion sort on [begin, end).
// Will return false if more than
// partial_insertion_sort_limit elements were moved,
// and abort sorting. Otherwise it will successfully sort and return true.
template<typename RandItr, typename Compare>
bool partial_insertion_sort(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
using D = typename std::iterator_traits<RandItr>::difference_type;
// When we detect an already sorted partition, attempt an insertion sort
// that allows this amount of element moves before giving up.
constexpr auto partial_insertion_sort_limit = D{8};
if (begin == end) return true;
auto limit = D{0};
for (RandItr cur = begin + 1; cur != end; ++cur) {
if (limit > partial_insertion_sort_limit) {
return false;
}
RandItr shift = cur;
RandItr shift_1 = cur - 1;
// Compare first so we can avoid 2 moves
// for an element already positioned correctly.
if (comp(*shift, *shift_1)) {
T tmp = std::move(*shift);
do {
*shift-- = std::move(*shift_1);
      } while (shift != begin && comp(tmp, *--shift_1));
*shift = std::move(tmp);
limit += cur - shift;
}
}
return true;
}
// Partitions [begin, end) around pivot *begin using comparison function comp.
// Elements equal to the pivot are put in the right-hand partition.
// Returns the position of the pivot after partitioning and whether the passed
// sequence already was correctly partitioned.
// Assumes the pivot is a median of at least 3 elements and that [begin, end)
// is at least insertion_sort_threshold long.
template<typename Iter, typename Compare>
std::pair<Iter, bool> partition_right(Iter begin, Iter end, Compare comp) {
using T = typename std::iterator_traits<Iter>::value_type;
// Move pivot into local for speed.
T pivot(std::move(*begin));
Iter first = begin;
Iter last = end;
  // Find the first element greater than or equal to the pivot
  // (the median of 3 guarantees this exists).
while (comp(*++first, pivot));
// Find the first element strictly smaller than the pivot.
// We have to guard this search if there was no element before *first.
if (first - 1 == begin) while (first < last && !comp(*--last, pivot));
else while (!comp(*--last, pivot));
// If the first pair of elements that should be swapped to partition
// are the same element, the passed in sequence already was correctly
// partitioned.
bool already_partitioned = first >= last;
// Keep swapping pairs of elements that are on the wrong side of the pivot.
// Previously swapped pairs guard the searches,
// which is why the first iteration is special-cased above.
while (first < last) {
std::iter_swap(first, last);
while (comp(*++first, pivot));
while (!comp(*--last, pivot));
}
// Put the pivot in the right place.
Iter pivot_pos = first - 1;
*begin = std::move(*pivot_pos);
*pivot_pos = std::move(pivot);
return std::make_pair(pivot_pos, already_partitioned);
}
// Similar function to the one above, except elements equal to the pivot
// are put to the left of the pivot and it doesn't check or return
// if the passed sequence already was partitioned.
// Since this is rarely used (the many equal case),
// and in that case pdqsort already has O(n) performance,
// no block quicksort is applied here for simplicity.
template<typename RandItr, typename Compare>
RandItr partition_left(RandItr begin, RandItr end, Compare comp) {
using T = typename std::iterator_traits<RandItr>::value_type;
T pivot(std::move(*begin));
RandItr first = begin;
RandItr last = end;
while (comp(pivot, *--last));
if (last + 1 == end) {
while (first < last && !comp(pivot, *++first));
}
else {
while (!comp(pivot, *++first));
}
while (first < last) {
std::iter_swap(first, last);
while (comp(pivot, *--last));
while (!comp(pivot, *++first));
}
RandItr pivot_pos = last;
*begin = std::move(*pivot_pos);
*pivot_pos = std::move(pivot);
return pivot_pos;
}
template<typename Iter, typename Compare>
void parallel_pdqsort(
tf::Subflow& sf,
Iter begin, Iter end, Compare comp,
int bad_allowed, bool leftmost = true
) {
// Partitions below this size are sorted sequentially
constexpr auto cutoff = parallel_sort_cutoff<Iter>();
// Partitions below this size are sorted using insertion sort
constexpr auto insertion_sort_threshold = 24;
// Partitions above this size use Tukey's ninther to select the pivot.
constexpr auto ninther_threshold = 128;
//using diff_t = typename std::iterator_traits<Iter>::difference_type;
// Use a while loop for tail recursion elimination.
while (true) {
//diff_t size = end - begin;
size_t size = end - begin;
// Insertion sort is faster for small arrays.
if (size < insertion_sort_threshold) {
if (leftmost) {
insertion_sort(begin, end, comp);
}
else {
unguarded_insertion_sort(begin, end, comp);
}
return;
}
if(size <= cutoff) {
std::sort(begin, end, comp);
return;
}
// Choose pivot as median of 3 or pseudomedian of 9.
//diff_t s2 = size / 2;
size_t s2 = size >> 1;
if (size > ninther_threshold) {
sort3(begin, begin + s2, end - 1, comp);
sort3(begin + 1, begin + (s2 - 1), end - 2, comp);
sort3(begin + 2, begin + (s2 + 1), end - 3, comp);
sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp);
std::iter_swap(begin, begin + s2);
}
else {
sort3(begin + s2, begin, end - 1, comp);
}
// If *(begin - 1) is the end of the right partition
// of a previous partition operation, there is no element in [begin, end)
// that is smaller than *(begin - 1).
// Then if our pivot compares equal to *(begin - 1) we change strategy,
// putting equal elements in the left partition,
// greater elements in the right partition.
// We do not have to recurse on the left partition,
// since it's sorted (all equal).
if (!leftmost && !comp(*(begin - 1), *begin)) {
begin = partition_left(begin, end, comp) + 1;
continue;
}
// Partition and get results.
const auto pair = partition_right(begin, end, comp);
const auto pivot_pos = pair.first;
const auto already_partitioned = pair.second;
// Check for a highly unbalanced partition.
//diff_t l_size = pivot_pos - begin;
//diff_t r_size = end - (pivot_pos + 1);
const size_t l_size = pivot_pos - begin;
const size_t r_size = end - (pivot_pos + 1);
const bool highly_unbalanced = l_size < size / 8 || r_size < size / 8;
// If we got a highly unbalanced partition we shuffle elements
// to break many patterns.
if (highly_unbalanced) {
// If we had too many bad partitions, switch to heapsort
// to guarantee O(n log n).
if (--bad_allowed == 0) {
std::make_heap(begin, end, comp);
std::sort_heap(begin, end, comp);
return;
}
if (l_size >= insertion_sort_threshold) {
std::iter_swap(begin, begin + l_size / 4);
std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4);
if (l_size > ninther_threshold) {
std::iter_swap(begin + 1, begin + (l_size / 4 + 1));
std::iter_swap(begin + 2, begin + (l_size / 4 + 2));
std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1));
std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2));
}
}
if (r_size >= insertion_sort_threshold) {
std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4));
std::iter_swap(end - 1, end - r_size / 4);
if (r_size > ninther_threshold) {
std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4));
std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4));
std::iter_swap(end - 2, end - (1 + r_size / 4));
std::iter_swap(end - 3, end - (2 + r_size / 4));
}
}
}
    // decently balanced
    else {
      // if the sequence was already partitioned, try to finish with insertion sort
if (already_partitioned &&
partial_insertion_sort(begin, pivot_pos, comp) &&
partial_insertion_sort(pivot_pos + 1, end, comp)
) {
return;
}
}
// Sort the left partition first using recursion and
// do tail recursion elimination for the right-hand partition.
sf.silent_async(
[&sf, begin, pivot_pos, comp, bad_allowed, leftmost] () mutable {
parallel_pdqsort(sf, begin, pivot_pos, comp, bad_allowed, leftmost);
}
);
begin = pivot_pos + 1;
leftmost = false;
}
}
// ----------------------------------------------------------------------------
// 3-way quick sort
// ----------------------------------------------------------------------------
// 3-way quick sort
template <typename RandItr, typename C>
void parallel_3wqsort(tf::Subflow& sf, RandItr first, RandItr last, C compare) {
using namespace std::string_literals;
constexpr auto cutoff = parallel_sort_cutoff<RandItr>();
sort_partition:
if(static_cast<size_t>(last - first) < cutoff) {
std::sort(first, last+1, compare);
return;
}
auto m = pseudo_median_of_nine(first, last, compare);
if(m != first) {
std::iter_swap(first, m);
}
auto l = first;
auto r = last;
auto f = std::next(first, 1);
bool is_swapped_l = false;
bool is_swapped_r = false;
while(f <= r) {
if(compare(*f, *l)) {
is_swapped_l = true;
std::iter_swap(l, f);
l++;
f++;
}
else if(compare(*l, *f)) {
is_swapped_r = true;
std::iter_swap(r, f);
r--;
}
else {
f++;
}
}
if(l - first > 1 && is_swapped_l) {
//sf.emplace([&](tf::Subflow& sfl) mutable {
// parallel_3wqsort(sfl, first, l-1, compare);
//});
sf.silent_async([&sf, first, l, &compare] () mutable {
parallel_3wqsort(sf, first, l-1, compare);
});
}
if(last - r > 1 && is_swapped_r) {
//sf.emplace([&](tf::Subflow& sfr) mutable {
// parallel_3wqsort(sfr, r+1, last, compare);
//});
//sf.silent_async([&sf, r, last, &compare] () mutable {
// parallel_3wqsort(sf, r+1, last, compare);
//});
first = r+1;
goto sort_partition;
}
//sf.join();
}
// ----------------------------------------------------------------------------
// tf::Taskflow::sort
// ----------------------------------------------------------------------------
// Function: sort
template <typename B, typename E, typename C>
Task FlowBuilder::sort(B beg, E end, C cmp) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
Task task = emplace([b=beg, e=end, cmp] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= parallel_sort_cutoff<B_t>()) {
std::sort(beg, end, cmp);
return;
}
//parallel_3wqsort(sf, beg, end-1, cmp);
parallel_pdqsort(sf, beg, end, cmp, log2(end - beg));
sf.join();
});
return task;
}
// Function: sort
template <typename B, typename E>
Task FlowBuilder::sort(B beg, E end) {
using value_type = std::decay_t<decltype(*std::declval<B>())>;
return sort(beg, end, std::less<value_type>{});
}
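// A brief illustrative usage sketch (not part of the original header):
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   std::vector<int> data(1000000);
//   // ... fill data ...
//   taskflow.sort(data.begin(), data.end());  // ascending by default
//   // taskflow.sort(data.begin(), data.end(), std::greater<int>{});
//   executor.run(taskflow).wait();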
} // namespace tf ------------------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/algorithm/for_each.hpp | // reference:
// - gomp: https://github.com/gcc-mirror/gcc/blob/master/libgomp/iter.c
// - komp: https://github.com/llvm-mirror/openmp/blob/master/runtime/src/kmp_dispatch.cpp
#pragma once
#include "../core/executor.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// default parallel for
// ----------------------------------------------------------------------------
// Function: for_each
template <typename B, typename E, typename C>
Task FlowBuilder::for_each(B beg, E end, C c) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, c] (Subflow& sf) mutable {
// fetch the stateful values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
std::for_each(beg, end, c);
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t z = 0;
      // below p1 remaining items, fall back to fixed-size chunks (fine-grained);
      // otherwise take a fraction p2 of the remaining items (guided scheduling)
      size_t p1 = 2 * W * (chunk_size + 1);
      double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++) {
c(*beg++);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x< e0; x++) {
c(*beg++);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
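// A brief illustrative usage sketch (not part of the original header):
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   std::vector<int> items(100);
//   taskflow.for_each(items.begin(), items.end(), [](int& v){ v += 1; });
//   executor.run(taskflow).wait();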
// Function: for_each_index
template <typename B, typename E, typename S, typename C>
Task FlowBuilder::for_each_index(B beg, E end, S inc, C c){
using namespace std::string_literals;
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using S_t = std::decay_t<unwrap_ref_decay_t<S>>;
Task task = emplace([b=beg, e=end, a=inc, c] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
S_t inc = a;
if(is_range_invalid(beg, end, inc)) {
TF_THROW("invalid range [", beg, ", ", end, ") with step size ", inc);
}
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = distance(beg, end, inc);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(size_t x=0; x<N; x++, beg+=inc) {
c(beg);
}
return;
}
if(N < W) {
W = N;
}
std::atomic<size_t> next(0);
auto loop = [=, &next] () mutable {
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
size_t s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
auto s = static_cast<B_t>(s0) * inc + beg;
for(size_t x=s0; x<e0; x++, s+=inc) {
c(s);
}
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
auto s = static_cast<B_t>(s0) * inc + beg;
for(size_t x=s0; x<e0; x++, s+= inc) {
c(s);
}
s0 = next.load(std::memory_order_relaxed);
}
}
}
};
for(size_t w=0; w<W; w++) {
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/algorithm/reduce.hpp | #pragma once
#include "../core/executor.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// default reduction
// ----------------------------------------------------------------------------
template <typename B, typename E, typename T, typename O>
Task FlowBuilder::reduce(B beg, E end, T& init, O bop) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, &r=init, bop] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
//size_t C = (c == 0) ? 1 : c;
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(; beg!=end; r = bop(r, *beg++));
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
std::atomic<size_t> next(0);
auto loop = [=, &mutex, &next, &r] () mutable {
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
r = bop(r, *beg);
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T sum = bop(*beg1, *beg2);
size_t z = s0 + 2;
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
break;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++, beg++) {
sum = bop(sum, *beg);
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x<e0; x++, beg++) {
sum = bop(sum, *beg);
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
std::lock_guard<std::mutex> lock(mutex);
r = bop(r, sum);
};
for(size_t w=0; w<W; w++) {
//if(w*2 >= N) {
// break;
//}
//sf._named_silent_async(
// sf._worker, "part-"s + std::to_string(w), loop
//);
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
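// A brief illustrative usage sketch (not part of the original header); the
// reduction target "sum" must outlive the run.
//
//   tf::Executor executor;
//   tf::Taskflow taskflow;
//   std::vector<int> data(100, 1);
//   int sum = 0;
//   taskflow.reduce(data.begin(), data.end(), sum, std::plus<int>{});
//   executor.run(taskflow).wait();  // sum == 100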
// ----------------------------------------------------------------------------
// default transform and reduction
// ----------------------------------------------------------------------------
template <typename B, typename E, typename T, typename BOP, typename UOP>
Task FlowBuilder::transform_reduce(
B beg, E end, T& init, BOP bop, UOP uop
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using namespace std::string_literals;
Task task = emplace([b=beg, e=end, &r=init, bop, uop] (Subflow& sf) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
if(beg == end) {
return;
}
//size_t chunk_size = (c == 0) ? 1 : c;
size_t chunk_size = 1;
size_t W = sf._executor.num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= chunk_size) {
for(; beg!=end; r = bop(std::move(r), uop(*beg++)));
return;
}
if(N < W) {
W = N;
}
std::mutex mutex;
std::atomic<size_t> next(0);
auto loop = [=, &mutex, &next, &r] () mutable {
size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
if(s0 >= N) {
return;
}
std::advance(beg, s0);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(mutex);
r = bop(std::move(r), uop(*beg));
return;
}
auto beg1 = beg++;
auto beg2 = beg++;
T sum = bop(uop(*beg1), uop(*beg2));
size_t z = s0 + 2;
size_t p1 = 2 * W * (chunk_size + 1);
double p2 = 0.5 / static_cast<double>(W);
s0 = next.load(std::memory_order_relaxed);
while(s0 < N) {
size_t r = N - s0;
// fine-grained
if(r < p1) {
while(1) {
s0 = next.fetch_add(chunk_size, std::memory_order_relaxed);
if(s0 >= N) {
break;
}
size_t e0 = (chunk_size <= (N - s0)) ? s0 + chunk_size : N;
std::advance(beg, s0-z);
for(size_t x=s0; x<e0; x++, beg++) {
sum = bop(std::move(sum), uop(*beg));
}
z = e0;
}
break;
}
// coarse-grained
else {
size_t q = static_cast<size_t>(p2 * r);
if(q < chunk_size) {
q = chunk_size;
}
size_t e0 = (q <= r) ? s0 + q : N;
if(next.compare_exchange_strong(s0, e0, std::memory_order_relaxed,
std::memory_order_relaxed)) {
std::advance(beg, s0-z);
for(size_t x = s0; x<e0; x++, beg++) {
sum = bop(std::move(sum), uop(*beg));
}
z = e0;
s0 = next.load(std::memory_order_relaxed);
}
}
}
std::lock_guard<std::mutex> lock(mutex);
r = bop(std::move(r), std::move(sum));
};
for(size_t w=0; w<W; w++) {
//if(w*2 >= N) {
// break;
//}
//sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
auto r = N - next.load(std::memory_order_relaxed);
// no more loop work to do - finished by previous async tasks
if(!r) {
break;
}
// tail optimization
if(r <= chunk_size || w == W-1) {
loop();
break;
}
else {
sf._named_silent_async(sf._worker, "loop-"s + std::to_string(w), loop);
}
}
sf.join();
});
return task;
}
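// A minimal usage sketch (illustrative only, under the same assumptions as the
// reduce example above): each element is transformed by uop before being folded
// into the running result by bop.
//
//   std::vector<std::string> words{"a", "bb", "ccc"};
//   size_t total = 0;
//   taskflow.transform_reduce(
//     words.begin(), words.end(), total,
//     std::plus<size_t>{},                           // bop: reduction
//     [](const std::string& w){ return w.size(); }   // uop: transformation
//   );
//   executor.run(taskflow).wait();   // total == 6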
} // end of namespace tf -----------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/algorithm/critical.hpp | #pragma once
#include "../core/task.hpp"
/**
@file critical.hpp
@brief critical include file
*/
namespace tf {
// ----------------------------------------------------------------------------
// CriticalSection
// ----------------------------------------------------------------------------
/**
@class CriticalSection
@brief class to create a critical region of limited workers to run tasks
tf::CriticalSection is a wrapper over tf::Semaphore and is specialized for
limiting the maximum concurrency over a set of tasks.
A critical section starts with an initial count representing that limit.
When a task is added to the critical section,
the task acquires and releases the semaphore internal to the critical section.
This design avoids explicit call of tf::Task::acquire and tf::Task::release.
The following example creates a critical section of one worker and adds
the five tasks to the critical section.
@code{.cpp}
tf::Executor executor(8); // create an executor of 8 workers
tf::Taskflow taskflow;
// create a critical section of 1 worker
tf::CriticalSection critical_section(1);
tf::Task A = taskflow.emplace([](){ std::cout << "A" << std::endl; });
tf::Task B = taskflow.emplace([](){ std::cout << "B" << std::endl; });
tf::Task C = taskflow.emplace([](){ std::cout << "C" << std::endl; });
tf::Task D = taskflow.emplace([](){ std::cout << "D" << std::endl; });
tf::Task E = taskflow.emplace([](){ std::cout << "E" << std::endl; });
critical_section.add(A, B, C, D, E);
executor.run(taskflow).wait();
@endcode
*/
class CriticalSection : public Semaphore {
public:
/**
@brief constructs a critical region that limits the number of concurrent workers
*/
explicit CriticalSection(size_t max_workers = 1);
/**
@brief adds one or multiple tasks into the critical region
*/
template <typename... Tasks>
void add(Tasks...tasks);
};
inline CriticalSection::CriticalSection(size_t max_workers) :
Semaphore {max_workers} {
}
template <typename... Tasks>
void CriticalSection::add(Tasks... tasks) {
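// fold expressions over the task pack: every task first acquires and then
// releases the semaphore underlying this critical section, so at most
// max_workers of them can run concurrently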
(tasks.acquire(*this), ...);
(tasks.release(*this), ...);
}
} // end of namespace tf. ---------------------------------------------------
| hpp |
oneAPI-samples | data/projects/oneAPI-samples/DirectProgramming/C++SYCL/DenseLinearAlgebra/guided_jacobiCudaGraphs_SYCLMigration/03_sycl_migrated_optimized/Common/taskflow/algorithm/data_pipeline.hpp | #pragma once
#include "pipeline.hpp"
namespace tf {
// ----------------------------------------------------------------------------
// Class Definition: DataPipe
// ----------------------------------------------------------------------------
/**
@class DataPipe
@brief class to create a stage in a data-parallel pipeline
A data pipe represents a stage of a data-parallel pipeline.
A data pipe can run in either a @em parallel or a @em serial direction
(specified by tf::PipeType) and is associated with a callable that the
pipeline scheduler invokes.
You need to use the template function, tf::make_data_pipe, to create
a data pipe. The input and output types of a tf::DataPipe should be decayed types
(though the library will always decay them for you using `std::decay`)
to allow internal storage to work.
The data is passed by reference to your callable, at which point you can take
it by copy or by reference.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input) {return std::to_string(input + 100);}
);
@endcode
In addition to the data, your callable can take a reference to tf::Pipeflow
as its second argument to probe the runtime information of a stage task,
such as its line number and token number:
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input, tf::Pipeflow& pf) {
printf("token=%lu, line=%lu\n", pf.token(), pf.line());
return std::to_string(input + 100);
}
);
@endcode
*/
template <typename Input, typename Output, typename C>
class DataPipe {
template <typename... Ps>
friend class DataPipeline;
public:
/**
@brief callable type of the data pipe
*/
using callable_t = C;
/**
@brief input type of the data pipe
*/
using input_t = Input;
/**
@brief output type of the data pipe
*/
using output_t = Output;
/**
@brief default constructor
*/
DataPipe() = default;
/**
@brief constructs a data pipe
You should use the helper function, tf::make_data_pipe,
to create a DataPipe object, especially when you need tf::DataPipe
to automatically deduce the lambda type.
*/
DataPipe(PipeType d, callable_t&& callable) :
_type{d}, _callable{std::forward<callable_t>(callable)} {
}
/**
@brief queries the type of the data pipe
A data pipe can be either parallel (tf::PipeType::PARALLEL) or serial
(tf::PipeType::SERIAL).
*/
PipeType type() const {
return _type;
}
/**
@brief assigns a new type to the data pipe
*/
void type(PipeType type) {
_type = type;
}
/**
@brief assigns a new callable to the data pipe
@tparam U callable type
@param callable a callable object constructible from the callable type
of this data pipe
Assigns a new callable to the pipe using universal forwarding.
*/
template <typename U>
void callable(U&& callable) {
_callable = std::forward<U>(callable);
}
private:
PipeType _type;
callable_t _callable;
};
/**
@brief function to construct a data pipe (tf::DataPipe)
@tparam Input input data type
@tparam Output output data type
@tparam C callable type
tf::make_data_pipe is a helper function to create a data pipe (tf::DataPipe)
in a data-parallel pipeline (tf::DataPipeline).
The first argument specifies the direction of the data pipe,
either tf::PipeType::SERIAL or tf::PipeType::PARALLEL,
and the second argument is a callable that the pipeline scheduler invokes.
Input and output data types are specified via template parameters,
which the library always decays (using std::decay) for internal storage purposes.
The callable must take the input data type as its first argument
and return a value of the output data type.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input) {
return std::to_string(input + 100);
}
);
@endcode
The callable can additionally take a reference of tf::Pipeflow,
which allows you to query the runtime information of a stage task,
such as its line number and token number.
@code{.cpp}
tf::make_data_pipe<int, std::string>(
tf::PipeType::SERIAL,
[](int& input, tf::Pipeflow& pf) {
printf("token=%lu, line=%lu\n", pf.token(), pf.line());
return std::to_string(input + 100);
}
);
@endcode
*/
template <typename Input, typename Output, typename C>
auto make_data_pipe(PipeType d, C&& callable) {
return DataPipe<Input, Output, C>(d, std::forward<C>(callable));
}
// ----------------------------------------------------------------------------
// Class Definition: DataPipeline
// ----------------------------------------------------------------------------
/**
@class DataPipeline
@brief class to create a data-parallel pipeline scheduling framework
@tparam Ps data pipe types
Similar to tf::Pipeline, a tf::DataPipeline is a composable graph object
for users to create a <i>data-parallel pipeline scheduling framework</i>
using a module task in a taskflow.
The only difference is that tf::DataPipeline provides a data abstraction
for users to quickly express dataflow in a pipeline.
The following example creates a data-parallel pipeline of three stages
that generate dataflow from `void` to `int`, `std::string`, `float`, and `void`.
@code{.cpp}
#include <taskflow/taskflow.hpp>
#include <taskflow/algorithm/data_pipeline.hpp>
int main() {
// data flow => void -> int -> std::string -> float -> void
tf::Taskflow taskflow("pipeline");
tf::Executor executor;
const size_t num_lines = 4;
tf::DataPipeline pl(num_lines,
tf::make_data_pipe<void, int>(tf::PipeType::SERIAL, [&](tf::Pipeflow& pf) -> int{
if(pf.token() == 5) {
pf.stop();
return 0;
}
else {
return pf.token();
}
}),
tf::make_data_pipe<int, std::string>(tf::PipeType::SERIAL, [](int& input) {
return std::to_string(input + 100);
}),
tf::make_data_pipe<std::string, void>(tf::PipeType::SERIAL, [](std::string& input) {
std::cout << input << std::endl;
})
);
// build the pipeline graph using composition
taskflow.composed_of(pl).name("pipeline");
// dump the pipeline graph structure (with composition)
taskflow.dump(std::cout);
// run the pipeline
executor.run(taskflow).wait();
return 0;
}
@endcode
The pipeline schedules five tokens over four parallel lines in a circular fashion,
as depicted below:
@code{.shell-session}
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
| | |
v v v
o -> o -> o
@endcode
*/
template <typename... Ps>
class DataPipeline {
static_assert(sizeof...(Ps)>0, "must have at least one pipe");
/**
@private
*/
struct Line {
std::atomic<size_t> join_counter;
};
/**
@private
*/
struct PipeMeta {
PipeType type;
};
public:
/**
@brief internal storage type for each data token: a std::variant over the
decayed, de-duplicated output types of all pipes (std::monostate for void outputs)
*/
using data_t = unique_variant_t<std::variant<std::conditional_t<
std::is_void_v<typename Ps::output_t>,
std::monostate,
std::decay_t<typename Ps::output_t>>...
>>;
/**
@brief constructs a data-parallel pipeline object
@param num_lines the number of parallel lines
@param ps a list of pipes
Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
DataPipeline(size_t num_lines, Ps&&... ps);
/**
@brief constructs a data-parallel pipeline object
@param num_lines the number of parallel lines
@param ps a tuple of pipes
Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule
tokens through the given linear chain of pipes stored in a std::tuple.
The first pipe must define a serial direction (tf::PipeType::SERIAL)
or an exception will be thrown.
*/
DataPipeline(size_t num_lines, std::tuple<Ps...>&& ps);
/**
@brief queries the number of parallel lines
The function returns the number of parallel lines given by the user
upon the construction of the pipeline.
The number of lines represents the maximum parallelism this pipeline
can achieve.
*/
size_t num_lines() const noexcept;
/**
@brief queries the number of pipes
The function returns the number of pipes given by the user
upon the construction of the pipeline.
*/
constexpr size_t num_pipes() const noexcept;
/**
@brief resets the pipeline
Resets the pipeline to its initial state. After resetting a pipeline,
its token identifier starts from zero as if the pipeline had just been
constructed.
*/
void reset();
/**
@brief queries the number of generated tokens in the pipeline
The number represents the total number of scheduling tokens that have been
generated by the pipeline so far.
*/
size_t num_tokens() const noexcept;
/**
@brief obtains the graph object associated with the pipeline construct
The returned graph is primarily used as an opaque data structure for creating
a module task of this pipeline.
*/
Graph& graph();
private:
Graph _graph;
size_t _num_tokens;
std::tuple<Ps...> _pipes;
std::array<PipeMeta, sizeof...(Ps)> _meta;
std::vector<std::array<Line, sizeof...(Ps)>> _lines;
std::vector<Task> _tasks;
std::vector<Pipeflow> _pipeflows;
std::vector<CachelineAligned<data_t>> _buffer;
template <size_t... I>
auto _gen_meta(std::tuple<Ps...>&&, std::index_sequence<I...>);
void _on_pipe(Pipeflow&, Runtime&);
void _build();
};
// constructor
template <typename... Ps>
DataPipeline<Ps...>::DataPipeline(size_t num_lines, Ps&&... ps) :
_pipes {std::make_tuple(std::forward<Ps>(ps)...)},
_meta {PipeMeta{ps.type()}...},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines),
_buffer (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// constructor
template <typename... Ps>
DataPipeline<Ps...>::DataPipeline(size_t num_lines, std::tuple<Ps...>&& ps) :
_pipes {std::forward<std::tuple<Ps...>>(ps)},
_meta {_gen_meta(
std::forward<std::tuple<Ps...>>(ps), std::make_index_sequence<sizeof...(Ps)>{}
)},
_lines (num_lines),
_tasks (num_lines + 1),
_pipeflows (num_lines),
_buffer (num_lines) {
if(num_lines == 0) {
TF_THROW("must have at least one line");
}
if(std::get<0>(_pipes).type() != PipeType::SERIAL) {
TF_THROW("first pipe must be serial");
}
reset();
_build();
}
// Function: _gen_meta
template <typename... Ps>
template <size_t... I>
auto DataPipeline<Ps...>::_gen_meta(std::tuple<Ps...>&& ps, std::index_sequence<I...>) {
return std::array{PipeMeta{std::get<I>(ps).type()}...};
}
// Function: num_lines
template <typename... Ps>
size_t DataPipeline<Ps...>::num_lines() const noexcept {
return _pipeflows.size();
}
// Function: num_pipes
template <typename... Ps>
constexpr size_t DataPipeline<Ps...>::num_pipes() const noexcept {
return sizeof...(Ps);
}
// Function: num_tokens
template <typename... Ps>
size_t DataPipeline<Ps...>::num_tokens() const noexcept {
return _num_tokens;
}
// Function: graph
template <typename... Ps>
Graph& DataPipeline<Ps...>::graph() {
return _graph;
}
// Function: reset
template <typename... Ps>
void DataPipeline<Ps...>::reset() {
_num_tokens = 0;
for(size_t l = 0; l<num_lines(); l++) {
_pipeflows[l]._pipe = 0;
_pipeflows[l]._line = l;
}
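// initialize the join counter of each (line, pipe) slot so that only the
// first pipe of the first line is immediately runnable; every other slot
// waits for the upstream dependencies encoded by its pipe type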
_lines[0][0].join_counter.store(0, std::memory_order_relaxed);
for(size_t l=1; l<num_lines(); l++) {
for(size_t f=1; f<num_pipes(); f++) {
_lines[l][f].join_counter.store(
static_cast<size_t>(_meta[f].type), std::memory_order_relaxed
);
}
}
for(size_t f=1; f<num_pipes(); f++) {
_lines[0][f].join_counter.store(1, std::memory_order_relaxed);
}
for(size_t l=1; l<num_lines(); l++) {
_lines[l][0].join_counter.store(
static_cast<size_t>(_meta[0].type) - 1, std::memory_order_relaxed
);
}
}
// Procedure: _on_pipe
template <typename... Ps>
void DataPipeline<Ps...>::_on_pipe(Pipeflow& pf, Runtime&) {
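// dispatch to the pf._pipe-th pipe at runtime and select, at compile time,
// the callable signature it supports (with or without a data input, with or
// without a tf::Pipeflow argument), reading and writing the per-line buffer
// as needed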
visit_tuple([&](auto&& pipe){
using data_pipe_t = std::decay_t<decltype(pipe)>;
using callable_t = typename data_pipe_t::callable_t;
using input_t = std::decay_t<typename data_pipe_t::input_t>;
using output_t = std::decay_t<typename data_pipe_t::output_t>;
// first pipe
if constexpr (std::is_invocable_v<callable_t, Pipeflow&>) {
// [](tf::Pipeflow&) -> void {}, i.e., we only have one pipe
if constexpr (std::is_void_v<output_t>) {
pipe._callable(pf);
// [](tf::Pipeflow&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(pf);
}
}
// other pipes without pipeflow in the second argument
else if constexpr (std::is_invocable_v<callable_t, input_t&>) {
// [](input_t&) -> void {}, i.e., the last pipe
if constexpr (std::is_void_v<output_t>) {
pipe._callable(std::get<input_t>(_buffer[pf._line].data));
// [](input_t&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(
std::get<input_t>(_buffer[pf._line].data)
);
}
}
// other pipes with pipeflow in the second argument
else if constexpr (std::is_invocable_v<callable_t, input_t&, Pipeflow&>) {
// [](input_t&, tf::Pipeflow&) -> void {}
if constexpr (std::is_void_v<output_t>) {
pipe._callable(std::get<input_t>(_buffer[pf._line].data), pf);
// [](input_t&, tf::Pipeflow&) -> output_t {}
} else {
_buffer[pf._line].data = pipe._callable(
std::get<input_t>(_buffer[pf._line].data), pf
);
}
}
//else if constexpr(std::is_invocable_v<callable_t, Pipeflow&, Runtime&>) {
// pipe._callable(pf, rt);
//}
else {
static_assert(dependent_false_v<callable_t>, "un-supported pipe callable type");
}
}, _pipes, pf._pipe);
}
// Procedure: _build
template <typename... Ps>
void DataPipeline<Ps...>::_build() {
using namespace std::literals::string_literals;
FlowBuilder fb(_graph);
// init task
_tasks[0] = fb.emplace([this]() {
return static_cast<int>(_num_tokens % num_lines());
}).name("cond");
// line task
for(size_t l = 0; l < num_lines(); l++) {
_tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable {
auto pf = &_pipeflows[l];
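// each invocation runs one (line, pipe) stage; instead of returning after
// the stage, control jumps back to the pipeline label to continue with a
// stage that this task has just made runnable, saving a scheduling round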
pipeline:
_lines[pf->_line][pf->_pipe].join_counter.store(
static_cast<size_t>(_meta[pf->_pipe].type), std::memory_order_relaxed
);
if (pf->_pipe == 0) {
pf->_token = _num_tokens;
if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) {
// here, the pipeline is not stopped yet because other
// lines of tasks may still be running their last stages
return;
}
++_num_tokens;
}
else {
_on_pipe(*pf, rt);
}
size_t c_f = pf->_pipe;
size_t n_f = (pf->_pipe + 1) % num_pipes();
size_t n_l = (pf->_line + 1) % num_lines();
pf->_pipe = n_f;
// ---- scheduling starts here ----
// Notice that the shared variable f must not be changed after this
// point because it can result in data race due to the following
// condition:
//
// a -> b
// | |
// v v
// c -> d
//
// d will be spawned by either c or b, so if c changes f but b spawns d
// then data race on f will happen
std::array<int, 2> retval;
size_t n = 0;
// downward dependency
if(_meta[c_f].type == PipeType::SERIAL &&
_lines[n_l][c_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 1;
}
// forward dependency
if(_lines[pf->_line][n_f].join_counter.fetch_sub(
1, std::memory_order_acq_rel) == 1
) {
retval[n++] = 0;
}
// notice that the task index starts from 1
switch(n) {
case 2: {
rt.schedule(_tasks[n_l+1]);
goto pipeline;
}
case 1: {
if (retval[0] == 1) {
pf = &_pipeflows[n_l];
}
goto pipeline;
}
}
}).name("rt-"s + std::to_string(l));
_tasks[0].precede(_tasks[l+1]);
}
}
} // end of namespace tf -----------------------------------------------------
| hpp |